problem_id (string, length 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, length 13-58) | prompt (string, length 1.71k-18.9k) | golden_diff (string, length 145-5.13k) | verification_info (string, length 465-23.6k) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
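The records below can also be consumed programmatically. The following is a minimal sketch using the Hugging Face `datasets` library; the dataset name is taken from the `source` column of the rows (`rasdani/github-patches`), while the `train` split name and the assumption that `verification_info` is stored as a JSON-encoded string are not confirmed by this page.

```python
# Minimal sketch: load the dataset and inspect one record.
# Assumptions (not confirmed by this page): the dataset is published on the
# Hugging Face Hub as "rasdani/github-patches" with a "train" split, and
# verification_info is a JSON-encoded string.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"])   # e.g. gh_patches_debug_1127 python__mypy-16229
print(row["prompt"][:300])                      # issue statement plus partial code base
print(row["golden_diff"][:300])                 # reference patch in unified diff format

info = json.loads(row["verification_info"])     # golden_diff, issue, before_files
print(sorted(info.keys()))
```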
gh_patches_debug_1127 | rasdani/github-patches | git_diff | python__mypy-16229 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add setuptools as a dependency on Python 3.12?
Mypyc needs `distutils` or `setuptools` to run, but Python 3.12 no longer bundles `distutils` ([PEP 632](https://peps.python.org/pep-0632/)). This seems to imply that we need to include `setuptools` as a dependency of mypy (at least on Python 3.12 or later), or unbundle mypyc into a separate distribution on PyPI. Thoughts?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from __future__ import annotations
4
5 import glob
6 import os
7 import os.path
8 import sys
9 from typing import TYPE_CHECKING, Any
10
11 if sys.version_info < (3, 8, 0): # noqa: UP036
12 sys.stderr.write("ERROR: You need Python 3.8 or later to use mypy.\n")
13 exit(1)
14
15 # we'll import stuff from the source tree, let's ensure is on the sys path
16 sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
17
18 # This requires setuptools when building; setuptools is not needed
19 # when installing from a wheel file (though it is still needed for
20 # alternative forms of installing, as suggested by README.md).
21 from setuptools import Extension, find_packages, setup
22 from setuptools.command.build_py import build_py
23
24 from mypy.version import __version__ as version
25
26 if TYPE_CHECKING:
27 from typing_extensions import TypeGuard
28
29 description = "Optional static typing for Python"
30 long_description = """
31 Mypy -- Optional Static Typing for Python
32 =========================================
33
34 Add type annotations to your Python programs, and use mypy to type
35 check them. Mypy is essentially a Python linter on steroids, and it
36 can catch many programming errors by analyzing your program, without
37 actually having to run it. Mypy has a powerful type system with
38 features such as type inference, gradual typing, generics and union
39 types.
40 """.lstrip()
41
42
43 def is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]:
44 return all(isinstance(item, Extension) for item in items)
45
46
47 def find_package_data(base, globs, root="mypy"):
48 """Find all interesting data files, for setup(package_data=)
49
50 Arguments:
51 root: The directory to search in.
52 globs: A list of glob patterns to accept files.
53 """
54
55 rv_dirs = [root for root, dirs, files in os.walk(base)]
56 rv = []
57 for rv_dir in rv_dirs:
58 files = []
59 for pat in globs:
60 files += glob.glob(os.path.join(rv_dir, pat))
61 if not files:
62 continue
63 rv.extend([os.path.relpath(f, root) for f in files])
64 return rv
65
66
67 class CustomPythonBuild(build_py):
68 def pin_version(self):
69 path = os.path.join(self.build_lib, "mypy")
70 self.mkpath(path)
71 with open(os.path.join(path, "version.py"), "w") as stream:
72 stream.write(f'__version__ = "{version}"\n')
73
74 def run(self):
75 self.execute(self.pin_version, ())
76 build_py.run(self)
77
78
79 cmdclass = {"build_py": CustomPythonBuild}
80
81 package_data = ["py.typed"]
82
83 package_data += find_package_data(os.path.join("mypy", "typeshed"), ["*.py", "*.pyi"])
84 package_data += [os.path.join("mypy", "typeshed", "stdlib", "VERSIONS")]
85
86 package_data += find_package_data(os.path.join("mypy", "xml"), ["*.xsd", "*.xslt", "*.css"])
87
88 USE_MYPYC = False
89 # To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
90 if len(sys.argv) > 1 and "--use-mypyc" in sys.argv:
91 sys.argv.remove("--use-mypyc")
92 USE_MYPYC = True
93 if os.getenv("MYPY_USE_MYPYC", None) == "1":
94 USE_MYPYC = True
95
96 if USE_MYPYC:
97 MYPYC_BLACKLIST = tuple(
98 os.path.join("mypy", x)
99 for x in (
100 # Need to be runnable as scripts
101 "__main__.py",
102 "pyinfo.py",
103 os.path.join("dmypy", "__main__.py"),
104 # Uses __getattr__/__setattr__
105 "split_namespace.py",
106 # Lies to mypy about code reachability
107 "bogus_type.py",
108 # We don't populate __file__ properly at the top level or something?
109 # Also I think there would be problems with how we generate version.py.
110 "version.py",
111 # Skip these to reduce the size of the build
112 "stubtest.py",
113 "stubgenc.py",
114 "stubdoc.py",
115 "stubutil.py",
116 )
117 ) + (
118 # Don't want to grab this accidentally
119 os.path.join("mypyc", "lib-rt", "setup.py"),
120 # Uses __file__ at top level https://github.com/mypyc/mypyc/issues/700
121 os.path.join("mypyc", "__main__.py"),
122 )
123
124 everything = [os.path.join("mypy", x) for x in find_package_data("mypy", ["*.py"])] + [
125 os.path.join("mypyc", x) for x in find_package_data("mypyc", ["*.py"], root="mypyc")
126 ]
127 # Start with all the .py files
128 all_real_pys = [
129 x for x in everything if not x.startswith(os.path.join("mypy", "typeshed") + os.sep)
130 ]
131 # Strip out anything in our blacklist
132 mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]
133 # Strip out any test code
134 mypyc_targets = [
135 x
136 for x in mypyc_targets
137 if not x.startswith(
138 (
139 os.path.join("mypy", "test") + os.sep,
140 os.path.join("mypyc", "test") + os.sep,
141 os.path.join("mypyc", "doc") + os.sep,
142 os.path.join("mypyc", "test-data") + os.sep,
143 )
144 )
145 ]
146 # ... and add back in the one test module we need
147 mypyc_targets.append(os.path.join("mypy", "test", "visitors.py"))
148
149 # The targets come out of file system apis in an unspecified
150 # order. Sort them so that the mypyc output is deterministic.
151 mypyc_targets.sort()
152
153 use_other_mypyc = os.getenv("ALTERNATE_MYPYC_PATH", None)
154 if use_other_mypyc:
155 # This bit is super unfortunate: we want to use a different
156 # mypy/mypyc version, but we've already imported parts, so we
157 # remove the modules that we've imported already, which will
158 # let the right versions be imported by mypyc.
159 del sys.modules["mypy"]
160 del sys.modules["mypy.version"]
161 del sys.modules["mypy.git"]
162 sys.path.insert(0, use_other_mypyc)
163
164 from mypyc.build import mypycify
165
166 opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
167 debug_level = os.getenv("MYPYC_DEBUG_LEVEL", "1")
168 force_multifile = os.getenv("MYPYC_MULTI_FILE", "") == "1"
169 ext_modules = mypycify(
170 mypyc_targets + ["--config-file=mypy_bootstrap.ini"],
171 opt_level=opt_level,
172 debug_level=debug_level,
173 # Use multi-file compilation mode on windows because without it
174 # our Appveyor builds run out of memory sometimes.
175 multi_file=sys.platform == "win32" or force_multifile,
176 )
177 assert is_list_of_setuptools_extension(ext_modules), "Expected mypycify to use setuptools"
178
179 else:
180 ext_modules = []
181
182
183 classifiers = [
184 "Development Status :: 5 - Production/Stable",
185 "Environment :: Console",
186 "Intended Audience :: Developers",
187 "License :: OSI Approved :: MIT License",
188 "Programming Language :: Python :: 3",
189 "Programming Language :: Python :: 3.8",
190 "Programming Language :: Python :: 3.9",
191 "Programming Language :: Python :: 3.10",
192 "Programming Language :: Python :: 3.11",
193 "Topic :: Software Development",
194 "Typing :: Typed",
195 ]
196
197 setup(
198 name="mypy",
199 version=version,
200 description=description,
201 long_description=long_description,
202 author="Jukka Lehtosalo",
203 author_email="[email protected]",
204 url="https://www.mypy-lang.org/",
205 license="MIT License",
206 py_modules=[],
207 ext_modules=ext_modules,
208 packages=find_packages(),
209 package_data={"mypy": package_data},
210 entry_points={
211 "console_scripts": [
212 "mypy=mypy.__main__:console_entry",
213 "stubgen=mypy.stubgen:main",
214 "stubtest=mypy.stubtest:main",
215 "dmypy=mypy.dmypy.client:console_entry",
216 "mypyc=mypyc.__main__:main",
217 ]
218 },
219 classifiers=classifiers,
220 cmdclass=cmdclass,
221 # When changing this, also update mypy-requirements.txt.
222 install_requires=[
223 "typing_extensions>=4.1.0",
224 "mypy_extensions >= 1.0.0",
225 "tomli>=1.1.0; python_version<'3.11'",
226 ],
227 # Same here.
228 extras_require={
229 "dmypy": "psutil >= 4.0",
230 "python2": "",
231 "reports": "lxml",
232 "install-types": "pip",
233 },
234 python_requires=">=3.8",
235 include_package_data=True,
236 project_urls={
237 "News": "https://mypy-lang.org/news.html",
238 "Documentation": "https://mypy.readthedocs.io/en/stable/index.html",
239 "Repository": "https://github.com/python/mypy",
240 },
241 )
242
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -227,6 +227,7 @@
# Same here.
extras_require={
"dmypy": "psutil >= 4.0",
+ "mypyc": "setuptools >= 50",
"python2": "",
"reports": "lxml",
"install-types": "pip",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -227,6 +227,7 @@\n # Same here.\n extras_require={\n \"dmypy\": \"psutil >= 4.0\",\n+ \"mypyc\": \"setuptools >= 50\",\n \"python2\": \"\",\n \"reports\": \"lxml\",\n \"install-types\": \"pip\",\n", "issue": "Add setuptools as a dependency on Python 3.12?\nMypyc needs `distutils` or `setuptools` to run, but Python 3.12 no longer bundles `distutils` ([PEP 632](https://peps.python.org/pep-0632/)). This seems to imply that we need to include `setuptools` as a dependency of mypy (at least on Python 3.12 or later), or unbundle mypyc into a separate distribution on PyPI. Thoughts?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import annotations\n\nimport glob\nimport os\nimport os.path\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif sys.version_info < (3, 8, 0): # noqa: UP036\n sys.stderr.write(\"ERROR: You need Python 3.8 or later to use mypy.\\n\")\n exit(1)\n\n# we'll import stuff from the source tree, let's ensure is on the sys path\nsys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))\n\n# This requires setuptools when building; setuptools is not needed\n# when installing from a wheel file (though it is still needed for\n# alternative forms of installing, as suggested by README.md).\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_py import build_py\n\nfrom mypy.version import __version__ as version\n\nif TYPE_CHECKING:\n from typing_extensions import TypeGuard\n\ndescription = \"Optional static typing for Python\"\nlong_description = \"\"\"\nMypy -- Optional Static Typing for Python\n=========================================\n\nAdd type annotations to your Python programs, and use mypy to type\ncheck them. Mypy is essentially a Python linter on steroids, and it\ncan catch many programming errors by analyzing your program, without\nactually having to run it. 
Mypy has a powerful type system with\nfeatures such as type inference, gradual typing, generics and union\ntypes.\n\"\"\".lstrip()\n\n\ndef is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]:\n return all(isinstance(item, Extension) for item in items)\n\n\ndef find_package_data(base, globs, root=\"mypy\"):\n \"\"\"Find all interesting data files, for setup(package_data=)\n\n Arguments:\n root: The directory to search in.\n globs: A list of glob patterns to accept files.\n \"\"\"\n\n rv_dirs = [root for root, dirs, files in os.walk(base)]\n rv = []\n for rv_dir in rv_dirs:\n files = []\n for pat in globs:\n files += glob.glob(os.path.join(rv_dir, pat))\n if not files:\n continue\n rv.extend([os.path.relpath(f, root) for f in files])\n return rv\n\n\nclass CustomPythonBuild(build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"mypy\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as stream:\n stream.write(f'__version__ = \"{version}\"\\n')\n\n def run(self):\n self.execute(self.pin_version, ())\n build_py.run(self)\n\n\ncmdclass = {\"build_py\": CustomPythonBuild}\n\npackage_data = [\"py.typed\"]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"typeshed\"), [\"*.py\", \"*.pyi\"])\npackage_data += [os.path.join(\"mypy\", \"typeshed\", \"stdlib\", \"VERSIONS\")]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"xml\"), [\"*.xsd\", \"*.xslt\", \"*.css\"])\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and \"--use-mypyc\" in sys.argv:\n sys.argv.remove(\"--use-mypyc\")\n USE_MYPYC = True\nif os.getenv(\"MYPY_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n MYPYC_BLACKLIST = tuple(\n os.path.join(\"mypy\", x)\n for x in (\n # Need to be runnable as scripts\n \"__main__.py\",\n \"pyinfo.py\",\n os.path.join(\"dmypy\", \"__main__.py\"),\n # Uses __getattr__/__setattr__\n \"split_namespace.py\",\n # Lies to mypy about code reachability\n \"bogus_type.py\",\n # We don't populate __file__ properly at the top level or something?\n # Also I think there would be problems with how we generate version.py.\n \"version.py\",\n # Skip these to reduce the size of the build\n \"stubtest.py\",\n \"stubgenc.py\",\n \"stubdoc.py\",\n \"stubutil.py\",\n )\n ) + (\n # Don't want to grab this accidentally\n os.path.join(\"mypyc\", \"lib-rt\", \"setup.py\"),\n # Uses __file__ at top level https://github.com/mypyc/mypyc/issues/700\n os.path.join(\"mypyc\", \"__main__.py\"),\n )\n\n everything = [os.path.join(\"mypy\", x) for x in find_package_data(\"mypy\", [\"*.py\"])] + [\n os.path.join(\"mypyc\", x) for x in find_package_data(\"mypyc\", [\"*.py\"], root=\"mypyc\")\n ]\n # Start with all the .py files\n all_real_pys = [\n x for x in everything if not x.startswith(os.path.join(\"mypy\", \"typeshed\") + os.sep)\n ]\n # Strip out anything in our blacklist\n mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]\n # Strip out any test code\n mypyc_targets = [\n x\n for x in mypyc_targets\n if not x.startswith(\n (\n os.path.join(\"mypy\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"doc\") + os.sep,\n os.path.join(\"mypyc\", \"test-data\") + os.sep,\n )\n )\n ]\n # ... and add back in the one test module we need\n mypyc_targets.append(os.path.join(\"mypy\", \"test\", \"visitors.py\"))\n\n # The targets come out of file system apis in an unspecified\n # order. 
Sort them so that the mypyc output is deterministic.\n mypyc_targets.sort()\n\n use_other_mypyc = os.getenv(\"ALTERNATE_MYPYC_PATH\", None)\n if use_other_mypyc:\n # This bit is super unfortunate: we want to use a different\n # mypy/mypyc version, but we've already imported parts, so we\n # remove the modules that we've imported already, which will\n # let the right versions be imported by mypyc.\n del sys.modules[\"mypy\"]\n del sys.modules[\"mypy.version\"]\n del sys.modules[\"mypy.git\"]\n sys.path.insert(0, use_other_mypyc)\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n debug_level = os.getenv(\"MYPYC_DEBUG_LEVEL\", \"1\")\n force_multifile = os.getenv(\"MYPYC_MULTI_FILE\", \"\") == \"1\"\n ext_modules = mypycify(\n mypyc_targets + [\"--config-file=mypy_bootstrap.ini\"],\n opt_level=opt_level,\n debug_level=debug_level,\n # Use multi-file compilation mode on windows because without it\n # our Appveyor builds run out of memory sometimes.\n multi_file=sys.platform == \"win32\" or force_multifile,\n )\n assert is_list_of_setuptools_extension(ext_modules), \"Expected mypycify to use setuptools\"\n\nelse:\n ext_modules = []\n\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development\",\n \"Typing :: Typed\",\n]\n\nsetup(\n name=\"mypy\",\n version=version,\n description=description,\n long_description=long_description,\n author=\"Jukka Lehtosalo\",\n author_email=\"[email protected]\",\n url=\"https://www.mypy-lang.org/\",\n license=\"MIT License\",\n py_modules=[],\n ext_modules=ext_modules,\n packages=find_packages(),\n package_data={\"mypy\": package_data},\n entry_points={\n \"console_scripts\": [\n \"mypy=mypy.__main__:console_entry\",\n \"stubgen=mypy.stubgen:main\",\n \"stubtest=mypy.stubtest:main\",\n \"dmypy=mypy.dmypy.client:console_entry\",\n \"mypyc=mypyc.__main__:main\",\n ]\n },\n classifiers=classifiers,\n cmdclass=cmdclass,\n # When changing this, also update mypy-requirements.txt.\n install_requires=[\n \"typing_extensions>=4.1.0\",\n \"mypy_extensions >= 1.0.0\",\n \"tomli>=1.1.0; python_version<'3.11'\",\n ],\n # Same here.\n extras_require={\n \"dmypy\": \"psutil >= 4.0\",\n \"python2\": \"\",\n \"reports\": \"lxml\",\n \"install-types\": \"pip\",\n },\n python_requires=\">=3.8\",\n include_package_data=True,\n project_urls={\n \"News\": \"https://mypy-lang.org/news.html\",\n \"Documentation\": \"https://mypy.readthedocs.io/en/stable/index.html\",\n \"Repository\": \"https://github.com/python/mypy\",\n },\n)\n", "path": "setup.py"}]}
| 3,378 | 93 |
gh_patches_debug_37357 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3792 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't add an user in UserGroupsControlpanelView to a Group with pas.plugin.ldap with plone.many_users=True
### What I did:
- add plone group `testgroup1` via /@@usergroup-groupprefs
- set registry entries for `plone.many_users` and `plone.many_groups` to `True`
- add a Folder
- go to the folder and call the sharing view /myfolder/@@sharing
- add the group `testgroup1` with reader, editor roles
- click on the group to add some users `/@@usergroup-groupmembership?groupname=testgroup1`
- search a user
- select a user (should be a LDAP User) and save
### What I expect to happen:
- the user is added to the group
### What actually happened:
- endless waiting for response after click on "Add" Button
### What version of Plone/ Addons I am using:
- Plone 6.0.4
- pas.plugin.ldap 1.8.2
- node.ext.ldap 1.2
- node 1.2.1
- LDAP Backend user objects > 10000
### Some investigations
- in the ZMI acl_users -> source_groups the action is possible without errors
- with loglevel DEBUG i see thousands of queries to the LDAP Backend
- the task run many minutes
- its a limitation of [pas.plugin.ldap](https://github.com/collective/pas.plugins.ldap#limitations-and-future-optimizations)
the [GroupMembershipControlPanel](https://github.com/plone/Products.CMFPlone/blob/a5b48c0c24e6eebbe01aa2874eaaa9aa3d49f155/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py#L65) clean the searchstring in the request and pass it to the membership search in UsersGroupsControlPanelView.
An empty searchstring is like a intention "list all users" and a violation against the option `many_users = True`. The search with empty searchstring should not be performed.
With the following patch, all is fine ;-)
```
if searchGroups:
if not self.many_groups or bool(searchString):
groupResults = searchView.merge(chain(*[searchView.searchGroups(**{field: searchString}) for field in ['id', 'title']]), 'groupid')
groupResults = [gtool.getGroupById(g['id']) for g in groupResults if g['id'] not in ignore]
groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))
if searchUsers:
if not self.many_users or bool(searchString):
userResults = searchView.merge(chain(*[searchView.searchUsers(**{field: searchString}) for field in ['login', 'fullname', 'email']]), 'userid')
userResults = [mtool.getMemberById(u['id']) for u in userResults if u['id'] not in ignore]
userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')
```
</issue>
<code>
[start of Products/CMFPlone/controlpanel/browser/usergroups.py]
1 from AccessControl import getSecurityManager
2 from Acquisition import aq_inner
3 from itertools import chain
4 from plone.autoform.form import AutoExtensibleForm
5 from plone.base import PloneMessageFactory as _
6 from plone.base.interfaces import ISecuritySchema
7 from plone.base.interfaces import IUserGroupsSettingsSchema
8 from plone.z3cform import layout
9 from Products.CMFCore.permissions import ManagePortal
10 from Products.CMFCore.utils import getToolByName
11 from Products.CMFPlone.utils import normalizeString
12 from Products.Five.browser import BrowserView
13 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
14 from z3c.form import button
15 from z3c.form import form
16 from zope.component import getAdapter
17 from zope.component import getMultiAdapter
18 from ZTUtils import make_query
19
20
21 class UserGroupsSettingsControlPanel(AutoExtensibleForm, form.EditForm):
22 schema = IUserGroupsSettingsSchema
23 id = "usergroupsettings-control-panel"
24 label = _("Users and Groups")
25 form_name = _("User/Groups settings")
26 control_panel_view = "usergroups-controlpanel"
27
28 @button.buttonAndHandler(_("label_save", default="Save"), name="save")
29 def handleApply(self, action):
30 super().handleApply(self, action)
31
32 def updateActions(self):
33 super().updateActions()
34 if self.actions and "save" in self.actions:
35 self.actions["save"].addClass("btn-primary")
36
37
38 class ControlPanelFormWrapper(layout.FormWrapper):
39 """Use this form as the plone.z3cform layout wrapper to get the control
40 panel layout.
41 """
42
43 index = ViewPageTemplateFile("controlpanel_usergroups_layout.pt")
44
45
46 UserGroupsSettingsPanelView = layout.wrap_form(
47 UserGroupsSettingsControlPanel, ControlPanelFormWrapper
48 )
49
50
51 class UsersGroupsControlPanelView(BrowserView):
52 @property
53 def portal_roles(self):
54 pmemb = getToolByName(aq_inner(self.context), "portal_membership")
55 return [r for r in pmemb.getPortalRoles() if r != "Owner"]
56
57 @property
58 def many_users(self):
59 return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_users
60
61 @property
62 def many_groups(self):
63 return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_groups
64
65 @property
66 def email_as_username(self):
67 return getAdapter(
68 aq_inner(self.context), ISecuritySchema
69 ).get_use_email_as_login()
70
71 def makeQuery(self, **kw):
72 return make_query(**kw)
73
74 def membershipSearch(
75 self, searchString="", searchUsers=True, searchGroups=True, ignore=[]
76 ):
77 """Search for users and/or groups, returning actual member and group items
78 Replaces the now-deprecated prefs_user_groups_search.py script"""
79 groupResults = userResults = []
80
81 gtool = getToolByName(self, "portal_groups")
82 mtool = getToolByName(self, "portal_membership")
83
84 searchView = getMultiAdapter(
85 (aq_inner(self.context), self.request), name="pas_search"
86 )
87
88 if searchGroups:
89 groupResults = searchView.merge(
90 chain(
91 *[
92 searchView.searchGroups(**{field: searchString})
93 for field in ["id", "title"]
94 ]
95 ),
96 "groupid",
97 )
98 groupResults = [
99 gtool.getGroupById(g["id"])
100 for g in groupResults
101 if g["id"] not in ignore
102 ]
103 groupResults.sort(
104 key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
105 )
106
107 if searchUsers:
108 userResults = searchView.merge(
109 chain(
110 *[
111 searchView.searchUsers(**{field: searchString})
112 for field in ["login", "fullname", "email"]
113 ]
114 ),
115 "userid",
116 )
117 userResults = [
118 mtool.getMemberById(u["id"])
119 for u in userResults
120 if u["id"] not in ignore
121 ]
122 userResults.sort(
123 key=lambda x: x is not None
124 and x.getProperty("fullname") is not None
125 and normalizeString(x.getProperty("fullname"))
126 or ""
127 )
128
129 return groupResults + userResults
130
131 def atoi(self, s):
132 try:
133 return int(s)
134 except ValueError:
135 return 0
136
137 @property
138 def is_zope_manager(self):
139 return getSecurityManager().checkPermission(ManagePortal, self.context)
140
141 # The next two class methods implement the following truth table:
142 #
143 # MANY USERS/GROUPS SEARCHING CAN LIST USERS/GROUPS RESULT
144 # False False False Lists unavailable
145 # False False True Show all
146 # False True False Show matching
147 # False True True Show matching
148 # True False False Too many to list
149 # True False True Lists unavailable
150 # True True False Show matching
151 # True True True Show matching
152
153 # TODO: Maybe have these methods return a text message (instead of a bool)
154 # corresponding to the actual result, e.g. "Too many to list", "Lists
155 # unavailable"
156
157 @property
158 def show_group_listing_warning(self):
159 if not self.searchString:
160 acl = getToolByName(self, "acl_users")
161 if acl.canListAllGroups():
162 if self.many_groups:
163 return True
164 return False
165
166 @property
167 def show_users_listing_warning(self):
168 if not self.searchString:
169 acl = getToolByName(self, "acl_users")
170 # XXX Huh? Is canListAllUsers broken?
171 if not acl.canListAllUsers():
172 if self.many_users:
173 return True
174 return False
175
[end of Products/CMFPlone/controlpanel/browser/usergroups.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Products/CMFPlone/controlpanel/browser/usergroups.py b/Products/CMFPlone/controlpanel/browser/usergroups.py
--- a/Products/CMFPlone/controlpanel/browser/usergroups.py
+++ b/Products/CMFPlone/controlpanel/browser/usergroups.py
@@ -86,45 +86,49 @@
)
if searchGroups:
- groupResults = searchView.merge(
- chain(
- *[
- searchView.searchGroups(**{field: searchString})
- for field in ["id", "title"]
- ]
- ),
- "groupid",
- )
- groupResults = [
- gtool.getGroupById(g["id"])
- for g in groupResults
- if g["id"] not in ignore
- ]
- groupResults.sort(
- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
- )
+ # Only search for all ('') if the many_users flag is not set.
+ if not (self.many_groups) or bool(self.searchString):
+ groupResults = searchView.merge(
+ chain(
+ *[
+ searchView.searchGroups(**{field: searchString})
+ for field in ["id", "title"]
+ ]
+ ),
+ "groupid",
+ )
+ groupResults = [
+ gtool.getGroupById(g["id"])
+ for g in groupResults
+ if g["id"] not in ignore
+ ]
+ groupResults.sort(
+ key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
+ )
if searchUsers:
- userResults = searchView.merge(
- chain(
- *[
- searchView.searchUsers(**{field: searchString})
- for field in ["login", "fullname", "email"]
- ]
- ),
- "userid",
- )
- userResults = [
- mtool.getMemberById(u["id"])
- for u in userResults
- if u["id"] not in ignore
- ]
- userResults.sort(
- key=lambda x: x is not None
- and x.getProperty("fullname") is not None
- and normalizeString(x.getProperty("fullname"))
- or ""
- )
+ # Only search for all ('') if the many_users flag is not set.
+ if not (self.many_users) or bool(self.searchString):
+ userResults = searchView.merge(
+ chain(
+ *[
+ searchView.searchUsers(**{field: searchString})
+ for field in ["login", "fullname", "email"]
+ ]
+ ),
+ "userid",
+ )
+ userResults = [
+ mtool.getMemberById(u["id"])
+ for u in userResults
+ if u["id"] not in ignore
+ ]
+ userResults.sort(
+ key=lambda x: x is not None
+ and x.getProperty("fullname") is not None
+ and normalizeString(x.getProperty("fullname"))
+ or ""
+ )
return groupResults + userResults
|
{"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups.py b/Products/CMFPlone/controlpanel/browser/usergroups.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups.py\n@@ -86,45 +86,49 @@\n )\n \n if searchGroups:\n- groupResults = searchView.merge(\n- chain(\n- *[\n- searchView.searchGroups(**{field: searchString})\n- for field in [\"id\", \"title\"]\n- ]\n- ),\n- \"groupid\",\n- )\n- groupResults = [\n- gtool.getGroupById(g[\"id\"])\n- for g in groupResults\n- if g[\"id\"] not in ignore\n- ]\n- groupResults.sort(\n- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n- )\n+ # Only search for all ('') if the many_users flag is not set.\n+ if not (self.many_groups) or bool(self.searchString):\n+ groupResults = searchView.merge(\n+ chain(\n+ *[\n+ searchView.searchGroups(**{field: searchString})\n+ for field in [\"id\", \"title\"]\n+ ]\n+ ),\n+ \"groupid\",\n+ )\n+ groupResults = [\n+ gtool.getGroupById(g[\"id\"])\n+ for g in groupResults\n+ if g[\"id\"] not in ignore\n+ ]\n+ groupResults.sort(\n+ key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n+ )\n \n if searchUsers:\n- userResults = searchView.merge(\n- chain(\n- *[\n- searchView.searchUsers(**{field: searchString})\n- for field in [\"login\", \"fullname\", \"email\"]\n- ]\n- ),\n- \"userid\",\n- )\n- userResults = [\n- mtool.getMemberById(u[\"id\"])\n- for u in userResults\n- if u[\"id\"] not in ignore\n- ]\n- userResults.sort(\n- key=lambda x: x is not None\n- and x.getProperty(\"fullname\") is not None\n- and normalizeString(x.getProperty(\"fullname\"))\n- or \"\"\n- )\n+ # Only search for all ('') if the many_users flag is not set.\n+ if not (self.many_users) or bool(self.searchString):\n+ userResults = searchView.merge(\n+ chain(\n+ *[\n+ searchView.searchUsers(**{field: searchString})\n+ for field in [\"login\", \"fullname\", \"email\"]\n+ ]\n+ ),\n+ \"userid\",\n+ )\n+ userResults = [\n+ mtool.getMemberById(u[\"id\"])\n+ for u in userResults\n+ if u[\"id\"] not in ignore\n+ ]\n+ userResults.sort(\n+ key=lambda x: x is not None\n+ and x.getProperty(\"fullname\") is not None\n+ and normalizeString(x.getProperty(\"fullname\"))\n+ or \"\"\n+ )\n \n return groupResults + userResults\n", "issue": "Can't add an user in UserGroupsControlpanelView to a Group with pas.plugin.ldap with plone.many_users=True\n### What I did:\r\n\r\n- add plone group `testgroup1` via /@@usergroup-groupprefs\r\n- set registry entries for `plone.many_users` and `plone.many_groups` to `True`\r\n- add a Folder\r\n- go to the folder and call the sharing view /myfolder/@@sharing\r\n- add the group `testgroup1` with reader, editor roles\r\n- click on the group to add some users `/@@usergroup-groupmembership?groupname=testgroup1`\r\n- search a user\r\n- select a user (should be a LDAP User) and save\r\n\r\n### What I expect to happen:\r\n- the user is added to the group\r\n\r\n### What actually happened:\r\n- endless waiting for response after click on \"Add\" Button\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\n- Plone 6.0.4\r\n- pas.plugin.ldap 1.8.2\r\n- node.ext.ldap 1.2\r\n- node 1.2.1\r\n- LDAP Backend user objects > 10000\r\n\r\n### Some investigations\r\n\r\n- in the ZMI acl_users -> source_groups the action is possible without errors\r\n- with loglevel DEBUG i see thousands of queries to the LDAP Backend\r\n- the task run many minutes\r\n- its a limitation of 
[pas.plugin.ldap](https://github.com/collective/pas.plugins.ldap#limitations-and-future-optimizations)\r\n \r\n\r\nthe [GroupMembershipControlPanel](https://github.com/plone/Products.CMFPlone/blob/a5b48c0c24e6eebbe01aa2874eaaa9aa3d49f155/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py#L65) clean the searchstring in the request and pass it to the membership search in UsersGroupsControlPanelView.\r\n\r\nAn empty searchstring is like a intention \"list all users\" and a violation against the option `many_users = True`. The search with empty searchstring should not be performed.\r\n\r\nWith the following patch, all is fine ;-)\r\n\r\n```\r\nif searchGroups:\r\n if not self.many_groups or bool(searchString):\r\n groupResults = searchView.merge(chain(*[searchView.searchGroups(**{field: searchString}) for field in ['id', 'title']]), 'groupid')\r\n groupResults = [gtool.getGroupById(g['id']) for g in groupResults if g['id'] not in ignore]\r\n groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))\r\n \r\n if searchUsers:\r\n if not self.many_users or bool(searchString):\r\n userResults = searchView.merge(chain(*[searchView.searchUsers(**{field: searchString}) for field in ['login', 'fullname', 'email']]), 'userid')\r\n userResults = [mtool.getMemberById(u['id']) for u in userResults if u['id'] not in ignore]\r\n userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')\r\n```\r\n\n", "before_files": [{"content": "from AccessControl import getSecurityManager\nfrom Acquisition import aq_inner\nfrom itertools import chain\nfrom plone.autoform.form import AutoExtensibleForm\nfrom plone.base import PloneMessageFactory as _\nfrom plone.base.interfaces import ISecuritySchema\nfrom plone.base.interfaces import IUserGroupsSettingsSchema\nfrom plone.z3cform import layout\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.utils import normalizeString\nfrom Products.Five.browser import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom z3c.form import button\nfrom z3c.form import form\nfrom zope.component import getAdapter\nfrom zope.component import getMultiAdapter\nfrom ZTUtils import make_query\n\n\nclass UserGroupsSettingsControlPanel(AutoExtensibleForm, form.EditForm):\n schema = IUserGroupsSettingsSchema\n id = \"usergroupsettings-control-panel\"\n label = _(\"Users and Groups\")\n form_name = _(\"User/Groups settings\")\n control_panel_view = \"usergroups-controlpanel\"\n\n @button.buttonAndHandler(_(\"label_save\", default=\"Save\"), name=\"save\")\n def handleApply(self, action):\n super().handleApply(self, action)\n\n def updateActions(self):\n super().updateActions()\n if self.actions and \"save\" in self.actions:\n self.actions[\"save\"].addClass(\"btn-primary\")\n\n\nclass ControlPanelFormWrapper(layout.FormWrapper):\n \"\"\"Use this form as the plone.z3cform layout wrapper to get the control\n panel layout.\n \"\"\"\n\n index = ViewPageTemplateFile(\"controlpanel_usergroups_layout.pt\")\n\n\nUserGroupsSettingsPanelView = layout.wrap_form(\n UserGroupsSettingsControlPanel, ControlPanelFormWrapper\n)\n\n\nclass UsersGroupsControlPanelView(BrowserView):\n @property\n def portal_roles(self):\n pmemb = getToolByName(aq_inner(self.context), \"portal_membership\")\n return [r for r in pmemb.getPortalRoles() if r != \"Owner\"]\n\n @property\n def 
many_users(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_users\n\n @property\n def many_groups(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_groups\n\n @property\n def email_as_username(self):\n return getAdapter(\n aq_inner(self.context), ISecuritySchema\n ).get_use_email_as_login()\n\n def makeQuery(self, **kw):\n return make_query(**kw)\n\n def membershipSearch(\n self, searchString=\"\", searchUsers=True, searchGroups=True, ignore=[]\n ):\n \"\"\"Search for users and/or groups, returning actual member and group items\n Replaces the now-deprecated prefs_user_groups_search.py script\"\"\"\n groupResults = userResults = []\n\n gtool = getToolByName(self, \"portal_groups\")\n mtool = getToolByName(self, \"portal_membership\")\n\n searchView = getMultiAdapter(\n (aq_inner(self.context), self.request), name=\"pas_search\"\n )\n\n if searchGroups:\n groupResults = searchView.merge(\n chain(\n *[\n searchView.searchGroups(**{field: searchString})\n for field in [\"id\", \"title\"]\n ]\n ),\n \"groupid\",\n )\n groupResults = [\n gtool.getGroupById(g[\"id\"])\n for g in groupResults\n if g[\"id\"] not in ignore\n ]\n groupResults.sort(\n key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n )\n\n if searchUsers:\n userResults = searchView.merge(\n chain(\n *[\n searchView.searchUsers(**{field: searchString})\n for field in [\"login\", \"fullname\", \"email\"]\n ]\n ),\n \"userid\",\n )\n userResults = [\n mtool.getMemberById(u[\"id\"])\n for u in userResults\n if u[\"id\"] not in ignore\n ]\n userResults.sort(\n key=lambda x: x is not None\n and x.getProperty(\"fullname\") is not None\n and normalizeString(x.getProperty(\"fullname\"))\n or \"\"\n )\n\n return groupResults + userResults\n\n def atoi(self, s):\n try:\n return int(s)\n except ValueError:\n return 0\n\n @property\n def is_zope_manager(self):\n return getSecurityManager().checkPermission(ManagePortal, self.context)\n\n # The next two class methods implement the following truth table:\n #\n # MANY USERS/GROUPS SEARCHING CAN LIST USERS/GROUPS RESULT\n # False False False Lists unavailable\n # False False True Show all\n # False True False Show matching\n # False True True Show matching\n # True False False Too many to list\n # True False True Lists unavailable\n # True True False Show matching\n # True True True Show matching\n\n # TODO: Maybe have these methods return a text message (instead of a bool)\n # corresponding to the actual result, e.g. \"Too many to list\", \"Lists\n # unavailable\"\n\n @property\n def show_group_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n if acl.canListAllGroups():\n if self.many_groups:\n return True\n return False\n\n @property\n def show_users_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n # XXX Huh? Is canListAllUsers broken?\n if not acl.canListAllUsers():\n if self.many_users:\n return True\n return False\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups.py"}]}
| 2,917 | 697 |
gh_patches_debug_32749 | rasdani/github-patches | git_diff | StackStorm__st2-3083 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue on calling action-alias match in CLI
calling
`st2 action-alias match "st2 pack install" `
Gives the error
```
2016-11-30 04:02:38,984 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match with filters={} (remote_addr='127.0.0.1',method='POST',filters={},request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',path='/v1/actionalias/match')
2016-11-30 04:02:38,985 140340458909360 AUDIT auth [-] Token provided in headers
2016-11-30 04:02:38,986 140340458909360 AUDIT auth [-] Token with id "583e4f54c4da5f27da3c32ea" is validated.
2016-11-30 04:02:38,990 140340458909360 WARNING base [-] Type definition for 'action_alias_match_api' argument of 'match' is missing.
2016-11-30 04:02:38,990 140340458909360 INFO resource [-] GET all /v1/actionalias/match with filters={'order_by': ['pack', 'name']} (offset=0,limit='None',filters={'order_by': ['pack', 'name']},sort=[])
2016-11-30 04:02:38,996 140340458909360 ERROR hooks [-] API call failed: 'representation'
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 631, in __call__
self.invoke_controller(controller, args, kwargs, state)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 531, in invoke_controller
result = controller(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py", line 61, in func_wrapper
return func(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py", line 284, in callfunction
raise e
KeyError: 'representation' (_exception_data={},_exception_class='KeyError',_exception_message="'representation'")
2016-11-30 04:02:38,997 140340458909360 ERROR hooks [-] Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 631, in __call__
self.invoke_controller(controller, args, kwargs, state)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 531, in invoke_controller
result = controller(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py", line 61, in func_wrapper
return func(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py", line 284, in callfunction
raise e
KeyError: 'representation'
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 631, in __call__
self.invoke_controller(controller, args, kwargs, state)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 531, in invoke_controller
result = controller(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py", line 61, in func_wrapper
return func(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py", line 284, in callfunction
raise e
KeyError: 'representation'
2016-11-30 04:02:38,997 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match result={
"faultstring": "Internal Server Error"
} (result='{\n "faultstring": "Internal Server Error"\n}',request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',status_code='500 Internal Server Error',remote_addr='127.0.0.1',path='/v1/actionalias/match',method='POST')
```
</issue>
<code>
[start of st2common/st2common/util/actionalias_matching.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import six
17
18 from st2common.exceptions.content import ParseException
19 from st2common.models.utils.action_alias_utils import extract_parameters
20
21 __all__ = [
22 'list_format_strings_from_aliases',
23 'normalise_alias_format_string',
24 'match_command_to_alias'
25 ]
26
27
28 def list_format_strings_from_aliases(aliases):
29 '''
30 List patterns from a collection of alias objects
31
32 :param aliases: The list of aliases
33 :type aliases: ``list`` of :class:`st2common.models.api.action.ActionAliasAPI`
34
35 :return: A description of potential execution patterns in a list of aliases.
36 :rtype: ``list`` of ``list``
37 '''
38 patterns = []
39 for alias in aliases:
40 for format_ in alias.formats:
41 display, representations = normalise_alias_format_string(format_)
42 patterns.extend([(display, representation) for representation in representations])
43 return patterns
44
45
46 def normalise_alias_format_string(alias_format):
47 '''
48 StackStorm action aliases can have two types;
49 1. A simple string holding the format
50 2. A dictionary which hold numerous alias format "representation(s)"
51 With a single "display" for help about the action alias.
52 This function processes both forms and returns a standardized form.
53
54 :param alias_format: The alias format
55 :type alias_format: ``str`` or ``dict``
56
57 :return: The representation of the alias
58 :rtype: ``tuple`` of (``str``, ``str``)
59 '''
60 display = None
61 representation = []
62
63 if isinstance(alias_format, six.string_types):
64 display = alias_format
65 representation.append(alias_format)
66 elif isinstance(alias_format, dict):
67 display = alias_format['display']
68 representation = alias_format['representation']
69 else:
70 raise TypeError("alias_format '%s' is neither a dictionary or string type."
71 % repr(alias_format))
72 return (display, representation)
73
74
75 def match_command_to_alias(command, aliases):
76 """
77 Match the text against an action and return the action reference.
78 """
79 results = []
80
81 for alias in aliases:
82 format_strings = list_format_strings_from_aliases([alias])
83 for format_string in format_strings:
84 try:
85 extract_parameters(format_str=format_string[1],
86 param_stream=command)
87 except ParseException:
88 continue
89
90 results.append((alias, format_string[0], format_string[1]))
91 return results
92
[end of st2common/st2common/util/actionalias_matching.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2common/st2common/util/actionalias_matching.py b/st2common/st2common/util/actionalias_matching.py
--- a/st2common/st2common/util/actionalias_matching.py
+++ b/st2common/st2common/util/actionalias_matching.py
@@ -39,16 +39,20 @@
for alias in aliases:
for format_ in alias.formats:
display, representations = normalise_alias_format_string(format_)
- patterns.extend([(display, representation) for representation in representations])
+ if display and len(representations) == 0:
+ patterns.extend([(display, [])])
+ else:
+ patterns.extend([(display, representation) for representation in representations])
return patterns
def normalise_alias_format_string(alias_format):
'''
- StackStorm action aliases can have two types;
- 1. A simple string holding the format
- 2. A dictionary which hold numerous alias format "representation(s)"
- With a single "display" for help about the action alias.
+ StackStorm action aliases come in two forms;
+ 1. A string holding the format, which is also used as the help string.
+ 2. A dictionary containing "display" and/or "representation" keys.
+ "representation": a list of numerous alias format "representation(s)"
+ "display": a help string to be displayed.
This function processes both forms and returns a standardized form.
:param alias_format: The alias format
@@ -64,8 +68,10 @@
display = alias_format
representation.append(alias_format)
elif isinstance(alias_format, dict):
- display = alias_format['display']
- representation = alias_format['representation']
+ display = alias_format.get('display')
+ representation = alias_format.get('representation') or []
+ if isinstance(representation, six.string_types):
+ representation = [representation]
else:
raise TypeError("alias_format '%s' is neither a dictionary or string type."
% repr(alias_format))
|
{"golden_diff": "diff --git a/st2common/st2common/util/actionalias_matching.py b/st2common/st2common/util/actionalias_matching.py\n--- a/st2common/st2common/util/actionalias_matching.py\n+++ b/st2common/st2common/util/actionalias_matching.py\n@@ -39,16 +39,20 @@\n for alias in aliases:\n for format_ in alias.formats:\n display, representations = normalise_alias_format_string(format_)\n- patterns.extend([(display, representation) for representation in representations])\n+ if display and len(representations) == 0:\n+ patterns.extend([(display, [])])\n+ else:\n+ patterns.extend([(display, representation) for representation in representations])\n return patterns\n \n \n def normalise_alias_format_string(alias_format):\n '''\n- StackStorm action aliases can have two types;\n- 1. A simple string holding the format\n- 2. A dictionary which hold numerous alias format \"representation(s)\"\n- With a single \"display\" for help about the action alias.\n+ StackStorm action aliases come in two forms;\n+ 1. A string holding the format, which is also used as the help string.\n+ 2. A dictionary containing \"display\" and/or \"representation\" keys.\n+ \"representation\": a list of numerous alias format \"representation(s)\"\n+ \"display\": a help string to be displayed.\n This function processes both forms and returns a standardized form.\n \n :param alias_format: The alias format\n@@ -64,8 +68,10 @@\n display = alias_format\n representation.append(alias_format)\n elif isinstance(alias_format, dict):\n- display = alias_format['display']\n- representation = alias_format['representation']\n+ display = alias_format.get('display')\n+ representation = alias_format.get('representation') or []\n+ if isinstance(representation, six.string_types):\n+ representation = [representation]\n else:\n raise TypeError(\"alias_format '%s' is neither a dictionary or string type.\"\n % repr(alias_format))\n", "issue": "Issue on calling action-alias match in CLI\ncalling \r\n`st2 action-alias match \"st2 pack install\" `\r\nGives the error\r\n```\r\n2016-11-30 04:02:38,984 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match with filters={} (remote_addr='127.0.0.1',method='POST',filters={},request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',path='/v1/actionalias/match')\r\n2016-11-30 04:02:38,985 140340458909360 AUDIT auth [-] Token provided in headers\r\n2016-11-30 04:02:38,986 140340458909360 AUDIT auth [-] Token with id \"583e4f54c4da5f27da3c32ea\" is validated.\r\n2016-11-30 04:02:38,990 140340458909360 WARNING base [-] Type definition for 'action_alias_match_api' argument of 'match' is missing.\r\n2016-11-30 04:02:38,990 140340458909360 INFO resource [-] GET all /v1/actionalias/match with filters={'order_by': ['pack', 'name']} (offset=0,limit='None',filters={'order_by': ['pack', 'name']},sort=[])\r\n2016-11-30 04:02:38,996 140340458909360 ERROR hooks [-] API call failed: 'representation'\r\nTraceback (most recent call last):\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 631, in __call__\r\n self.invoke_controller(controller, args, kwargs, state)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 531, in invoke_controller\r\n result = controller(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py\", line 61, in func_wrapper\r\n return func(*args, **kwargs)\r\n File 
\"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py\", line 284, in callfunction\r\n raise e\r\nKeyError: 'representation' (_exception_data={},_exception_class='KeyError',_exception_message=\"'representation'\")\r\n2016-11-30 04:02:38,997 140340458909360 ERROR hooks [-] Traceback (most recent call last):\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 631, in __call__\r\n self.invoke_controller(controller, args, kwargs, state)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 531, in invoke_controller\r\n result = controller(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py\", line 61, in func_wrapper\r\n return func(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py\", line 284, in callfunction\r\n raise e\r\nKeyError: 'representation'\r\nTraceback (most recent call last):\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 631, in __call__\r\n self.invoke_controller(controller, args, kwargs, state)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 531, in invoke_controller\r\n result = controller(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py\", line 61, in func_wrapper\r\n return func(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py\", line 284, in callfunction\r\n raise e\r\nKeyError: 'representation'\r\n2016-11-30 04:02:38,997 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match result={\r\n \"faultstring\": \"Internal Server Error\"\r\n} (result='{\\n \"faultstring\": \"Internal Server Error\"\\n}',request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',status_code='500 Internal Server Error',remote_addr='127.0.0.1',path='/v1/actionalias/match',method='POST')\r\n```\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\n\nfrom st2common.exceptions.content import ParseException\nfrom st2common.models.utils.action_alias_utils import extract_parameters\n\n__all__ = [\n 'list_format_strings_from_aliases',\n 'normalise_alias_format_string',\n 'match_command_to_alias'\n]\n\n\ndef list_format_strings_from_aliases(aliases):\n '''\n List patterns from a collection of alias objects\n\n :param aliases: The list of aliases\n :type aliases: ``list`` of :class:`st2common.models.api.action.ActionAliasAPI`\n\n :return: A description of potential execution patterns in a list of aliases.\n :rtype: ``list`` of ``list``\n '''\n patterns = []\n for alias in aliases:\n for format_ in alias.formats:\n display, representations = normalise_alias_format_string(format_)\n patterns.extend([(display, representation) for representation in representations])\n return patterns\n\n\ndef normalise_alias_format_string(alias_format):\n '''\n StackStorm action aliases can have two types;\n 1. A simple string holding the format\n 2. A dictionary which hold numerous alias format \"representation(s)\"\n With a single \"display\" for help about the action alias.\n This function processes both forms and returns a standardized form.\n\n :param alias_format: The alias format\n :type alias_format: ``str`` or ``dict``\n\n :return: The representation of the alias\n :rtype: ``tuple`` of (``str``, ``str``)\n '''\n display = None\n representation = []\n\n if isinstance(alias_format, six.string_types):\n display = alias_format\n representation.append(alias_format)\n elif isinstance(alias_format, dict):\n display = alias_format['display']\n representation = alias_format['representation']\n else:\n raise TypeError(\"alias_format '%s' is neither a dictionary or string type.\"\n % repr(alias_format))\n return (display, representation)\n\n\ndef match_command_to_alias(command, aliases):\n \"\"\"\n Match the text against an action and return the action reference.\n \"\"\"\n results = []\n\n for alias in aliases:\n format_strings = list_format_strings_from_aliases([alias])\n for format_string in format_strings:\n try:\n extract_parameters(format_str=format_string[1],\n param_stream=command)\n except ParseException:\n continue\n\n results.append((alias, format_string[0], format_string[1]))\n return results\n", "path": "st2common/st2common/util/actionalias_matching.py"}]}
| 2,715 | 438 |
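The record above ends with a `KeyError: 'representation'` raised from the alias-matching code, where a dict-form alias format is indexed directly with `alias_format['representation']`. The golden diff for this record is not reproduced in this part of the dump, so the following is only an illustrative sketch of one defensive variant, not necessarily the repository's actual fix: fall back to an empty list when the key is absent, and normalise a bare string to a one-element list.

```python
import six


def normalise_alias_format_string(alias_format):
    """Illustrative variant: tolerate dict-form aliases missing 'representation'."""
    display = None
    representation = []

    if isinstance(alias_format, six.string_types):
        # simple string form: the string is both display text and pattern
        display = alias_format
        representation.append(alias_format)
    elif isinstance(alias_format, dict):
        # dict form: treat both keys as optional instead of raising KeyError
        display = alias_format.get('display')
        representation = alias_format.get('representation', [])
        if isinstance(representation, six.string_types):
            representation = [representation]
    else:
        raise TypeError("alias_format '%s' is neither a dictionary or string type."
                        % repr(alias_format))
    return (display, representation)
```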
gh_patches_debug_7496
|
rasdani/github-patches
|
git_diff
|
GoogleCloudPlatform__professional-services-326
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Asset exporter tool - getting ImportError in GAE
Just tried to set up from scratch in new project. Followed steps from readme
When running the cron job I get this
ImportError: cannot import name 'expr_pb2' from 'google.type' (/env/lib/python3.7/site-packages/google/type/__init__.py)
at <module> (/env/lib/python3.7/site-packages/google/iam/v1/policy_pb2.py:16)
at <module> (/env/lib/python3.7/site-packages/google/iam/v1/iam_policy_pb2.py:17)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/assets_pb2.py:19)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/asset_service_pb2.py:20)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/types.py:23)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/__init__.py:20)
at <module> (/srv/lib/asset_inventory/export.py:33)
at <module> (/srv/main.py:45)
at import_app (/env/lib/python3.7/site-packages/gunicorn/util.py:350)
at load_wsgiapp (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:41)
at load (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:52)
at wsgi (/env/lib/python3.7/site-packages/gunicorn/app/base.py:67)
at load_wsgi (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:138)
at init_process (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:129)
at init_process (/env/lib/python3.7/site-packages/gunicorn/workers/gthread.py:104)
at spawn_worker (/env/lib/python3.7/site-packages/gunicorn/arbiter.py:583)
</issue>
<code>
[start of tools/asset-inventory/asset_inventory/pipeline_runner.py]
1 # Copyright 2019 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Code to invoke the pipeline."""
15
16 import logging
17 import pprint
18 import time
19
20 from googleapiclient.discovery import build
21 from oauth2client.client import GoogleCredentials
22
23
24 def get_job_name(load_time):
25 """User friendly job name from load_time."""
26 return ('cloud-asset-import-' + load_time.lower().replace(
27 ':', '-').replace(' ', '').replace('.', '-'))
28
29
30 def is_successful_state(final_state):
31 """True if the status is successful.
32
33 Checks both for beam and template runner success codes.
34
35 Args:
36 final_state: Final state the pipeline is in.
37
38 Returns:
39 True if the job was successful.
40 """
41 if final_state not in ['JOB_STATE_DONE', 'DONE']:
42 return False
43 return True
44
45
46 def wait_on_pipeline_job(df_service, pipeline_job):
47 """Poll the job status every 60 seconds until done."""
48 dataflow_project = pipeline_job['projectId']
49 template_region = pipeline_job['location']
50 job_id = pipeline_job['id']
51 pipeline_job = df_service.projects().locations().jobs().get(
52 location=template_region, projectId=dataflow_project,
53 jobId=job_id).execute(num_retries=5)
54 logging.info('job status %s', pprint.pformat(pipeline_job))
55 current_state = pipeline_job['currentState']
56 # We have reached a terminal state.
57 if current_state in [
58 'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED',
59 'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED'
60 ]:
61 logging.info('final pipeline state : %s', current_state)
62 return current_state, pipeline_job
63 logging.info('sleeping 60 seconds before repolling.')
64 time.sleep(60)
65 return wait_on_pipeline_job(df_service, pipeline_job)
66
67
68 def run_pipeline_template(dataflow_project, template_region, template_location,
69 input_location, group_by, write_disposition, dataset,
70 stage, load_time, num_shards, runtime_environment):
71 """Invoke the suplied pipeline template.
72
73 Args:
74 dataflow_project: Project to run the dataflow job in.
75 template_region: Region to run the job in.
76 template_location: GCS path to the template file.
77 input_location: GCS path load json documents from,
78 group_by: How to split assets into tables.
79 write_disposition: To append to or ovewrite BigQuery tables.
80 dataset: BigQuery dataset to write to.
81 stage: GCS path to write BigQuery load files.
82 load_time: Timestamp or date to load data with.
83 num_shards: Shards for for each asset type.
84 runtime_environment: Dict suppling other runtime overrides.
85 Returns:
86 End state of the pipline and job object.
87 """
88 credentials = GoogleCredentials.get_application_default()
89 df_service = build('dataflow', 'v1b3', credentials=credentials)
90
91 # Set the following variables to your values.
92 job_name = get_job_name(load_time)
93 body = {
94 'jobName': job_name,
95 'parameters': {
96 'input': input_location,
97 'load_time': load_time,
98 'stage': stage,
99 'group_by': group_by,
100 'write_disposition': write_disposition,
101 'num_shards': num_shards,
102 'dataset': dataset,
103 },
104 'environment': runtime_environment
105 }
106 logging.info('launching template %s in %s:%s with %s', template_location,
107 dataflow_project, template_region, pprint.pformat(body))
108 launch_result = df_service.projects().locations().templates().launch(
109 location=template_region,
110 projectId=dataflow_project,
111 gcsPath=template_location,
112 body=body).execute(num_retries=5)
113
114 logging.info('waiting on pipeline : %s', pprint.pformat(launch_result))
115 return wait_on_pipeline_job(df_service, launch_result['job'])
116
117
118 def run_pipeline_beam_runner(pipeline_runner, dataflow_project, input_location,
119 group_by, write_disposition, dataset, stage,
120 load_time, num_shards, pipeline_arguments):
121 """Invokes the pipeline with a beam runner.
122
123 Only tested with the dataflow and direct runners.
124
125 Args:
126 pipeline_runner: The Beam runner to use.
127 dataflow_project: Project to run the dataflow job in.
128 input_location: GCS path load json documents from,
129 group_by: How to split assets into tables.
130 write_disposition: To append to or ovewrite BigQuery tables.
131 dataset: BigQuery dataset to write to.
132 stage: GCS path to write BigQuery load files.
133 load_time: Timestamp to add to data during during BigQuery load.
134 num_shards: Shards for for each asset type.
135 pipeline_arguments: List of additional runner arguments.
136 Returns:
137 The end state of the pipeline run (a string), and PipelineResult.
138 """
139
140 # pylint: disable=import-error
141 # import on demand as we don't want to depend on pipeline code which imports
142 # apache beam code unless we are using a beam runner and not invoking a
143 # template.
144 from asset_inventory import import_pipeline
145 job_name = get_job_name(load_time)
146
147 pipeline_parameters = pipeline_arguments
148
149 parameters = {
150 '--load_time': load_time,
151 '--job_name': job_name,
152 '--project': dataflow_project,
153 '--input': input_location,
154 '--group_by': group_by,
155 '--write_disposition': write_disposition,
156 '--num_shards': num_shards,
157 '--dataset': dataset,
158 '--stage': stage,
159 '--runner': pipeline_runner
160 }
161 for arg_name, value in parameters.items():
162 if value and arg_name not in pipeline_parameters:
163 pipeline_parameters += [arg_name, value]
164 pipeline_result = import_pipeline.run(pipeline_parameters)
165 logging.info('waiting on pipeline : %s', pprint.pformat(pipeline_result))
166 state = pipeline_result.wait_until_finish()
167 logging.info('final pipeline state: %s', state)
168 return pipeline_result.state, pipeline_result
169
[end of tools/asset-inventory/asset_inventory/pipeline_runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/asset-inventory/asset_inventory/pipeline_runner.py b/tools/asset-inventory/asset_inventory/pipeline_runner.py
--- a/tools/asset-inventory/asset_inventory/pipeline_runner.py
+++ b/tools/asset-inventory/asset_inventory/pipeline_runner.py
@@ -86,7 +86,8 @@
End state of the pipline and job object.
"""
credentials = GoogleCredentials.get_application_default()
- df_service = build('dataflow', 'v1b3', credentials=credentials)
+ df_service = build('dataflow', 'v1b3', credentials=credentials,
+ cache_discovery=False)
# Set the following variables to your values.
job_name = get_job_name(load_time)
|
{"golden_diff": "diff --git a/tools/asset-inventory/asset_inventory/pipeline_runner.py b/tools/asset-inventory/asset_inventory/pipeline_runner.py\n--- a/tools/asset-inventory/asset_inventory/pipeline_runner.py\n+++ b/tools/asset-inventory/asset_inventory/pipeline_runner.py\n@@ -86,7 +86,8 @@\n End state of the pipline and job object.\n \"\"\"\n credentials = GoogleCredentials.get_application_default()\n- df_service = build('dataflow', 'v1b3', credentials=credentials)\n+ df_service = build('dataflow', 'v1b3', credentials=credentials,\n+ cache_discovery=False)\n \n # Set the following variables to your values.\n job_name = get_job_name(load_time)\n", "issue": "Asset exporter tool - getting ImportError in GAE \nJust tried to set up from scratch in new project. Followed steps from readme\r\nWhen running the cron job I get this\r\n\r\nImportError: cannot import name 'expr_pb2' from 'google.type' (/env/lib/python3.7/site-packages/google/type/__init__.py)\r\nat <module> (/env/lib/python3.7/site-packages/google/iam/v1/policy_pb2.py:16)\r\nat <module> (/env/lib/python3.7/site-packages/google/iam/v1/iam_policy_pb2.py:17)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/assets_pb2.py:19)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/asset_service_pb2.py:20)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/types.py:23)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/__init__.py:20)\r\nat <module> (/srv/lib/asset_inventory/export.py:33)\r\nat <module> (/srv/main.py:45)\r\nat import_app (/env/lib/python3.7/site-packages/gunicorn/util.py:350)\r\nat load_wsgiapp (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:41)\r\nat load (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:52)\r\nat wsgi (/env/lib/python3.7/site-packages/gunicorn/app/base.py:67)\r\nat load_wsgi (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:138)\r\nat init_process (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:129)\r\nat init_process (/env/lib/python3.7/site-packages/gunicorn/workers/gthread.py:104)\r\nat spawn_worker (/env/lib/python3.7/site-packages/gunicorn/arbiter.py:583)\n", "before_files": [{"content": "# Copyright 2019 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Code to invoke the pipeline.\"\"\"\n\nimport logging\nimport pprint\nimport time\n\nfrom googleapiclient.discovery import build\nfrom oauth2client.client import GoogleCredentials\n\n\ndef get_job_name(load_time):\n \"\"\"User friendly job name from load_time.\"\"\"\n return ('cloud-asset-import-' + load_time.lower().replace(\n ':', '-').replace(' ', '').replace('.', '-'))\n\n\ndef is_successful_state(final_state):\n \"\"\"True if the status is successful.\n\n Checks both for beam and template runner success codes.\n\n Args:\n final_state: Final state the pipeline is in.\n\n Returns:\n True if the job was successful.\n \"\"\"\n if final_state not in ['JOB_STATE_DONE', 'DONE']:\n return False\n return 
True\n\n\ndef wait_on_pipeline_job(df_service, pipeline_job):\n \"\"\"Poll the job status every 60 seconds until done.\"\"\"\n dataflow_project = pipeline_job['projectId']\n template_region = pipeline_job['location']\n job_id = pipeline_job['id']\n pipeline_job = df_service.projects().locations().jobs().get(\n location=template_region, projectId=dataflow_project,\n jobId=job_id).execute(num_retries=5)\n logging.info('job status %s', pprint.pformat(pipeline_job))\n current_state = pipeline_job['currentState']\n # We have reached a terminal state.\n if current_state in [\n 'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED',\n 'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED'\n ]:\n logging.info('final pipeline state : %s', current_state)\n return current_state, pipeline_job\n logging.info('sleeping 60 seconds before repolling.')\n time.sleep(60)\n return wait_on_pipeline_job(df_service, pipeline_job)\n\n\ndef run_pipeline_template(dataflow_project, template_region, template_location,\n input_location, group_by, write_disposition, dataset,\n stage, load_time, num_shards, runtime_environment):\n \"\"\"Invoke the suplied pipeline template.\n\n Args:\n dataflow_project: Project to run the dataflow job in.\n template_region: Region to run the job in.\n template_location: GCS path to the template file.\n input_location: GCS path load json documents from,\n group_by: How to split assets into tables.\n write_disposition: To append to or ovewrite BigQuery tables.\n dataset: BigQuery dataset to write to.\n stage: GCS path to write BigQuery load files.\n load_time: Timestamp or date to load data with.\n num_shards: Shards for for each asset type.\n runtime_environment: Dict suppling other runtime overrides.\n Returns:\n End state of the pipline and job object.\n \"\"\"\n credentials = GoogleCredentials.get_application_default()\n df_service = build('dataflow', 'v1b3', credentials=credentials)\n\n # Set the following variables to your values.\n job_name = get_job_name(load_time)\n body = {\n 'jobName': job_name,\n 'parameters': {\n 'input': input_location,\n 'load_time': load_time,\n 'stage': stage,\n 'group_by': group_by,\n 'write_disposition': write_disposition,\n 'num_shards': num_shards,\n 'dataset': dataset,\n },\n 'environment': runtime_environment\n }\n logging.info('launching template %s in %s:%s with %s', template_location,\n dataflow_project, template_region, pprint.pformat(body))\n launch_result = df_service.projects().locations().templates().launch(\n location=template_region,\n projectId=dataflow_project,\n gcsPath=template_location,\n body=body).execute(num_retries=5)\n\n logging.info('waiting on pipeline : %s', pprint.pformat(launch_result))\n return wait_on_pipeline_job(df_service, launch_result['job'])\n\n\ndef run_pipeline_beam_runner(pipeline_runner, dataflow_project, input_location,\n group_by, write_disposition, dataset, stage,\n load_time, num_shards, pipeline_arguments):\n \"\"\"Invokes the pipeline with a beam runner.\n\n Only tested with the dataflow and direct runners.\n\n Args:\n pipeline_runner: The Beam runner to use.\n dataflow_project: Project to run the dataflow job in.\n input_location: GCS path load json documents from,\n group_by: How to split assets into tables.\n write_disposition: To append to or ovewrite BigQuery tables.\n dataset: BigQuery dataset to write to.\n stage: GCS path to write BigQuery load files.\n load_time: Timestamp to add to data during during BigQuery load.\n num_shards: Shards for for each asset type.\n pipeline_arguments: List of additional 
runner arguments.\n Returns:\n The end state of the pipeline run (a string), and PipelineResult.\n \"\"\"\n\n # pylint: disable=import-error\n # import on demand as we don't want to depend on pipeline code which imports\n # apache beam code unless we are using a beam runner and not invoking a\n # template.\n from asset_inventory import import_pipeline\n job_name = get_job_name(load_time)\n\n pipeline_parameters = pipeline_arguments\n\n parameters = {\n '--load_time': load_time,\n '--job_name': job_name,\n '--project': dataflow_project,\n '--input': input_location,\n '--group_by': group_by,\n '--write_disposition': write_disposition,\n '--num_shards': num_shards,\n '--dataset': dataset,\n '--stage': stage,\n '--runner': pipeline_runner\n }\n for arg_name, value in parameters.items():\n if value and arg_name not in pipeline_parameters:\n pipeline_parameters += [arg_name, value]\n pipeline_result = import_pipeline.run(pipeline_parameters)\n logging.info('waiting on pipeline : %s', pprint.pformat(pipeline_result))\n state = pipeline_result.wait_until_finish()\n logging.info('final pipeline state: %s', state)\n return pipeline_result.state, pipeline_result\n", "path": "tools/asset-inventory/asset_inventory/pipeline_runner.py"}]}
| 2,826 | 162 |
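For the record above, the golden diff's only change is passing `cache_discovery=False` when building the Dataflow client. The sketch below restates that pattern in isolation; the helper name is assumed for illustration and the surrounding pipeline parameters are trimmed. The issue itself reports a protobuf `ImportError` in App Engine, so treat this as the pattern the maintainers chose rather than a full root-cause explanation.

```python
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials


def make_dataflow_service():
    """Build the Dataflow API client the way the patched pipeline_runner does."""
    credentials = GoogleCredentials.get_application_default()
    # cache_discovery=False is the only behavioural change relative to the
    # original build() call shown in the record above.
    return build('dataflow', 'v1b3', credentials=credentials,
                 cache_discovery=False)
```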
gh_patches_debug_19844
|
rasdani/github-patches
|
git_diff
|
lisa-lab__pylearn2-1040
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
target_format.py introduces hard dependence on scipy
scipy imports are all meant to be guarded by try/except statements so you can run the core library without installing scipy
</issue>
<code>
[start of pylearn2/format/target_format.py]
1 """Code for reformatting supervised learning targets."""
2 from operator import mul
3
4 import numpy as np
5 import scipy
6 import scipy.sparse
7 import theano.sparse
8 from theano import tensor, config
9
10
11 class OneHotFormatter(object):
12 """
13 A target formatter that transforms labels from integers in both single
14 and batch mode.
15
16 Parameters
17 ----------
18 max_labels : int
19 The number of possible classes/labels. This means that all labels
20 should be < max_labels. Example: For MNIST there are 10 numbers
21 and hence max_labels = 10.
22 dtype : dtype, optional
23 The desired dtype for the converted one-hot vectors. Defaults to
24 `config.floatX` if not given.
25 """
26 def __init__(self, max_labels, dtype=None):
27 """
28 Initializes the formatter given the number of max labels.
29 """
30 try:
31 np.empty(max_labels)
32 except (ValueError, TypeError):
33 raise ValueError("%s got bad max_labels argument '%s'" %
34 (self.__class__.__name__, str(max_labels)))
35 self._max_labels = max_labels
36 if dtype is None:
37 self._dtype = config.floatX
38 else:
39 try:
40 np.dtype(dtype)
41 except TypeError:
42 raise TypeError("%s got bad dtype identifier %s" %
43 (self.__class__.__name__, str(dtype)))
44 self._dtype = dtype
45
46 def format(self, targets, mode='stack', sparse=False):
47 """
48 Formats a given array of target labels into a one-hot
49 vector. If labels appear multiple times, their value
50 in the one-hot vector is incremented.
51
52 Parameters
53 ----------
54 targets : ndarray
55 A 1D array of targets, or a batch (2D array) where
56 each row is a list of targets.
57 mode : string
58 The way in which to convert the labels to arrays. Takes
59 three different options:
60
61 - "concatenate" : concatenates the one-hot vectors from
62 multiple labels
63 - "stack" : returns a matrix where each row is the
64 one-hot vector of a label
65 - "merge" : merges the one-hot vectors together to
66 form a vector where the elements are
67 the result of an indicator function
68 NB: As the result of an indicator function
69 the result is the same in case a label
70 is duplicated in the input.
71 sparse : bool
72 If true then the return value is sparse matrix. Note that
73 if sparse is True, then mode cannot be 'stack' because
74 sparse matrices need to be 2D
75
76 Returns
77 -------
78 one_hot : a NumPy array (can be 1D-3D depending on settings)
79 where normally the first axis are the different batch items,
80 the second axis the labels, the third axis the one_hot
81 vectors. Can be dense or sparse.
82 """
83 if mode not in ('concatenate', 'stack', 'merge'):
84 raise ValueError("%s got bad mode argument '%s'" %
85 (self.__class__.__name__, str(self._max_labels)))
86 elif mode == 'stack' and sparse:
87 raise ValueError("Sparse matrices need to be 2D, hence they"
88 "cannot be stacked")
89 if targets.ndim > 2:
90 raise ValueError("Targets needs to be 1D or 2D, but received %d "
91 "dimensions" % targets.ndim)
92 if 'int' not in str(targets.dtype):
93 raise TypeError("need an integer array for targets")
94 if sparse:
95 if mode == 'concatenate':
96 one_hot = scipy.sparse.csr_matrix(
97 (np.ones(targets.size, dtype=self._dtype),
98 (targets.flatten() + np.arange(targets.size)
99 * self._max_labels)
100 % (self._max_labels * targets.shape[1]),
101 np.arange(targets.shape[0] + 1) * targets.shape[1]),
102 (targets.shape[0], self._max_labels * targets.shape[1])
103 )
104 elif mode == 'merge':
105 one_hot = scipy.sparse.csr_matrix(
106 (np.ones(targets.size), targets.flatten(),
107 np.arange(targets.shape[0] + 1) * targets.shape[1]),
108 (targets.shape[0], self._max_labels)
109 )
110 else:
111 one_hot = np.zeros(targets.shape + (self._max_labels,),
112 dtype=self._dtype)
113 shape = (np.prod(one_hot.shape[:-1]), one_hot.shape[-1])
114 one_hot.reshape(shape)[np.arange(shape[0]), targets.flatten()] = 1
115 if mode == 'concatenate':
116 shape = one_hot.shape[-3:-2] + (reduce(mul,
117 one_hot.shape[-2:], 1),)
118 one_hot = one_hot.reshape(shape)
119 elif mode == 'merge':
120 one_hot = np.minimum(one_hot.sum(axis=one_hot.ndim - 2), 1)
121 return one_hot
122
123 def theano_expr(self, targets, mode='stack', sparse=False):
124 """
125 Return the one-hot transformation as a symbolic expression.
126 If labels appear multiple times, their value in the one-hot
127 vector is incremented.
128
129 Parameters
130 ----------
131 targets : tensor_like, 1- or 2-dimensional, integer dtype
132 A symbolic tensor representing labels as integers
133 between 0 and `max_labels` - 1, `max_labels` supplied
134 at formatter construction.
135 mode : string
136 The way in which to convert the labels to arrays. Takes
137 three different options:
138
139 - "concatenate" : concatenates the one-hot vectors from
140 multiple labels
141 - "stack" : returns a matrix where each row is the
142 one-hot vector of a label
143 - "merge" : merges the one-hot vectors together to
144 form a vector where the elements are
145 the result of an indicator function
146 NB: As the result of an indicator function
147 the result is the same in case a label
148 is duplicated in the input.
149 sparse : bool
150 If true then the return value is sparse matrix. Note that
151 if sparse is True, then mode cannot be 'stack' because
152 sparse matrices need to be 2D
153
154 Returns
155 -------
156 one_hot : TensorVariable, 1, 2 or 3-dimensional, sparse or dense
157 A symbolic tensor representing a one-hot encoding of the
158 supplied labels.
159 """
160 if mode not in ('concatenate', 'stack', 'merge'):
161 raise ValueError("%s got bad mode argument '%s'" %
162 (self.__class__.__name__, str(self._max_labels)))
163 elif mode == 'stack' and sparse:
164 raise ValueError("Sparse matrices need to be 2D, hence they"
165 "cannot be stacked")
166 squeeze_required = False
167 if targets.ndim != 2:
168 if targets.ndim == 1:
169 squeeze_required = True
170 targets = targets.dimshuffle('x', 0)
171 else:
172 raise ValueError("targets tensor must be 1 or 2-dimensional")
173 if 'int' not in str(targets.dtype):
174 raise TypeError("need an integer tensor for targets")
175 if sparse:
176 if mode == 'concatenate':
177 one_hot = theano.sparse.CSR(
178 tensor.ones_like(targets, dtype=self._dtype).flatten(),
179 (targets.flatten() + tensor.arange(targets.size) *
180 self._max_labels) % (self._max_labels * targets.shape[1]),
181 tensor.arange(targets.shape[0] + 1) * targets.shape[1],
182 tensor.stack(targets.shape[0],
183 self._max_labels * targets.shape[1])
184 )
185 else:
186 one_hot = theano.sparse.CSR(
187 tensor.ones_like(targets, dtype=self._dtype).flatten(),
188 targets.flatten(),
189 tensor.arange(targets.shape[0] + 1) * targets.shape[1],
190 tensor.stack(targets.shape[0], self._max_labels)
191 )
192 else:
193 if mode == 'concatenate':
194 one_hot = tensor.zeros((targets.shape[0] * targets.shape[1],
195 self._max_labels))
196 one_hot = tensor.set_subtensor(
197 one_hot[tensor.arange(targets.size),
198 targets.flatten()], 1)
199 one_hot = one_hot.reshape(
200 (targets.shape[0], targets.shape[1] * self._max_labels)
201 )
202 elif mode == 'merge':
203 one_hot = tensor.zeros((targets.shape[0], self._max_labels))
204 one_hot = tensor.set_subtensor(
205 one_hot[tensor.arange(targets.size) % targets.shape[0],
206 targets.T.flatten()], 1)
207 else:
208 one_hot = tensor.zeros((targets.shape[0], targets.shape[1],
209 self._max_labels))
210 one_hot = tensor.set_subtensor(one_hot[
211 tensor.arange(targets.shape[0]).reshape((targets.shape[0],
212 1)),
213 tensor.arange(targets.shape[1]),
214 targets
215 ], 1)
216 if squeeze_required:
217 if one_hot.ndim == 2:
218 one_hot = one_hot.reshape((one_hot.shape[1],))
219 if one_hot.ndim == 3:
220 one_hot = one_hot.reshape((one_hot.shape[1],
221 one_hot.shape[2]))
222 return one_hot
223
224
225 def convert_to_one_hot(integer_vector, dtype=None, max_labels=None,
226 mode='stack', sparse=False):
227 """
228 Formats a given array of target labels into a one-hot
229 vector.
230
231 Parameters
232 ----------
233 max_labels : int, optional
234 The number of possible classes/labels. This means that
235 all labels should be < max_labels. Example: For MNIST
236 there are 10 numbers and hence max_labels = 10. If not
237 given it defaults to max(integer_vector) + 1.
238 dtype : dtype, optional
239 The desired dtype for the converted one-hot vectors.
240 Defaults to config.floatX if not given.
241 integer_vector : ndarray
242 A 1D array of targets, or a batch (2D array) where
243 each row is a list of targets.
244 mode : string
245 The way in which to convert the labels to arrays. Takes
246 three different options:
247
248 - "concatenate" : concatenates the one-hot vectors from
249 multiple labels
250 - "stack" : returns a matrix where each row is the
251 one-hot vector of a label
252 - "merge" : merges the one-hot vectors together to
253 form a vector where the elements are
254 the result of an indicator function
255 sparse : bool
256 If true then the return value is sparse matrix. Note that
257 if sparse is True, then mode cannot be 'stack' because
258 sparse matrices need to be 2D
259
260 Returns
261 -------
262 one_hot : NumPy array
263 Can be 1D-3D depending on settings. Normally, the first axis are
264 the different batch items, the second axis the labels, the third
265 axis the one_hot vectors. Can be dense or sparse.
266 """
267 if dtype is None:
268 dtype = config.floatX
269 if isinstance(integer_vector, list):
270 integer_vector = np.array(integer_vector)
271 assert np.min(integer_vector) >= 0
272 assert integer_vector.ndim <= 2
273 if max_labels is None:
274 max_labels = max(integer_vector) + 1
275 return OneHotFormatter(max_labels, dtype=dtype).format(
276 integer_vector, mode=mode, sparse=sparse
277 )
278
[end of pylearn2/format/target_format.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pylearn2/format/target_format.py b/pylearn2/format/target_format.py
--- a/pylearn2/format/target_format.py
+++ b/pylearn2/format/target_format.py
@@ -2,9 +2,12 @@
from operator import mul
import numpy as np
-import scipy
-import scipy.sparse
import theano.sparse
+if theano.sparse.enable_sparse:
+ scipy_available = True
+ import scipy.sparse
+else:
+ scipy_available = False
from theano import tensor, config
@@ -92,6 +95,10 @@
if 'int' not in str(targets.dtype):
raise TypeError("need an integer array for targets")
if sparse:
+ if not scipy_available:
+ raise RuntimeError("The converting of indices to a sparse "
+ "one-hot vector requires scipy to be "
+ "installed")
if mode == 'concatenate':
one_hot = scipy.sparse.csr_matrix(
(np.ones(targets.size, dtype=self._dtype),
|
{"golden_diff": "diff --git a/pylearn2/format/target_format.py b/pylearn2/format/target_format.py\n--- a/pylearn2/format/target_format.py\n+++ b/pylearn2/format/target_format.py\n@@ -2,9 +2,12 @@\n from operator import mul\n \n import numpy as np\n-import scipy\n-import scipy.sparse\n import theano.sparse\n+if theano.sparse.enable_sparse:\n+ scipy_available = True\n+ import scipy.sparse\n+else:\n+ scipy_available = False\n from theano import tensor, config\n \n \n@@ -92,6 +95,10 @@\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer array for targets\")\n if sparse:\n+ if not scipy_available:\n+ raise RuntimeError(\"The converting of indices to a sparse \"\n+ \"one-hot vector requires scipy to be \"\n+ \"installed\")\n if mode == 'concatenate':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size, dtype=self._dtype),\n", "issue": "target_format.py introduces hard dependence on scipy\nscipy imports are all meant to be guarded by try/except statements so you can run the core library without installing scipy\n\n", "before_files": [{"content": "\"\"\"Code for reformatting supervised learning targets.\"\"\"\nfrom operator import mul\n\nimport numpy as np\nimport scipy\nimport scipy.sparse\nimport theano.sparse\nfrom theano import tensor, config\n\n\nclass OneHotFormatter(object):\n \"\"\"\n A target formatter that transforms labels from integers in both single\n and batch mode.\n\n Parameters\n ----------\n max_labels : int\n The number of possible classes/labels. This means that all labels\n should be < max_labels. Example: For MNIST there are 10 numbers\n and hence max_labels = 10.\n dtype : dtype, optional\n The desired dtype for the converted one-hot vectors. Defaults to\n `config.floatX` if not given.\n \"\"\"\n def __init__(self, max_labels, dtype=None):\n \"\"\"\n Initializes the formatter given the number of max labels.\n \"\"\"\n try:\n np.empty(max_labels)\n except (ValueError, TypeError):\n raise ValueError(\"%s got bad max_labels argument '%s'\" %\n (self.__class__.__name__, str(max_labels)))\n self._max_labels = max_labels\n if dtype is None:\n self._dtype = config.floatX\n else:\n try:\n np.dtype(dtype)\n except TypeError:\n raise TypeError(\"%s got bad dtype identifier %s\" %\n (self.__class__.__name__, str(dtype)))\n self._dtype = dtype\n\n def format(self, targets, mode='stack', sparse=False):\n \"\"\"\n Formats a given array of target labels into a one-hot\n vector. If labels appear multiple times, their value\n in the one-hot vector is incremented.\n\n Parameters\n ----------\n targets : ndarray\n A 1D array of targets, or a batch (2D array) where\n each row is a list of targets.\n mode : string\n The way in which to convert the labels to arrays. Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n NB: As the result of an indicator function\n the result is the same in case a label\n is duplicated in the input.\n sparse : bool\n If true then the return value is sparse matrix. 
Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : a NumPy array (can be 1D-3D depending on settings)\n where normally the first axis are the different batch items,\n the second axis the labels, the third axis the one_hot\n vectors. Can be dense or sparse.\n \"\"\"\n if mode not in ('concatenate', 'stack', 'merge'):\n raise ValueError(\"%s got bad mode argument '%s'\" %\n (self.__class__.__name__, str(self._max_labels)))\n elif mode == 'stack' and sparse:\n raise ValueError(\"Sparse matrices need to be 2D, hence they\"\n \"cannot be stacked\")\n if targets.ndim > 2:\n raise ValueError(\"Targets needs to be 1D or 2D, but received %d \"\n \"dimensions\" % targets.ndim)\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer array for targets\")\n if sparse:\n if mode == 'concatenate':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size, dtype=self._dtype),\n (targets.flatten() + np.arange(targets.size)\n * self._max_labels)\n % (self._max_labels * targets.shape[1]),\n np.arange(targets.shape[0] + 1) * targets.shape[1]),\n (targets.shape[0], self._max_labels * targets.shape[1])\n )\n elif mode == 'merge':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size), targets.flatten(),\n np.arange(targets.shape[0] + 1) * targets.shape[1]),\n (targets.shape[0], self._max_labels)\n )\n else:\n one_hot = np.zeros(targets.shape + (self._max_labels,),\n dtype=self._dtype)\n shape = (np.prod(one_hot.shape[:-1]), one_hot.shape[-1])\n one_hot.reshape(shape)[np.arange(shape[0]), targets.flatten()] = 1\n if mode == 'concatenate':\n shape = one_hot.shape[-3:-2] + (reduce(mul,\n one_hot.shape[-2:], 1),)\n one_hot = one_hot.reshape(shape)\n elif mode == 'merge':\n one_hot = np.minimum(one_hot.sum(axis=one_hot.ndim - 2), 1)\n return one_hot\n\n def theano_expr(self, targets, mode='stack', sparse=False):\n \"\"\"\n Return the one-hot transformation as a symbolic expression.\n If labels appear multiple times, their value in the one-hot\n vector is incremented.\n\n Parameters\n ----------\n targets : tensor_like, 1- or 2-dimensional, integer dtype\n A symbolic tensor representing labels as integers\n between 0 and `max_labels` - 1, `max_labels` supplied\n at formatter construction.\n mode : string\n The way in which to convert the labels to arrays. Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n NB: As the result of an indicator function\n the result is the same in case a label\n is duplicated in the input.\n sparse : bool\n If true then the return value is sparse matrix. 
Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : TensorVariable, 1, 2 or 3-dimensional, sparse or dense\n A symbolic tensor representing a one-hot encoding of the\n supplied labels.\n \"\"\"\n if mode not in ('concatenate', 'stack', 'merge'):\n raise ValueError(\"%s got bad mode argument '%s'\" %\n (self.__class__.__name__, str(self._max_labels)))\n elif mode == 'stack' and sparse:\n raise ValueError(\"Sparse matrices need to be 2D, hence they\"\n \"cannot be stacked\")\n squeeze_required = False\n if targets.ndim != 2:\n if targets.ndim == 1:\n squeeze_required = True\n targets = targets.dimshuffle('x', 0)\n else:\n raise ValueError(\"targets tensor must be 1 or 2-dimensional\")\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer tensor for targets\")\n if sparse:\n if mode == 'concatenate':\n one_hot = theano.sparse.CSR(\n tensor.ones_like(targets, dtype=self._dtype).flatten(),\n (targets.flatten() + tensor.arange(targets.size) *\n self._max_labels) % (self._max_labels * targets.shape[1]),\n tensor.arange(targets.shape[0] + 1) * targets.shape[1],\n tensor.stack(targets.shape[0],\n self._max_labels * targets.shape[1])\n )\n else:\n one_hot = theano.sparse.CSR(\n tensor.ones_like(targets, dtype=self._dtype).flatten(),\n targets.flatten(),\n tensor.arange(targets.shape[0] + 1) * targets.shape[1],\n tensor.stack(targets.shape[0], self._max_labels)\n )\n else:\n if mode == 'concatenate':\n one_hot = tensor.zeros((targets.shape[0] * targets.shape[1],\n self._max_labels))\n one_hot = tensor.set_subtensor(\n one_hot[tensor.arange(targets.size),\n targets.flatten()], 1)\n one_hot = one_hot.reshape(\n (targets.shape[0], targets.shape[1] * self._max_labels)\n )\n elif mode == 'merge':\n one_hot = tensor.zeros((targets.shape[0], self._max_labels))\n one_hot = tensor.set_subtensor(\n one_hot[tensor.arange(targets.size) % targets.shape[0],\n targets.T.flatten()], 1)\n else:\n one_hot = tensor.zeros((targets.shape[0], targets.shape[1],\n self._max_labels))\n one_hot = tensor.set_subtensor(one_hot[\n tensor.arange(targets.shape[0]).reshape((targets.shape[0],\n 1)),\n tensor.arange(targets.shape[1]),\n targets\n ], 1)\n if squeeze_required:\n if one_hot.ndim == 2:\n one_hot = one_hot.reshape((one_hot.shape[1],))\n if one_hot.ndim == 3:\n one_hot = one_hot.reshape((one_hot.shape[1],\n one_hot.shape[2]))\n return one_hot\n\n\ndef convert_to_one_hot(integer_vector, dtype=None, max_labels=None,\n mode='stack', sparse=False):\n \"\"\"\n Formats a given array of target labels into a one-hot\n vector.\n\n Parameters\n ----------\n max_labels : int, optional\n The number of possible classes/labels. This means that\n all labels should be < max_labels. Example: For MNIST\n there are 10 numbers and hence max_labels = 10. If not\n given it defaults to max(integer_vector) + 1.\n dtype : dtype, optional\n The desired dtype for the converted one-hot vectors.\n Defaults to config.floatX if not given.\n integer_vector : ndarray\n A 1D array of targets, or a batch (2D array) where\n each row is a list of targets.\n mode : string\n The way in which to convert the labels to arrays. 
Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n sparse : bool\n If true then the return value is sparse matrix. Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : NumPy array\n Can be 1D-3D depending on settings. Normally, the first axis are\n the different batch items, the second axis the labels, the third\n axis the one_hot vectors. Can be dense or sparse.\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n if isinstance(integer_vector, list):\n integer_vector = np.array(integer_vector)\n assert np.min(integer_vector) >= 0\n assert integer_vector.ndim <= 2\n if max_labels is None:\n max_labels = max(integer_vector) + 1\n return OneHotFormatter(max_labels, dtype=dtype).format(\n integer_vector, mode=mode, sparse=sparse\n )\n", "path": "pylearn2/format/target_format.py"}]}
| 3,757 | 219 |
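The golden diff above replaces the module-level `import scipy` with a guard keyed off `theano.sparse.enable_sparse`, and raises a `RuntimeError` only when a sparse one-hot is actually requested. A minimal standalone sketch of that pattern follows; the function name is assumed for illustration.

```python
import theano.sparse

if theano.sparse.enable_sparse:
    scipy_available = True
    import scipy.sparse  # used by the sparse code paths when available
else:
    scipy_available = False


def require_scipy_for_sparse(sparse):
    """Raise a clear error at the call site instead of failing at import time."""
    if sparse and not scipy_available:
        raise RuntimeError("The converting of indices to a sparse "
                           "one-hot vector requires scipy to be installed")
```

The point of the design is that the core library stays importable without scipy; the optional dependency is only enforced where it is genuinely needed.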
gh_patches_debug_926
|
rasdani/github-patches
|
git_diff
|
Pyomo__pyomo-429
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Review objects exposed by environ
At the request of @jsiirola after I brought this to his attention, some Pyomo objects are not exposed by environ that would otherwise be expected. One that I have encountered is `TerminationCondition`, which needs to be imported from `pyomo.opt`.
</issue>
<code>
[start of pyomo/environ/__init__.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 import sys as _sys
12 if _sys.version_info[0] >= 3:
13 import importlib
14
15 def _do_import(pkg_name):
16 importlib.import_module(pkg_name)
17 else:
18 def _do_import(pkg_name):
19 __import__(pkg_name, globals(), locals(), [], -1)
20
21 #
22 # These packages contain plugins that need to be loaded
23 #
24 _packages = [
25 'pyomo.opt',
26 'pyomo.core',
27 'pyomo.checker',
28 'pyomo.repn',
29 'pyomo.pysp',
30 'pyomo.neos',
31 'pyomo.solvers',
32 'pyomo.gdp',
33 'pyomo.mpec',
34 'pyomo.dae',
35 'pyomo.bilevel',
36 'pyomo.scripting',
37 ]
38 #
39 #
40 # These packages also contain plugins that need to be loaded, but
41 # we silently ignore any import errors because these
42 # packages are optional and/or under development.
43 #
44 _optional_packages = set([
45 'pyomo.contrib.example',
46 'pyomo.contrib.preprocessing',
47 'pyomo.contrib.gdpopt',
48 'pyomo.contrib.trustregion',
49 ])
50
51
52 def _import_packages():
53 #
54 # Import required packages
55 #
56 for name in _packages:
57 pname = name+'.plugins'
58 try:
59 _do_import(pname)
60 except ImportError:
61 exctype, err, tb = _sys.exc_info() # BUG?
62 import traceback
63 msg = "pyomo.environ failed to import %s:\nOriginal %s: %s\n"\
64 "Traceback:\n%s" \
65 % (pname, exctype.__name__, err,
66 ''.join(traceback.format_tb(tb)),)
67 # clear local variables to remove circular references
68 exctype = err = tb = None
69 # TODO: Should this just log an error and re-raise the
70 # original exception?
71 raise ImportError(msg)
72
73 pkg = _sys.modules[pname]
74 pkg.load()
75 #
76 # Import optional packages
77 #
78 for name in _optional_packages:
79 pname = name+'.plugins'
80 try:
81 _do_import(pname)
82 except ImportError:
83 continue
84 pkg = _sys.modules[pname]
85 pkg.load()
86
87 from pyomo.util.plugin import PluginGlobals as _PG
88 _PG.add_env("pyomo")
89 _import_packages()
90 _PG.pop_env()
91
92 #
93 # Expose the symbols from pyomo.core
94 #
95 from pyomo.core import *
96 from pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver
97
[end of pyomo/environ/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyomo/environ/__init__.py b/pyomo/environ/__init__.py
--- a/pyomo/environ/__init__.py
+++ b/pyomo/environ/__init__.py
@@ -93,4 +93,7 @@
# Expose the symbols from pyomo.core
#
from pyomo.core import *
-from pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver
+from pyomo.opt import (
+ SolverFactory, SolverManagerFactory, UnknownSolver,
+ TerminationCondition, SolverStatus,
+)
|
{"golden_diff": "diff --git a/pyomo/environ/__init__.py b/pyomo/environ/__init__.py\n--- a/pyomo/environ/__init__.py\n+++ b/pyomo/environ/__init__.py\n@@ -93,4 +93,7 @@\n # Expose the symbols from pyomo.core\n #\n from pyomo.core import *\n-from pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver\n+from pyomo.opt import (\n+ SolverFactory, SolverManagerFactory, UnknownSolver,\n+ TerminationCondition, SolverStatus,\n+)\n", "issue": "Review objects exposed by environ\nAt the request of @jsiirola after I brought this to his attention, some Pyomo objects are not exposed by environ that would otherwise be expected. One that I have encountered is `TerminationCondition`, which needs to be imported from `pyomo.opt`.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport sys as _sys\nif _sys.version_info[0] >= 3:\n import importlib\n\n def _do_import(pkg_name):\n importlib.import_module(pkg_name)\nelse:\n def _do_import(pkg_name):\n __import__(pkg_name, globals(), locals(), [], -1)\n\n#\n# These packages contain plugins that need to be loaded\n#\n_packages = [\n 'pyomo.opt',\n 'pyomo.core',\n 'pyomo.checker',\n 'pyomo.repn',\n 'pyomo.pysp',\n 'pyomo.neos',\n 'pyomo.solvers',\n 'pyomo.gdp',\n 'pyomo.mpec',\n 'pyomo.dae',\n 'pyomo.bilevel',\n 'pyomo.scripting',\n]\n#\n#\n# These packages also contain plugins that need to be loaded, but\n# we silently ignore any import errors because these\n# packages are optional and/or under development.\n#\n_optional_packages = set([\n 'pyomo.contrib.example',\n 'pyomo.contrib.preprocessing',\n 'pyomo.contrib.gdpopt',\n 'pyomo.contrib.trustregion',\n])\n\n\ndef _import_packages():\n #\n # Import required packages\n #\n for name in _packages:\n pname = name+'.plugins'\n try:\n _do_import(pname)\n except ImportError:\n exctype, err, tb = _sys.exc_info() # BUG?\n import traceback\n msg = \"pyomo.environ failed to import %s:\\nOriginal %s: %s\\n\"\\\n \"Traceback:\\n%s\" \\\n % (pname, exctype.__name__, err,\n ''.join(traceback.format_tb(tb)),)\n # clear local variables to remove circular references\n exctype = err = tb = None\n # TODO: Should this just log an error and re-raise the\n # original exception?\n raise ImportError(msg)\n\n pkg = _sys.modules[pname]\n pkg.load()\n #\n # Import optional packages\n #\n for name in _optional_packages:\n pname = name+'.plugins'\n try:\n _do_import(pname)\n except ImportError:\n continue\n pkg = _sys.modules[pname]\n pkg.load()\n\nfrom pyomo.util.plugin import PluginGlobals as _PG\n_PG.add_env(\"pyomo\")\n_import_packages()\n_PG.pop_env()\n\n#\n# Expose the symbols from pyomo.core\n#\nfrom pyomo.core import *\nfrom pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver\n", "path": "pyomo/environ/__init__.py"}]}
| 1,432 | 118 |
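The golden diff above widens the `pyomo.opt` re-export in `pyomo/environ/__init__.py` to include `TerminationCondition` and `SolverStatus`. Assuming a patched install, a typical status check can then be written against `pyomo.environ` alone; the helper below is an illustrative usage sketch, not part of the patch.

```python
from pyomo.environ import TerminationCondition, SolverStatus


def solved_ok(results):
    """True when a solve finished cleanly (illustrative helper, not in the patch).

    `results` is whatever SolverFactory(...).solve(model) returned.
    """
    return (results.solver.status == SolverStatus.ok and
            results.solver.termination_condition == TerminationCondition.optimal)
```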
gh_patches_debug_27512
|
rasdani/github-patches
|
git_diff
|
blakeblackshear__frigate-5133
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Config Support]: Docker fails to start with KeyError: 'go2rtc'
### Describe the problem you are having
Starting an instance of frigate with a minimal config gives error `KeyError: 'go2rtc'`
Config file was based on the startup walk though from DOCs https://deploy-preview-4055--frigate-docs.netlify.app/guides/getting_started
Not sure if its a doc or build issue. Also tried putting in basic restream config and getting same error
### Version
v0.12.0-beta4
### Frigate config file
```yaml
mqtt:
enabled: false
cameras:
camera_1:
ffmpeg:
inputs:
- path: rtsp://10.0.20.102:554/s0
roles:
- detect
hwaccel_args: -c:v h264_cuvid
detect:
width: 1920
height: 1080
```
### Relevant log output
```shell
docker-compose up
Pulling frigate (ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt)...
0.12.0-beta4-tensorrt: Pulling from blakeblackshear/frigate
8740c948ffd4: Pull complete
b84a1ada9828: Pull complete
832b257640b7: Pull complete
60beb73d7e76: Pull complete
8e29f3174a08: Pull complete
4b46f368fd71: Pull complete
b651bc39c0a3: Pull complete
43b988f71200: Pull complete
Digest: sha256:7010839f6794a5d21a98bc9088400fdc3c382e089539f0409e0cec1ca64473fa
Status: Downloaded newer image for ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt
Creating frigate ... done
Attaching to frigate
frigate | s6-rc: info: service s6rc-oneshot-runner: starting
frigate | s6-rc: info: service s6rc-oneshot-runner successfully started
frigate | s6-rc: info: service fix-attrs: starting
frigate | s6-rc: info: service fix-attrs successfully started
frigate | s6-rc: info: service legacy-cont-init: starting
frigate | cont-init: info: running /etc/cont-init.d/prepare-logs.sh
frigate | cont-init: info: /etc/cont-init.d/prepare-logs.sh exited 0
frigate | s6-rc: info: service legacy-cont-init successfully started
frigate | s6-rc: info: service legacy-services: starting
frigate | services-up: info: copying legacy longrun frigate (no readiness notification)
frigate | services-up: info: copying legacy longrun go2rtc (no readiness notification)
frigate | services-up: info: copying legacy longrun nginx (no readiness notification)
frigate | s6-rc: info: service legacy-services successfully started
frigate | Traceback (most recent call last):
frigate | File "/usr/local/go2rtc/create_config.py", line 23, in <module>
frigate | go2rtc_config: dict[str, any] = config["go2rtc"]
frigate | KeyError: 'go2rtc'
frigate | s6-rc: info: service legacy-services: stopping
frigate | s6-svwait: fatal: supervisor died
frigate | s6-rc: info: service legacy-services successfully stopped
frigate | s6-rc: info: service legacy-cont-init: stopping
frigate | s6-rc: info: service legacy-cont-init successfully stopped
frigate | s6-rc: info: service fix-attrs: stopping
frigate | s6-rc: info: service fix-attrs successfully stopped
frigate | s6-rc: info: service s6rc-oneshot-runner: stopping
frigate | s6-rc: info: service s6rc-oneshot-runner successfully stopped
```
### Frigate stats
```json
N/A Frigate doesn't start
```
### Operating system
Other Linux
### Install method
Docker Compose
### Coral version
CPU (no coral)
### Any other information that may be helpful
Using a basic restream config:
``` yaml
mqtt:
enabled: false
#detectors:
#tensorrt:
# type: tensorrt
# device: 0 #This is the default, select the first GPU
go2rtc:
streams:
test_cam: ffmpeg:rtsp://10.0.20.102:554/s0
cameras:
camera_1:
ffmpeg:
inputs:
- path: rtsp://127.0.0.1:8554/test_cam?video=copy
input_args: preset-rtsp-restream
roles:
- detect
hwaccel_args: -c:v h264_cuvid
detect:
width: 1920
height: 1080
```
results in the same error
</issue>
<code>
[start of docker/rootfs/usr/local/go2rtc/create_config.py]
1 """Creates a go2rtc config file."""
2
3 import json
4 import os
5 import yaml
6
7
8 config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
9
10 # Check if we can use .yaml instead of .yml
11 config_file_yaml = config_file.replace(".yml", ".yaml")
12 if os.path.isfile(config_file_yaml):
13 config_file = config_file_yaml
14
15 with open(config_file) as f:
16 raw_config = f.read()
17
18 if config_file.endswith((".yaml", ".yml")):
19 config = yaml.safe_load(raw_config)
20 elif config_file.endswith(".json"):
21 config = json.loads(raw_config)
22
23 go2rtc_config: dict[str, any] = config["go2rtc"]
24
25 if not go2rtc_config.get("log", {}).get("format"):
26 go2rtc_config["log"] = {"format": "text"}
27
28 if not go2rtc_config.get("webrtc", {}).get("candidates", []):
29 go2rtc_config["webrtc"] = {"candidates": ["stun:8555"]}
30
31 print(json.dumps(go2rtc_config))
[end of docker/rootfs/usr/local/go2rtc/create_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py
--- a/docker/rootfs/usr/local/go2rtc/create_config.py
+++ b/docker/rootfs/usr/local/go2rtc/create_config.py
@@ -5,6 +5,7 @@
import yaml
+BTBN_PATH = "/usr/lib/btbn-ffmpeg"
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
@@ -20,12 +21,27 @@
elif config_file.endswith(".json"):
config = json.loads(raw_config)
-go2rtc_config: dict[str, any] = config["go2rtc"]
+go2rtc_config: dict[str, any] = config.get("go2rtc", {})
-if not go2rtc_config.get("log", {}).get("format"):
+# we want to ensure that logs are easy to read
+if go2rtc_config.get("log") is None:
go2rtc_config["log"] = {"format": "text"}
+elif go2rtc_config["log"].get("format") is None:
+ go2rtc_config["log"]["format"] = "text"
+# should set default stun server so webrtc can work
if not go2rtc_config.get("webrtc", {}).get("candidates", []):
go2rtc_config["webrtc"] = {"candidates": ["stun:8555"]}
-print(json.dumps(go2rtc_config))
\ No newline at end of file
+# need to replace ffmpeg command when using ffmpeg4
+if not os.path.exists(BTBN_PATH):
+ if go2rtc_config.get("ffmpeg") is None:
+ go2rtc_config["ffmpeg"] = {
+ "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+ }
+ elif go2rtc_config["ffmpeg"].get("rtsp") is None:
+ go2rtc_config["ffmpeg"][
+ "rtsp"
+ ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+
+print(json.dumps(go2rtc_config))
|
{"golden_diff": "diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py\n--- a/docker/rootfs/usr/local/go2rtc/create_config.py\n+++ b/docker/rootfs/usr/local/go2rtc/create_config.py\n@@ -5,6 +5,7 @@\n import yaml\n \n \n+BTBN_PATH = \"/usr/lib/btbn-ffmpeg\"\n config_file = os.environ.get(\"CONFIG_FILE\", \"/config/config.yml\")\n \n # Check if we can use .yaml instead of .yml\n@@ -20,12 +21,27 @@\n elif config_file.endswith(\".json\"):\n config = json.loads(raw_config)\n \n-go2rtc_config: dict[str, any] = config[\"go2rtc\"]\n+go2rtc_config: dict[str, any] = config.get(\"go2rtc\", {})\n \n-if not go2rtc_config.get(\"log\", {}).get(\"format\"):\n+# we want to ensure that logs are easy to read\n+if go2rtc_config.get(\"log\") is None:\n go2rtc_config[\"log\"] = {\"format\": \"text\"}\n+elif go2rtc_config[\"log\"].get(\"format\") is None:\n+ go2rtc_config[\"log\"][\"format\"] = \"text\"\n \n+# should set default stun server so webrtc can work\n if not go2rtc_config.get(\"webrtc\", {}).get(\"candidates\", []):\n go2rtc_config[\"webrtc\"] = {\"candidates\": [\"stun:8555\"]}\n \n-print(json.dumps(go2rtc_config))\n\\ No newline at end of file\n+# need to replace ffmpeg command when using ffmpeg4\n+if not os.path.exists(BTBN_PATH):\n+ if go2rtc_config.get(\"ffmpeg\") is None:\n+ go2rtc_config[\"ffmpeg\"] = {\n+ \"rtsp\": \"-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}\"\n+ }\n+ elif go2rtc_config[\"ffmpeg\"].get(\"rtsp\") is None:\n+ go2rtc_config[\"ffmpeg\"][\n+ \"rtsp\"\n+ ] = \"-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}\"\n+\n+print(json.dumps(go2rtc_config))\n", "issue": "[Config Support]: Docker fails to start with KeyError: 'go2rtc'\n### Describe the problem you are having\n\nStarting an instance of frigate with a minimal config gives error `KeyError: 'go2rtc'`\r\n\r\nConfig file was based on the startup walk though from DOCs https://deploy-preview-4055--frigate-docs.netlify.app/guides/getting_started\r\n\r\nNot sure if its a doc or build issue. Also tried putting in basic restream config and getting same error\n\n### Version\n\nv0.12.0-beta4\n\n### Frigate config file\n\n```yaml\nmqtt:\r\n enabled: false\r\n\r\ncameras:\r\n camera_1:\r\n ffmpeg:\r\n inputs:\r\n - path: rtsp://10.0.20.102:554/s0\r\n roles:\r\n - detect\r\n hwaccel_args: -c:v h264_cuvid\r\n detect:\r\n width: 1920\r\n height: 1080\n```\n\n\n### Relevant log output\n\n```shell\ndocker-compose up\r\nPulling frigate (ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt)...\r\n0.12.0-beta4-tensorrt: Pulling from blakeblackshear/frigate\r\n8740c948ffd4: Pull complete\r\nb84a1ada9828: Pull complete\r\n832b257640b7: Pull complete\r\n60beb73d7e76: Pull complete\r\n8e29f3174a08: Pull complete\r\n4b46f368fd71: Pull complete\r\nb651bc39c0a3: Pull complete\r\n43b988f71200: Pull complete\r\nDigest: sha256:7010839f6794a5d21a98bc9088400fdc3c382e089539f0409e0cec1ca64473fa\r\nStatus: Downloaded newer image for ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt\r\nCreating frigate ... 
done\r\nAttaching to frigate\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner: starting\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner successfully started\r\nfrigate | s6-rc: info: service fix-attrs: starting\r\nfrigate | s6-rc: info: service fix-attrs successfully started\r\nfrigate | s6-rc: info: service legacy-cont-init: starting\r\nfrigate | cont-init: info: running /etc/cont-init.d/prepare-logs.sh\r\nfrigate | cont-init: info: /etc/cont-init.d/prepare-logs.sh exited 0\r\nfrigate | s6-rc: info: service legacy-cont-init successfully started\r\nfrigate | s6-rc: info: service legacy-services: starting\r\nfrigate | services-up: info: copying legacy longrun frigate (no readiness notification)\r\nfrigate | services-up: info: copying legacy longrun go2rtc (no readiness notification)\r\nfrigate | services-up: info: copying legacy longrun nginx (no readiness notification)\r\nfrigate | s6-rc: info: service legacy-services successfully started\r\nfrigate | Traceback (most recent call last):\r\nfrigate | File \"/usr/local/go2rtc/create_config.py\", line 23, in <module>\r\nfrigate | go2rtc_config: dict[str, any] = config[\"go2rtc\"]\r\nfrigate | KeyError: 'go2rtc'\r\nfrigate | s6-rc: info: service legacy-services: stopping\r\nfrigate | s6-svwait: fatal: supervisor died\r\nfrigate | s6-rc: info: service legacy-services successfully stopped\r\nfrigate | s6-rc: info: service legacy-cont-init: stopping\r\nfrigate | s6-rc: info: service legacy-cont-init successfully stopped\r\nfrigate | s6-rc: info: service fix-attrs: stopping\r\nfrigate | s6-rc: info: service fix-attrs successfully stopped\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner: stopping\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner successfully stopped\n```\n\n\n### Frigate stats\n\n```json\nN/A Frigate doesn't start\n```\n\n\n### Operating system\n\nOther Linux\n\n### Install method\n\nDocker Compose\n\n### Coral version\n\nCPU (no coral)\n\n### Any other information that may be helpful\n\nUsing a basic restream config:\r\n``` yaml\r\nmqtt:\r\n enabled: false\r\n\r\n #detectors:\r\n #tensorrt:\r\n # type: tensorrt\r\n # device: 0 #This is the default, select the first GPU\r\n\r\ngo2rtc:\r\n streams:\r\n test_cam: ffmpeg:rtsp://10.0.20.102:554/s0\r\n\r\ncameras:\r\n camera_1:\r\n ffmpeg:\r\n inputs:\r\n - path: rtsp://127.0.0.1:8554/test_cam?video=copy\r\n input_args: preset-rtsp-restream\r\n roles:\r\n - detect\r\n hwaccel_args: -c:v h264_cuvid\r\n detect:\r\n width: 1920\r\n height: 1080\r\n```\r\nresults in the same error\n", "before_files": [{"content": "\"\"\"Creates a go2rtc config file.\"\"\"\n\nimport json\nimport os\nimport yaml\n\n\nconfig_file = os.environ.get(\"CONFIG_FILE\", \"/config/config.yml\")\n\n# Check if we can use .yaml instead of .yml\nconfig_file_yaml = config_file.replace(\".yml\", \".yaml\")\nif os.path.isfile(config_file_yaml):\n config_file = config_file_yaml\n\nwith open(config_file) as f:\n raw_config = f.read()\n\nif config_file.endswith((\".yaml\", \".yml\")):\n config = yaml.safe_load(raw_config)\nelif config_file.endswith(\".json\"):\n config = json.loads(raw_config)\n\ngo2rtc_config: dict[str, any] = config[\"go2rtc\"]\n\nif not go2rtc_config.get(\"log\", {}).get(\"format\"):\n go2rtc_config[\"log\"] = {\"format\": \"text\"}\n\nif not go2rtc_config.get(\"webrtc\", {}).get(\"candidates\", []):\n go2rtc_config[\"webrtc\"] = {\"candidates\": [\"stun:8555\"]}\n\nprint(json.dumps(go2rtc_config))", "path": "docker/rootfs/usr/local/go2rtc/create_config.py"}]}
| 2,079 | 518 |
gh_patches_debug_10359
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-3805
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
keyfinder: Output parsing error
### Problem
Running this command in verbose (`-vv`) mode:
``` sh
$ beet -vv keyfinder anything
```
Led to this problem:
```
user configuration: /home/diomekes/.config/beets/config.yaml
data directory: /home/diomekes/.config/beets
plugin paths:
Sending event: pluginload
inline: adding item field disc_and_track
library database: /home/diomekes/.config/beets/library.db
library directory: /home/diomekes/media/music
Sending event: library_opened
Traceback (most recent call last):
File "/usr/bin/beet", line 9, in <module>
load_entry_point('beets==1.3.19', 'console_scripts', 'beet')()
File "/usr/lib/python2.7/site-packages/beets/ui/__init__.py", line 1266, in main
_raw_main(args)
File "/usr/lib/python2.7/site-packages/beets/ui/__init__.py", line 1253, in _raw_main
subcommand.func(lib, suboptions, subargs)
File "/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py", line 48, in command
self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())
File "/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py", line 74, in find_key
key_raw = output.rsplit(None, 1)[-1]
IndexError: list index out of range
```
keyfinder-cli works if run directly
### Setup
- OS: archlinux
- Python version: 2.7.12
- beets version: 1.3.19
- Turning off plugins made problem go away (yes/no): problem is with keyfinder plugin only
- libkeyfinder-git 239.0a5ec7f-1
- keyfinder-cli-git 49.40a41ab-1
My configuration (output of `beet config`) is:
``` yaml
...
keyfinder:
bin: keyfinder-cli
auto: yes
overwrite: no
plugins: badfiles chroma convert duplicates fetchart fromfilename fuzzy info inline keyfinder lastgenre lyrics mbcollection mbsync missing play random scrub smartplaylist zero
...
```
</issue>
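
For reference, the failure in the traceback comes from `rsplit`: splitting an empty or whitespace-only output on `None` returns an empty list, so indexing `[-1]` raises `IndexError`. A small self-contained sketch of that behaviour and one possible guard is below; the byte strings are illustrative, not real keyfinder-cli output.

```python
def parse_key(output):
    """Return the last whitespace-separated token of keyfinder output, or None."""
    tokens = output.rsplit(None, 1)
    if not tokens:  # keyfinder-cli exited 0 but printed nothing at all
        return None
    return tokens[-1]


print(b"".rsplit(None, 1))   # [] -- why output.rsplit(None, 1)[-1] raises IndexError
print(parse_key(b"Bbm\n"))   # b'Bbm' (illustrative output)
print(parse_key(b""))        # None instead of an IndexError
```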
<code>
[start of beetsplug/keyfinder.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Thomas Scholtes.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Uses the `KeyFinder` program to add the `initial_key` field.
17 """
18
19 from __future__ import division, absolute_import, print_function
20
21 import os.path
22 import subprocess
23
24 from beets import ui
25 from beets import util
26 from beets.plugins import BeetsPlugin
27
28
29 class KeyFinderPlugin(BeetsPlugin):
30
31 def __init__(self):
32 super(KeyFinderPlugin, self).__init__()
33 self.config.add({
34 u'bin': u'KeyFinder',
35 u'auto': True,
36 u'overwrite': False,
37 })
38
39 if self.config['auto'].get(bool):
40 self.import_stages = [self.imported]
41
42 def commands(self):
43 cmd = ui.Subcommand('keyfinder',
44 help=u'detect and add initial key from audio')
45 cmd.func = self.command
46 return [cmd]
47
48 def command(self, lib, opts, args):
49 self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())
50
51 def imported(self, session, task):
52 self.find_key(task.imported_items())
53
54 def find_key(self, items, write=False):
55 overwrite = self.config['overwrite'].get(bool)
56 command = [self.config['bin'].as_str()]
57 # The KeyFinder GUI program needs the -f flag before the path.
58 # keyfinder-cli is similar, but just wants the path with no flag.
59 if 'keyfinder-cli' not in os.path.basename(command[0]).lower():
60 command.append('-f')
61
62 for item in items:
63 if item['initial_key'] and not overwrite:
64 continue
65
66 try:
67 output = util.command_output(command + [util.syspath(
68 item.path)]).stdout
69 except (subprocess.CalledProcessError, OSError) as exc:
70 self._log.error(u'execution failed: {0}', exc)
71 continue
72 except UnicodeEncodeError:
73 # Workaround for Python 2 Windows bug.
74 # https://bugs.python.org/issue1759845
75 self._log.error(u'execution failed for Unicode path: {0!r}',
76 item.path)
77 continue
78
79 key_raw = output.rsplit(None, 1)[-1]
80 try:
81 key = util.text_string(key_raw)
82 except UnicodeDecodeError:
83 self._log.error(u'output is invalid UTF-8')
84 continue
85
86 item['initial_key'] = key
87 self._log.info(u'added computed initial key {0} for {1}',
88 key, util.displayable_path(item.path))
89
90 if write:
91 item.try_write()
92 item.store()
93
[end of beetsplug/keyfinder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/beetsplug/keyfinder.py b/beetsplug/keyfinder.py
--- a/beetsplug/keyfinder.py
+++ b/beetsplug/keyfinder.py
@@ -76,7 +76,14 @@
item.path)
continue
- key_raw = output.rsplit(None, 1)[-1]
+ try:
+ key_raw = output.rsplit(None, 1)[-1]
+ except IndexError:
+ # Sometimes keyfinder-cli returns 0 but with no key, usually
+ # when the file is silent or corrupt, so we log and skip.
+ self._log.error(u'no key returned for path: {0}', item.path)
+ continue
+
try:
key = util.text_string(key_raw)
except UnicodeDecodeError:
|
{"golden_diff": "diff --git a/beetsplug/keyfinder.py b/beetsplug/keyfinder.py\n--- a/beetsplug/keyfinder.py\n+++ b/beetsplug/keyfinder.py\n@@ -76,7 +76,14 @@\n item.path)\n continue\n \n- key_raw = output.rsplit(None, 1)[-1]\n+ try:\n+ key_raw = output.rsplit(None, 1)[-1]\n+ except IndexError:\n+ # Sometimes keyfinder-cli returns 0 but with no key, usually\n+ # when the file is silent or corrupt, so we log and skip.\n+ self._log.error(u'no key returned for path: {0}', item.path)\n+ continue\n+\n try:\n key = util.text_string(key_raw)\n except UnicodeDecodeError:\n", "issue": "keyfinder: Output parsing error\n### Problem\n\nRunning this command in verbose (`-vv`) mode:\n\n``` sh\n$ beet -vv keyfinder anything\n```\n\nLed to this problem:\n\n```\nuser configuration: /home/diomekes/.config/beets/config.yaml\ndata directory: /home/diomekes/.config/beets\nplugin paths:\nSending event: pluginload\ninline: adding item field disc_and_track\nlibrary database: /home/diomekes/.config/beets/library.db\nlibrary directory: /home/diomekes/media/music\nSending event: library_opened\nTraceback (most recent call last):\n File \"/usr/bin/beet\", line 9, in <module>\n load_entry_point('beets==1.3.19', 'console_scripts', 'beet')()\n File \"/usr/lib/python2.7/site-packages/beets/ui/__init__.py\", line 1266, in main\n _raw_main(args)\n File \"/usr/lib/python2.7/site-packages/beets/ui/__init__.py\", line 1253, in _raw_main\n subcommand.func(lib, suboptions, subargs)\n File \"/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py\", line 48, in command\n self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())\n File \"/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py\", line 74, in find_key\n key_raw = output.rsplit(None, 1)[-1]\nIndexError: list index out of range\n```\n\nkeyfinder-cli works if run directly\n### Setup\n- OS: archlinux\n- Python version: 2.7.12\n- beets version: 1.3.19\n- Turning off plugins made problem go away (yes/no): problem is with keyfinder plugin only\n- libkeyfinder-git 239.0a5ec7f-1\n- keyfinder-cli-git 49.40a41ab-1\n\nMy configuration (output of `beet config`) is:\n\n``` yaml\n...\nkeyfinder:\n bin: keyfinder-cli\n auto: yes\n overwrite: no\n\nplugins: badfiles chroma convert duplicates fetchart fromfilename fuzzy info inline keyfinder lastgenre lyrics mbcollection mbsync missing play random scrub smartplaylist zero\n...\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Thomas Scholtes.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Uses the `KeyFinder` program to add the `initial_key` field.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os.path\nimport subprocess\n\nfrom beets import ui\nfrom beets import util\nfrom beets.plugins import BeetsPlugin\n\n\nclass KeyFinderPlugin(BeetsPlugin):\n\n def __init__(self):\n super(KeyFinderPlugin, self).__init__()\n self.config.add({\n u'bin': u'KeyFinder',\n u'auto': True,\n 
u'overwrite': False,\n })\n\n if self.config['auto'].get(bool):\n self.import_stages = [self.imported]\n\n def commands(self):\n cmd = ui.Subcommand('keyfinder',\n help=u'detect and add initial key from audio')\n cmd.func = self.command\n return [cmd]\n\n def command(self, lib, opts, args):\n self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())\n\n def imported(self, session, task):\n self.find_key(task.imported_items())\n\n def find_key(self, items, write=False):\n overwrite = self.config['overwrite'].get(bool)\n command = [self.config['bin'].as_str()]\n # The KeyFinder GUI program needs the -f flag before the path.\n # keyfinder-cli is similar, but just wants the path with no flag.\n if 'keyfinder-cli' not in os.path.basename(command[0]).lower():\n command.append('-f')\n\n for item in items:\n if item['initial_key'] and not overwrite:\n continue\n\n try:\n output = util.command_output(command + [util.syspath(\n item.path)]).stdout\n except (subprocess.CalledProcessError, OSError) as exc:\n self._log.error(u'execution failed: {0}', exc)\n continue\n except UnicodeEncodeError:\n # Workaround for Python 2 Windows bug.\n # https://bugs.python.org/issue1759845\n self._log.error(u'execution failed for Unicode path: {0!r}',\n item.path)\n continue\n\n key_raw = output.rsplit(None, 1)[-1]\n try:\n key = util.text_string(key_raw)\n except UnicodeDecodeError:\n self._log.error(u'output is invalid UTF-8')\n continue\n\n item['initial_key'] = key\n self._log.info(u'added computed initial key {0} for {1}',\n key, util.displayable_path(item.path))\n\n if write:\n item.try_write()\n item.store()\n", "path": "beetsplug/keyfinder.py"}]}
| 1,946 | 172 |
gh_patches_debug_54782
|
rasdani/github-patches
|
git_diff
|
encode__httpx-362
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Releasing 0.7.3
Hi @encode/httpx-maintainers!
It’s been 21 days since 0.7.2 was released, and we’ve got [a bunch of features](https://github.com/encode/httpx/compare/0.7.2...HEAD) ready for 0.7.3 already, eg:
- Digest auth
- SSLKEYLOGFILE
- Response.elapsed
- A host of bug fixes
So regardless of what gets merged until then I think it’s time to release the next version. :)
As suggested by @sethmlarson I-cant-remember-where I’d like to take on this release. I’ll probably take the opportunity to document the release process as well - #313. 👍
Probably will do tonight.
</issue>
<code>
[start of httpx/__version__.py]
1 __title__ = "httpx"
2 __description__ = "A next generation HTTP client, for Python 3."
3 __version__ = "0.7.2"
4
[end of httpx/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/httpx/__version__.py b/httpx/__version__.py
--- a/httpx/__version__.py
+++ b/httpx/__version__.py
@@ -1,3 +1,3 @@
__title__ = "httpx"
__description__ = "A next generation HTTP client, for Python 3."
-__version__ = "0.7.2"
+__version__ = "0.7.3"
|
{"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.7.2\"\n+__version__ = \"0.7.3\"\n", "issue": "Releasing 0.7.3\nHi @encode/httpx-maintainers!\r\n\r\nIt\u2019s been 21 days since 0.7.2 was released, and we\u2019ve got [a bunch of features](https://github.com/encode/httpx/compare/0.7.2...HEAD) ready for 0.7.3 already, eg:\r\n\r\n- Digest auth\r\n- SSLKEYLOGFILE\r\n- Response.elapsed\r\n- A host of bug fixes\r\n\r\nSo regardless of what gets merged until then I think it\u2019s time to release the next version. :)\r\n\r\nAs suggested by @sethmlarson I-cant-remember-where I\u2019d like to take on this release. I\u2019ll probably take the opportunity to document the release process as well - #313. \ud83d\udc4d\r\n\r\nProbably will do tonight.\r\n\r\n\n", "before_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.7.2\"\n", "path": "httpx/__version__.py"}]}
| 749 | 94 |
gh_patches_debug_19838
|
rasdani/github-patches
|
git_diff
|
napari__napari-649
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
numpy.pad() issues on version 1.16
## 🐛 Bug
I had numpy version 1.16 and the `test_viewer.py` tests were failing because `np.pad()` was missing the `mode` positional argument (see below). I noticed that in [v1.17, `mode` is optional](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html) and when I upgraded numpy (1.17.3), the tests passed.
## To Reproduce
Steps to reproduce the behavior:
1. Install numpy version 1.16
2. run the tests in `/napari/tests/test_viewer.py`
3. Receive the following error
```python
self = <napari._vispy.vispy_shapes_layer.VispyShapesLayer object at 0x149450f28>
def _on_data_change(self):
faces = self.layer._data_view._mesh.displayed_triangles
colors = self.layer._data_view._mesh.displayed_triangles_colors
vertices = self.layer._data_view._mesh.vertices
# Note that the indicies of the vertices need to be resversed to
# go from numpy style to xyz
if vertices is not None:
vertices = vertices[:, ::-1] + 0.5
if len(vertices) == 0 or len(faces) == 0:
vertices = np.zeros((3, self.layer.dims.ndisplay))
faces = np.array([[0, 1, 2]])
colors = np.array([[0, 0, 0, 0]])
if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
> vertices = np.pad(vertices, ((0, 0), (0, 1)))
E TypeError: pad() missing 1 required positional argument: 'mode'
../_vispy/vispy_shapes_layer.py:47: TypeError
```
## Expected behavior
Currently, the requirements specify numpy >= 1.10.0, so I think we should bump to 1.17 (or the oldest version that passes).
</issue>
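
The version incompatibility is reproducible without napari: on NumPy < 1.17 `np.pad` has no default for `mode`, while 1.17+ defaults it to `'constant'`. A minimal sketch:

```python
import numpy as np

vertices = np.zeros((3, 2))

# np.pad(vertices, ((0, 0), (0, 1))) works only on NumPy >= 1.17,
# where mode defaults to 'constant'; on 1.16 it raises
# "TypeError: pad() missing 1 required positional argument: 'mode'".

# Passing mode explicitly behaves the same on old and new NumPy:
padded = np.pad(vertices, ((0, 0), (0, 1)), mode="constant")
print(padded.shape)  # (3, 3)
```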
<code>
[start of napari/_vispy/vispy_vectors_layer.py]
1 from vispy.scene.visuals import Mesh as MeshNode
2 from .vispy_base_layer import VispyBaseLayer
3 import numpy as np
4
5
6 class VispyVectorsLayer(VispyBaseLayer):
7 def __init__(self, layer):
8 node = MeshNode()
9 super().__init__(layer, node)
10
11 self.layer.events.edge_color.connect(lambda e: self._on_data_change())
12
13 self._reset_base()
14 self._on_data_change()
15
16 def _on_data_change(self):
17 if (
18 len(self.layer._view_vertices) == 0
19 or len(self.layer._view_faces) == 0
20 ):
21 vertices = np.zeros((3, self.layer.dims.ndisplay))
22 faces = np.array([[0, 1, 2]])
23 else:
24 vertices = self.layer._view_vertices[:, ::-1] + 0.5
25 faces = self.layer._view_faces
26
27 if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
28 vertices = np.pad(vertices, ((0, 0), (0, 1)))
29
30 self.node.set_data(
31 vertices=vertices, faces=faces, color=self.layer.edge_color
32 )
33 self.node.update()
34
[end of napari/_vispy/vispy_vectors_layer.py]
[start of napari/_vispy/vispy_shapes_layer.py]
1 from vispy.scene.visuals import Line, Mesh, Compound
2 from .markers import Markers
3 from .vispy_base_layer import VispyBaseLayer
4 import numpy as np
5
6
7 class VispyShapesLayer(VispyBaseLayer):
8 def __init__(self, layer):
9 # Create a compound visual with the following four subvisuals:
10 # Markers: corresponding to the vertices of the interaction box or the
11 # shapes that are used for highlights.
12 # Lines: The lines of the interaction box used for highlights.
13 # Mesh: The mesh of the outlines for each shape used for highlights.
14 # Mesh: The actual meshes of the shape faces and edges
15 node = Compound([Mesh(), Mesh(), Line(), Markers()])
16
17 super().__init__(layer, node)
18
19 self.layer.events.edge_width.connect(lambda e: self._on_data_change())
20 self.layer.events.edge_color.connect(lambda e: self._on_data_change())
21 self.layer.events.face_color.connect(lambda e: self._on_data_change())
22 self.layer.events.opacity.connect(lambda e: self._on_data_change())
23 self.layer.events.highlight.connect(
24 lambda e: self._on_highlight_change()
25 )
26
27 self._reset_base()
28 self._on_data_change()
29 self._on_highlight_change()
30
31 def _on_data_change(self):
32 faces = self.layer._data_view._mesh.displayed_triangles
33 colors = self.layer._data_view._mesh.displayed_triangles_colors
34 vertices = self.layer._data_view._mesh.vertices
35
36 # Note that the indicies of the vertices need to be resversed to
37 # go from numpy style to xyz
38 if vertices is not None:
39 vertices = vertices[:, ::-1] + 0.5
40
41 if len(vertices) == 0 or len(faces) == 0:
42 vertices = np.zeros((3, self.layer.dims.ndisplay))
43 faces = np.array([[0, 1, 2]])
44 colors = np.array([[0, 0, 0, 0]])
45
46 if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
47 vertices = np.pad(vertices, ((0, 0), (0, 1)))
48
49 self.node._subvisuals[0].set_data(
50 vertices=vertices, faces=faces, face_colors=colors
51 )
52 self.node.update()
53
54 def _on_highlight_change(self):
55 # Compute the vertices and faces of any shape outlines
56 vertices, faces = self.layer._outline_shapes()
57
58 if vertices is None or len(vertices) == 0 or len(faces) == 0:
59 vertices = np.zeros((3, self.layer.dims.ndisplay))
60 faces = np.array([[0, 1, 2]])
61 else:
62 vertices = vertices + 0.5
63
64 self.node._subvisuals[1].set_data(
65 vertices=vertices, faces=faces, color=self.layer._highlight_color
66 )
67
68 # Compute the location and properties of the vertices and box that
69 # need to get rendered
70 (
71 vertices,
72 face_color,
73 edge_color,
74 pos,
75 width,
76 ) = self.layer._compute_vertices_and_box()
77
78 if vertices is None or len(vertices) == 0:
79 vertices = np.zeros((1, self.layer.dims.ndisplay))
80 size = 0
81 else:
82 vertices = vertices + 0.5
83 size = self.layer._vertex_size
84
85 self.node._subvisuals[3].set_data(
86 vertices,
87 size=size,
88 face_color=face_color,
89 edge_color=edge_color,
90 edge_width=1.5,
91 symbol='square',
92 scaling=False,
93 )
94
95 if pos is None or len(pos) == 0:
96 pos = np.zeros((1, self.layer.dims.ndisplay))
97 width = 0
98 else:
99 pos = pos + 0.5
100
101 self.node._subvisuals[2].set_data(
102 pos=pos, color=edge_color, width=width
103 )
104
105 def _on_opacity_change(self):
106 pass
107
[end of napari/_vispy/vispy_shapes_layer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/napari/_vispy/vispy_shapes_layer.py b/napari/_vispy/vispy_shapes_layer.py
--- a/napari/_vispy/vispy_shapes_layer.py
+++ b/napari/_vispy/vispy_shapes_layer.py
@@ -44,7 +44,7 @@
colors = np.array([[0, 0, 0, 0]])
if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
- vertices = np.pad(vertices, ((0, 0), (0, 1)))
+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')
self.node._subvisuals[0].set_data(
vertices=vertices, faces=faces, face_colors=colors
diff --git a/napari/_vispy/vispy_vectors_layer.py b/napari/_vispy/vispy_vectors_layer.py
--- a/napari/_vispy/vispy_vectors_layer.py
+++ b/napari/_vispy/vispy_vectors_layer.py
@@ -25,7 +25,7 @@
faces = self.layer._view_faces
if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
- vertices = np.pad(vertices, ((0, 0), (0, 1)))
+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')
self.node.set_data(
vertices=vertices, faces=faces, color=self.layer.edge_color
|
{"golden_diff": "diff --git a/napari/_vispy/vispy_shapes_layer.py b/napari/_vispy/vispy_shapes_layer.py\n--- a/napari/_vispy/vispy_shapes_layer.py\n+++ b/napari/_vispy/vispy_shapes_layer.py\n@@ -44,7 +44,7 @@\n colors = np.array([[0, 0, 0, 0]])\n \n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n- vertices = np.pad(vertices, ((0, 0), (0, 1)))\n+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')\n \n self.node._subvisuals[0].set_data(\n vertices=vertices, faces=faces, face_colors=colors\ndiff --git a/napari/_vispy/vispy_vectors_layer.py b/napari/_vispy/vispy_vectors_layer.py\n--- a/napari/_vispy/vispy_vectors_layer.py\n+++ b/napari/_vispy/vispy_vectors_layer.py\n@@ -25,7 +25,7 @@\n faces = self.layer._view_faces\n \n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n- vertices = np.pad(vertices, ((0, 0), (0, 1)))\n+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')\n \n self.node.set_data(\n vertices=vertices, faces=faces, color=self.layer.edge_color\n", "issue": "numpy.pad() issues on version 1.16\n## \ud83d\udc1b Bug\r\n\r\nI had numpy version 1.16 and the `test_viewer.py` tests were failing because `np.pad()` was missing the `mode` positional argument (see below). I noticed that in [v1.17, `mode` is optional](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html) and when I upgraded numpy (1.17.3), the tests passed.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Install numpy version 1.16\r\n2. run the tests in `/napari/tests/test_viewer.py`\r\n3. Receive the following error\r\n\r\n```python\r\nself = <napari._vispy.vispy_shapes_layer.VispyShapesLayer object at 0x149450f28>\r\n\r\n def _on_data_change(self):\r\n faces = self.layer._data_view._mesh.displayed_triangles\r\n colors = self.layer._data_view._mesh.displayed_triangles_colors\r\n vertices = self.layer._data_view._mesh.vertices\r\n\r\n # Note that the indicies of the vertices need to be resversed to\r\n # go from numpy style to xyz\r\n if vertices is not None:\r\n vertices = vertices[:, ::-1] + 0.5\r\n\r\n if len(vertices) == 0 or len(faces) == 0:\r\n vertices = np.zeros((3, self.layer.dims.ndisplay))\r\n faces = np.array([[0, 1, 2]])\r\n colors = np.array([[0, 0, 0, 0]])\r\n\r\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\r\n> vertices = np.pad(vertices, ((0, 0), (0, 1)))\r\nE TypeError: pad() missing 1 required positional argument: 'mode'\r\n\r\n../_vispy/vispy_shapes_layer.py:47: TypeError\r\n```\r\n\r\n## Expected behavior\r\n\r\nCurrently, the requirements specifty numpy >= 1.10.0, so I think we should bump to 1.17 (or the oldest version that passes).\r\n\r\n\n", "before_files": [{"content": "from vispy.scene.visuals import Mesh as MeshNode\nfrom .vispy_base_layer import VispyBaseLayer\nimport numpy as np\n\n\nclass VispyVectorsLayer(VispyBaseLayer):\n def __init__(self, layer):\n node = MeshNode()\n super().__init__(layer, node)\n\n self.layer.events.edge_color.connect(lambda e: self._on_data_change())\n\n self._reset_base()\n self._on_data_change()\n\n def _on_data_change(self):\n if (\n len(self.layer._view_vertices) == 0\n or len(self.layer._view_faces) == 0\n ):\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n else:\n vertices = self.layer._view_vertices[:, ::-1] + 0.5\n faces = self.layer._view_faces\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n vertices = np.pad(vertices, ((0, 0), (0, 1)))\n\n self.node.set_data(\n vertices=vertices, 
faces=faces, color=self.layer.edge_color\n )\n self.node.update()\n", "path": "napari/_vispy/vispy_vectors_layer.py"}, {"content": "from vispy.scene.visuals import Line, Mesh, Compound\nfrom .markers import Markers\nfrom .vispy_base_layer import VispyBaseLayer\nimport numpy as np\n\n\nclass VispyShapesLayer(VispyBaseLayer):\n def __init__(self, layer):\n # Create a compound visual with the following four subvisuals:\n # Markers: corresponding to the vertices of the interaction box or the\n # shapes that are used for highlights.\n # Lines: The lines of the interaction box used for highlights.\n # Mesh: The mesh of the outlines for each shape used for highlights.\n # Mesh: The actual meshes of the shape faces and edges\n node = Compound([Mesh(), Mesh(), Line(), Markers()])\n\n super().__init__(layer, node)\n\n self.layer.events.edge_width.connect(lambda e: self._on_data_change())\n self.layer.events.edge_color.connect(lambda e: self._on_data_change())\n self.layer.events.face_color.connect(lambda e: self._on_data_change())\n self.layer.events.opacity.connect(lambda e: self._on_data_change())\n self.layer.events.highlight.connect(\n lambda e: self._on_highlight_change()\n )\n\n self._reset_base()\n self._on_data_change()\n self._on_highlight_change()\n\n def _on_data_change(self):\n faces = self.layer._data_view._mesh.displayed_triangles\n colors = self.layer._data_view._mesh.displayed_triangles_colors\n vertices = self.layer._data_view._mesh.vertices\n\n # Note that the indicies of the vertices need to be resversed to\n # go from numpy style to xyz\n if vertices is not None:\n vertices = vertices[:, ::-1] + 0.5\n\n if len(vertices) == 0 or len(faces) == 0:\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n colors = np.array([[0, 0, 0, 0]])\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n vertices = np.pad(vertices, ((0, 0), (0, 1)))\n\n self.node._subvisuals[0].set_data(\n vertices=vertices, faces=faces, face_colors=colors\n )\n self.node.update()\n\n def _on_highlight_change(self):\n # Compute the vertices and faces of any shape outlines\n vertices, faces = self.layer._outline_shapes()\n\n if vertices is None or len(vertices) == 0 or len(faces) == 0:\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n else:\n vertices = vertices + 0.5\n\n self.node._subvisuals[1].set_data(\n vertices=vertices, faces=faces, color=self.layer._highlight_color\n )\n\n # Compute the location and properties of the vertices and box that\n # need to get rendered\n (\n vertices,\n face_color,\n edge_color,\n pos,\n width,\n ) = self.layer._compute_vertices_and_box()\n\n if vertices is None or len(vertices) == 0:\n vertices = np.zeros((1, self.layer.dims.ndisplay))\n size = 0\n else:\n vertices = vertices + 0.5\n size = self.layer._vertex_size\n\n self.node._subvisuals[3].set_data(\n vertices,\n size=size,\n face_color=face_color,\n edge_color=edge_color,\n edge_width=1.5,\n symbol='square',\n scaling=False,\n )\n\n if pos is None or len(pos) == 0:\n pos = np.zeros((1, self.layer.dims.ndisplay))\n width = 0\n else:\n pos = pos + 0.5\n\n self.node._subvisuals[2].set_data(\n pos=pos, color=edge_color, width=width\n )\n\n def _on_opacity_change(self):\n pass\n", "path": "napari/_vispy/vispy_shapes_layer.py"}]}
| 2,466 | 351 |
gh_patches_debug_22190
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-11421
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build: expose `ubuntu-24.04` as an option for `build.os`
We are close to Ubuntu 24.04 release. We should expose it to our users.
</issue>
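
A sketch of the kind of change the request implies, assuming the new image follows the existing `readthedocs/build:ubuntu-XX.04` tag naming used by the other entries; whether the `ubuntu-lts-latest` alias should move to 24.04 right away is left open here.

```python
# Hypothetical extension of RTD_DOCKER_BUILD_SETTINGS["os"], mirroring the
# existing 20.04 / 22.04 entries; the 24.04 tag name is an assumption.
DOCKER_DEFAULT_IMAGE = "readthedocs/build"

os_images = {
    "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
    "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
    "ubuntu-24.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-24.04",  # proposed new entry
}

# Keeping the LTS alias on 22.04 until 24.04 has been exercised is one option.
os_images["ubuntu-lts-latest"] = os_images["ubuntu-22.04"]
print(os_images["ubuntu-24.04"])  # readthedocs/build:ubuntu-24.04
```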
<code>
[start of readthedocs/builds/constants_docker.py]
1 """
2 Define constants here to allow import them without any external dependency.
3
4 There are situations where we want to have access to these values without Django installed
5 (e.g. common/dockerfiles/tasks.py)
6
7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.
8 """
9
10 DOCKER_DEFAULT_IMAGE = "readthedocs/build"
11
12 # When adding a new tool/version to this setting, you should:
13 #
14 # - Add a mapping between the expected version in the config file, to the full
15 # version installed via asdf (found via ``asdf list all <tool>``).
16 # - Run the script ``./scripts/compile_version_upload.sh`` in
17 # development to compile and cache the new tool/version.
18 # - Update the CircleCI job on the ``readthedocs-docker-images`` repository with the new versions at
19 # https://github.com/rtfd/readthedocs-docker-images/blob/d2760526abdfe27001946614b749abf8011b7f90/.circleci/config.yml#L38-L44.
20 # - Update the latest aliases for OS and tools (below this setting).
21 # - Update readthedocs/rtd_tests/fixtures/spec/v2/schema.json.
22 # - Update the documentation in ``docs/user/config-file/v2.rst``.
23 RTD_DOCKER_BUILD_SETTINGS = {
24 # Mapping of build.os options to docker image.
25 "os": {
26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
28 },
29 # Mapping of build.tools options to specific versions.
30 "tools": {
31 "python": {
32 "2.7": "2.7.18",
33 "3.6": "3.6.15",
34 "3.7": "3.7.17",
35 "3.8": "3.8.19",
36 "3.9": "3.9.19",
37 "3.10": "3.10.14",
38 "3.11": "3.11.9",
39 "3.12": "3.12.3",
40 "miniconda3-4.7": "miniconda3-4.7.12",
41 "miniconda3-3.12-24.1": "miniconda3-3.12-24.1.2-0",
42 "mambaforge-4.10": "mambaforge-4.10.3-10",
43 "mambaforge-22.9": "mambaforge-22.9.0-3",
44 "mambaforge-23.11": "mambaforge-23.11.0-0",
45 },
46 "nodejs": {
47 "14": "14.20.1",
48 "16": "16.18.1",
49 "18": "18.16.1",
50 "19": "19.0.1",
51 "20": "20.14.0", # LTS
52 },
53 "ruby": {
54 "3.3": "3.3.2",
55 },
56 "rust": {
57 "1.55": "1.55.0",
58 "1.61": "1.61.0",
59 "1.64": "1.64.0",
60 "1.70": "1.70.0",
61 "1.75": "1.75.0",
62 "1.78": "1.78.0",
63 },
64 "golang": {
65 "1.17": "1.17.13",
66 "1.18": "1.18.10",
67 "1.19": "1.19.13",
68 "1.20": "1.20.14",
69 "1.21": "1.21.11",
70 "1.22": "1.22.4",
71 },
72 },
73 }
74
75 # Set latest aliases for OS and tools.
76 _OS = RTD_DOCKER_BUILD_SETTINGS["os"]
77 _TOOLS = RTD_DOCKER_BUILD_SETTINGS["tools"]
78 _OS["ubuntu-lts-latest"] = _OS["ubuntu-22.04"]
79 _TOOLS["python"]["3"] = _TOOLS["python"]["3.12"]
80 _TOOLS["python"]["latest"] = _TOOLS["python"]["3"]
81 _TOOLS["python"]["miniconda-latest"] = _TOOLS["python"]["miniconda3-3.12-24.1"]
82 _TOOLS["python"]["mambaforge-latest"] = _TOOLS["python"]["mambaforge-23.11"]
83 _TOOLS["nodejs"]["latest"] = _TOOLS["nodejs"]["20"]
84 _TOOLS["ruby"]["latest"] = _TOOLS["ruby"]["3.3"]
85 _TOOLS["rust"]["latest"] = _TOOLS["rust"]["1.78"]
86 _TOOLS["golang"]["latest"] = _TOOLS["golang"]["1.22"]
87
[end of readthedocs/builds/constants_docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -25,6 +25,7 @@
"os": {
"ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
"ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
+ "ubuntu-24.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-24.04",
},
# Mapping of build.tools options to specific versions.
"tools": {
@@ -75,7 +76,11 @@
# Set latest aliases for OS and tools.
_OS = RTD_DOCKER_BUILD_SETTINGS["os"]
_TOOLS = RTD_DOCKER_BUILD_SETTINGS["tools"]
+
+# TODO: point ``ubuntu-lts-latest`` to Ubuntu 24.04 LTS once we have tested it
+# in production after some weeks
_OS["ubuntu-lts-latest"] = _OS["ubuntu-22.04"]
+
_TOOLS["python"]["3"] = _TOOLS["python"]["3.12"]
_TOOLS["python"]["latest"] = _TOOLS["python"]["3"]
_TOOLS["python"]["miniconda-latest"] = _TOOLS["python"]["miniconda3-3.12-24.1"]
|
{"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -25,6 +25,7 @@\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n+ \"ubuntu-24.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-24.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n@@ -75,7 +76,11 @@\n # Set latest aliases for OS and tools.\n _OS = RTD_DOCKER_BUILD_SETTINGS[\"os\"]\n _TOOLS = RTD_DOCKER_BUILD_SETTINGS[\"tools\"]\n+\n+# TODO: point ``ubuntu-lts-latest`` to Ubuntu 24.04 LTS once we have tested it\n+# in production after some weeks\n _OS[\"ubuntu-lts-latest\"] = _OS[\"ubuntu-22.04\"]\n+\n _TOOLS[\"python\"][\"3\"] = _TOOLS[\"python\"][\"3.12\"]\n _TOOLS[\"python\"][\"latest\"] = _TOOLS[\"python\"][\"3\"]\n _TOOLS[\"python\"][\"miniconda-latest\"] = _TOOLS[\"python\"][\"miniconda3-3.12-24.1\"]\n", "issue": "Build: expose `ubuntu-24.04` as an option for `build.os`\nWe are close to Ubuntu 24.04 release. We should expose it to our users.\n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# When adding a new tool/version to this setting, you should:\n#\n# - Add a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``).\n# - Run the script ``./scripts/compile_version_upload.sh`` in\n# development to compile and cache the new tool/version.\n# - Update the CircleCI job on the ``readthedocs-docker-images`` repository with the new versions at\n# https://github.com/rtfd/readthedocs-docker-images/blob/d2760526abdfe27001946614b749abf8011b7f90/.circleci/config.yml#L38-L44.\n# - Update the latest aliases for OS and tools (below this setting).\n# - Update readthedocs/rtd_tests/fixtures/spec/v2/schema.json.\n# - Update the documentation in ``docs/user/config-file/v2.rst``.\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.19\",\n \"3.9\": \"3.9.19\",\n \"3.10\": \"3.10.14\",\n \"3.11\": \"3.11.9\",\n \"3.12\": \"3.12.3\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"miniconda3-3.12-24.1\": \"miniconda3-3.12-24.1.2-0\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n \"mambaforge-23.11\": \"mambaforge-23.11.0-0\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\",\n \"19\": \"19.0.1\",\n \"20\": \"20.14.0\", # LTS\n },\n \"ruby\": {\n \"3.3\": \"3.3.2\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n \"1.75\": \"1.75.0\",\n \"1.78\": \"1.78.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.13\",\n \"1.20\": 
\"1.20.14\",\n \"1.21\": \"1.21.11\",\n \"1.22\": \"1.22.4\",\n },\n },\n}\n\n# Set latest aliases for OS and tools.\n_OS = RTD_DOCKER_BUILD_SETTINGS[\"os\"]\n_TOOLS = RTD_DOCKER_BUILD_SETTINGS[\"tools\"]\n_OS[\"ubuntu-lts-latest\"] = _OS[\"ubuntu-22.04\"]\n_TOOLS[\"python\"][\"3\"] = _TOOLS[\"python\"][\"3.12\"]\n_TOOLS[\"python\"][\"latest\"] = _TOOLS[\"python\"][\"3\"]\n_TOOLS[\"python\"][\"miniconda-latest\"] = _TOOLS[\"python\"][\"miniconda3-3.12-24.1\"]\n_TOOLS[\"python\"][\"mambaforge-latest\"] = _TOOLS[\"python\"][\"mambaforge-23.11\"]\n_TOOLS[\"nodejs\"][\"latest\"] = _TOOLS[\"nodejs\"][\"20\"]\n_TOOLS[\"ruby\"][\"latest\"] = _TOOLS[\"ruby\"][\"3.3\"]\n_TOOLS[\"rust\"][\"latest\"] = _TOOLS[\"rust\"][\"1.78\"]\n_TOOLS[\"golang\"][\"latest\"] = _TOOLS[\"golang\"][\"1.22\"]\n", "path": "readthedocs/builds/constants_docker.py"}]}
| 1,919 | 325 |
gh_patches_debug_13461
|
rasdani/github-patches
|
git_diff
|
huggingface__optimum-217
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Support for DeBERTaV2
I would like to use DeBERTaV2 for sequence classification as a quantized model. Please let me know what needs to be done to open a PR to add this support!
</issue>
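
Going by the `ORTConfigManager._conf` table in the code below, supporting a new architecture mostly means registering how to read its attention-head count and hidden size from the model config. A hedged sketch of what DeBERTa(-v2) entries might look like; the attribute names are an assumption mirrored from the other BERT-like rows, not a confirmed mapping.

```python
# Hypothetical registration of DeBERTa(-v2) in the optimization config table;
# the attribute names below are assumptions copied from the BERT-like rows.
_conf = {
    "bert": ("num_attention_heads", "hidden_size", "bert"),
    "deberta": ("num_attention_heads", "hidden_size", "bert"),
    "deberta-v2": ("num_attention_heads", "hidden_size", "bert"),
}

num_heads_attr, hidden_size_attr, ort_model_type = _conf["deberta-v2"]
print(num_heads_attr, hidden_size_attr, ort_model_type)
```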
<code>
[start of optimum/onnxruntime/utils.py]
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from pathlib import Path
15 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
16
17 import torch
18 from transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
19 from transformers.utils import logging
20
21 import onnx
22 import onnxruntime as ort
23
24 from ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss
25
26
27 logger = logging.get_logger(__name__)
28
29 ONNX_WEIGHTS_NAME = "model.onnx"
30 OPTIMIZED_ONNX_WEIGHTS_NAME = "optimized_model.onnx"
31 QUANTIZED_ONNX_WEIGHTS_NAME = "q8_model.onnx"
32
33
34 def _is_gpu_available():
35 """
36 checks if a gpu is available.
37 """
38 available_providers = ort.get_available_providers()
39 if "CUDAExecutionProvider" in available_providers and torch.cuda.is_available():
40 return True
41 else:
42 return False
43
44
45 class ORTConfigManager:
46 """
47 A class that contains all the information needed by ONNX Runtime optimization for a given model type.
48
49 Attributes:
50 _conf (`Dict[str, tuple]`):
51 A dictionary mapping each supported model type to a tuple containing the number of attention heads
52 and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.
53 """
54
55 _conf = {
56 "bert": ("num_attention_heads", "hidden_size", "bert"),
57 "albert": ("num_attention_heads", "hidden_size", "bert"),
58 "camembert": ("num_attention_heads", "hidden_size", "bert"),
59 "distilbert": ("n_heads", "dim", "bert"),
60 "electra": ("num_attention_heads", "hidden_size", "bert"),
61 "roberta": ("num_attention_heads", "hidden_size", "bert"),
62 "bart": ("encoder_attention_heads", "d_model", "bart"),
63 "gpt2": ("n_head", "n_embd", "gpt2"),
64 "gpt_neo": ("num_heads", "hidden_size", "gpt2"),
65 }
66
67 @classmethod
68 def get_num_heads_name(cls, model_type: str) -> str:
69 num_heads = "num_attention_heads"
70 try:
71 num_heads = cls._conf[model_type][0]
72 except KeyError:
73 logger.warning(
74 f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to "
75 f"access the number of heads defined in the config is set to `{num_heads}`."
76 )
77 return num_heads
78
79 @classmethod
80 def get_hidden_size_name(cls, model_type: str) -> str:
81 hidden_size = "hidden_size"
82 try:
83 hidden_size = cls._conf[model_type][1]
84 except KeyError:
85 logger.warning(
86 f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to "
87 f"access the hidden size defined in the config is set to `{hidden_size}`."
88 )
89 return hidden_size
90
91 @classmethod
92 def get_model_ort_type(cls, model_type: str) -> str:
93 try:
94 model_type = cls._conf[model_type][2]
95 except KeyError:
96 logger.warning(f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.")
97 return model_type
98
99 @classmethod
100 def check_supported_model_or_raise(cls, model_type: str) -> bool:
101 if model_type not in cls._conf:
102 raise KeyError(
103 f"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. "
104 f"If you want to support {model_type} please propose a PR or open up an issue."
105 )
106
107
108 def generate_identified_filename(filename, identifier):
109 return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
110
111
112 def fix_atenops_to_gather(model_path):
113 # Fix broken ATenOp nodes back to Gather nodes.
114 model = onnx.load(model_path)
115 onnx.checker.check_model(model)
116
117 nodes = model.graph.node
118
119 for node in nodes:
120 if node.op_type in ["ATenOp", "ATen"]:
121 logger.info(f"----Start fixing node: {node.name}----")
122 op_num = node.name.split("_")[-1]
123 new_node = onnx.helper.make_node(
124 "Gather",
125 name="Gather_" + op_num,
126 inputs=[node.input[0], node.input[1]],
127 outputs=node.output,
128 )
129
130 model.graph.node.remove(node)
131 model.graph.node.insert(int(op_num), new_node)
132
133 onnx.checker.check_model(model)
134 onnx.save(model, model_path)
135
136
137 def wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:
138 if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):
139 return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)
140 elif isinstance(onnx_config, OnnxConfigWithPast):
141 return OnnxConfigWithPastAndLoss(onnx_config)
142 else:
143 return OnnxConfigWithLoss(onnx_config)
144
[end of optimum/onnxruntime/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py
--- a/optimum/onnxruntime/utils.py
+++ b/optimum/onnxruntime/utils.py
@@ -57,6 +57,8 @@
"albert": ("num_attention_heads", "hidden_size", "bert"),
"camembert": ("num_attention_heads", "hidden_size", "bert"),
"distilbert": ("n_heads", "dim", "bert"),
+ "deberta": ("num_attention_heads", "hidden_size", "bert"),
+ "deberta-v2": ("num_attention_heads", "hidden_size", "bert"),
"electra": ("num_attention_heads", "hidden_size", "bert"),
"roberta": ("num_attention_heads", "hidden_size", "bert"),
"bart": ("encoder_attention_heads", "d_model", "bart"),
|
{"golden_diff": "diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py\n--- a/optimum/onnxruntime/utils.py\n+++ b/optimum/onnxruntime/utils.py\n@@ -57,6 +57,8 @@\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n+ \"deberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n+ \"deberta-v2\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n", "issue": "Add Support for DeBERTaV2\nI would like to use DeBERTaV2 for sequence classification as a quantized model. Please let me know what needs to be done to open a PR to add this support!\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom transformers.utils import logging\n\nimport onnx\nimport onnxruntime as ort\n\nfrom ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss\n\n\nlogger = logging.get_logger(__name__)\n\nONNX_WEIGHTS_NAME = \"model.onnx\"\nOPTIMIZED_ONNX_WEIGHTS_NAME = \"optimized_model.onnx\"\nQUANTIZED_ONNX_WEIGHTS_NAME = \"q8_model.onnx\"\n\n\ndef _is_gpu_available():\n \"\"\"\n checks if a gpu is available.\n \"\"\"\n available_providers = ort.get_available_providers()\n if \"CUDAExecutionProvider\" in available_providers and torch.cuda.is_available():\n return True\n else:\n return False\n\n\nclass ORTConfigManager:\n \"\"\"\n A class that contains all the information needed by ONNX Runtime optimization for a given model type.\n\n Attributes:\n _conf (`Dict[str, tuple]`):\n A dictionary mapping each supported model type to a tuple containing the number of attention heads\n and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.\n \"\"\"\n\n _conf = {\n \"bert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n }\n\n @classmethod\n def get_num_heads_name(cls, model_type: str) -> str:\n num_heads = \"num_attention_heads\"\n try:\n num_heads = 
cls._conf[model_type][0]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the number of heads defined in the config is set to `{num_heads}`.\"\n )\n return num_heads\n\n @classmethod\n def get_hidden_size_name(cls, model_type: str) -> str:\n hidden_size = \"hidden_size\"\n try:\n hidden_size = cls._conf[model_type][1]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the hidden size defined in the config is set to `{hidden_size}`.\"\n )\n return hidden_size\n\n @classmethod\n def get_model_ort_type(cls, model_type: str) -> str:\n try:\n model_type = cls._conf[model_type][2]\n except KeyError:\n logger.warning(f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.\")\n return model_type\n\n @classmethod\n def check_supported_model_or_raise(cls, model_type: str) -> bool:\n if model_type not in cls._conf:\n raise KeyError(\n f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\n f\"If you want to support {model_type} please propose a PR or open up an issue.\"\n )\n\n\ndef generate_identified_filename(filename, identifier):\n return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)\n\n\ndef fix_atenops_to_gather(model_path):\n # Fix broken ATenOp nodes back to Gather nodes.\n model = onnx.load(model_path)\n onnx.checker.check_model(model)\n\n nodes = model.graph.node\n\n for node in nodes:\n if node.op_type in [\"ATenOp\", \"ATen\"]:\n logger.info(f\"----Start fixing node: {node.name}----\")\n op_num = node.name.split(\"_\")[-1]\n new_node = onnx.helper.make_node(\n \"Gather\",\n name=\"Gather_\" + op_num,\n inputs=[node.input[0], node.input[1]],\n outputs=node.output,\n )\n\n model.graph.node.remove(node)\n model.graph.node.insert(int(op_num), new_node)\n\n onnx.checker.check_model(model)\n onnx.save(model, model_path)\n\n\ndef wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:\n if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):\n return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)\n elif isinstance(onnx_config, OnnxConfigWithPast):\n return OnnxConfigWithPastAndLoss(onnx_config)\n else:\n return OnnxConfigWithLoss(onnx_config)\n", "path": "optimum/onnxruntime/utils.py"}]}
| 2,197 | 194 |
gh_patches_debug_26700
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-2563
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SSIM update does not work for the last batch (if batch size is different)
## 🐛 Bug description

## Environment
- PyTorch Version: 1.10.1
- Ignite Version: 0.4.8
- OS: Ubuntu
- How you installed Ignite (`conda`, `pip`, source):
- Python version: 3.9
- Any other relevant information:
If the previous batch and the current batch have different sizes, the SSIM update throws the error shown in the attached screenshot above. A screenshot of the basic reproduction code is also attached below.

</issue>
<code>
[start of ignite/metrics/ssim.py]
1 from typing import Callable, Sequence, Union
2
3 import torch
4 import torch.nn.functional as F
5
6 from ignite.exceptions import NotComputableError
7 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
8
9 __all__ = ["SSIM"]
10
11
12 class SSIM(Metric):
13 """
14 Computes Structual Similarity Index Measure
15
16 Args:
17 data_range: Range of the image. Typically, ``1.0`` or ``255``.
18 kernel_size: Size of the kernel. Default: (11, 11)
19 sigma: Standard deviation of the gaussian kernel.
20 Argument is used if ``gaussian=True``. Default: (1.5, 1.5)
21 k1: Parameter of SSIM. Default: 0.01
22 k2: Parameter of SSIM. Default: 0.03
23 gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel
24 output_transform: A callable that is used to transform the
25 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
26 form expected by the metric.
27 device: specifies which device updates are accumulated on. Setting the metric's
28 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
29 default, CPU.
30
31 Examples:
32 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
33 The output of the engine's ``process_function`` needs to be in the format of
34 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
35
36 ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need
37 to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.
38
39 For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
40
41 .. include:: defaults.rst
42 :start-after: :orphan:
43
44 .. testcode::
45
46 metric = SSIM(data_range=1.0)
47 metric.attach(default_evaluator, 'ssim')
48 preds = torch.rand([4, 3, 16, 16])
49 target = preds * 0.75
50 state = default_evaluator.run([[preds, target]])
51 print(state.metrics['ssim'])
52
53 .. testoutput::
54
55 0.9218971...
56
57 .. versionadded:: 0.4.2
58 """
59
60 def __init__(
61 self,
62 data_range: Union[int, float],
63 kernel_size: Union[int, Sequence[int]] = (11, 11),
64 sigma: Union[float, Sequence[float]] = (1.5, 1.5),
65 k1: float = 0.01,
66 k2: float = 0.03,
67 gaussian: bool = True,
68 output_transform: Callable = lambda x: x,
69 device: Union[str, torch.device] = torch.device("cpu"),
70 ):
71 if isinstance(kernel_size, int):
72 self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]
73 elif isinstance(kernel_size, Sequence):
74 self.kernel_size = kernel_size
75 else:
76 raise ValueError("Argument kernel_size should be either int or a sequence of int.")
77
78 if isinstance(sigma, float):
79 self.sigma = [sigma, sigma] # type: Sequence[float]
80 elif isinstance(sigma, Sequence):
81 self.sigma = sigma
82 else:
83 raise ValueError("Argument sigma should be either float or a sequence of float.")
84
85 if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):
86 raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.")
87
88 if any(y <= 0 for y in self.sigma):
89 raise ValueError(f"Expected sigma to have positive number. Got {sigma}.")
90
91 super(SSIM, self).__init__(output_transform=output_transform, device=device)
92 self.gaussian = gaussian
93 self.c1 = (k1 * data_range) ** 2
94 self.c2 = (k2 * data_range) ** 2
95 self.pad_h = (self.kernel_size[0] - 1) // 2
96 self.pad_w = (self.kernel_size[1] - 1) // 2
97 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
98
99 @reinit__is_reduced
100 def reset(self) -> None:
101 # Not a tensor because batch size is not known in advance.
102 self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]
103 self._num_examples = 0
104 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
105
106 def _uniform(self, kernel_size: int) -> torch.Tensor:
107 max, min = 2.5, -2.5
108 ksize_half = (kernel_size - 1) * 0.5
109 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
110 for i, j in enumerate(kernel):
111 if min <= j <= max:
112 kernel[i] = 1 / (max - min)
113 else:
114 kernel[i] = 0
115
116 return kernel.unsqueeze(dim=0) # (1, kernel_size)
117
118 def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:
119 ksize_half = (kernel_size - 1) * 0.5
120 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
121 gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))
122 return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)
123
124 def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:
125 if self.gaussian:
126 kernel_x = self._gaussian(kernel_size[0], sigma[0])
127 kernel_y = self._gaussian(kernel_size[1], sigma[1])
128 else:
129 kernel_x = self._uniform(kernel_size[0])
130 kernel_y = self._uniform(kernel_size[1])
131
132 return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)
133
134 @reinit__is_reduced
135 def update(self, output: Sequence[torch.Tensor]) -> None:
136 y_pred, y = output[0].detach(), output[1].detach()
137
138 if y_pred.dtype != y.dtype:
139 raise TypeError(
140 f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
141 )
142
143 if y_pred.shape != y.shape:
144 raise ValueError(
145 f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
146 )
147
148 if len(y_pred.shape) != 4 or len(y.shape) != 4:
149 raise ValueError(
150 f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
151 )
152
153 channel = y_pred.size(1)
154 if len(self._kernel.shape) < 4:
155 self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)
156
157 y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
158 y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
159
160 input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])
161 outputs = F.conv2d(input_list, self._kernel, groups=channel)
162
163 output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]
164
165 mu_pred_sq = output_list[0].pow(2)
166 mu_target_sq = output_list[1].pow(2)
167 mu_pred_target = output_list[0] * output_list[1]
168
169 sigma_pred_sq = output_list[2] - mu_pred_sq
170 sigma_target_sq = output_list[3] - mu_target_sq
171 sigma_pred_target = output_list[4] - mu_pred_target
172
173 a1 = 2 * mu_pred_target + self.c1
174 a2 = 2 * sigma_pred_target + self.c2
175 b1 = mu_pred_sq + mu_target_sq + self.c1
176 b2 = sigma_pred_sq + sigma_target_sq + self.c2
177
178 ssim_idx = (a1 * a2) / (b1 * b2)
179 self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)
180 self._num_examples += y.shape[0]
181
182 @sync_all_reduce("_sum_of_batchwise_ssim", "_num_examples")
183 def compute(self) -> torch.Tensor:
184 if self._num_examples == 0:
185 raise NotComputableError("SSIM must have at least one example before it can be computed.")
186 return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]
187
[end of ignite/metrics/ssim.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -98,8 +98,7 @@
@reinit__is_reduced
def reset(self) -> None:
- # Not a tensor because batch size is not known in advance.
- self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]
+ self._sum_of_ssim = torch.tensor(0.0, device=self._device)
self._num_examples = 0
self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
@@ -176,11 +175,12 @@
b2 = sigma_pred_sq + sigma_target_sq + self.c2
ssim_idx = (a1 * a2) / (b1 * b2)
- self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)
+ self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)
+
self._num_examples += y.shape[0]
- @sync_all_reduce("_sum_of_batchwise_ssim", "_num_examples")
+ @sync_all_reduce("_sum_of_ssim", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("SSIM must have at least one example before it can be computed.")
- return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]
+ return self._sum_of_ssim / self._num_examples
|
{"golden_diff": "diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py\n--- a/ignite/metrics/ssim.py\n+++ b/ignite/metrics/ssim.py\n@@ -98,8 +98,7 @@\n \n @reinit__is_reduced\n def reset(self) -> None:\n- # Not a tensor because batch size is not known in advance.\n- self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]\n+ self._sum_of_ssim = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n \n@@ -176,11 +175,12 @@\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n \n ssim_idx = (a1 * a2) / (b1 * b2)\n- self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)\n+ self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)\n+\n self._num_examples += y.shape[0]\n \n- @sync_all_reduce(\"_sum_of_batchwise_ssim\", \"_num_examples\")\n+ @sync_all_reduce(\"_sum_of_ssim\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n- return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]\n+ return self._sum_of_ssim / self._num_examples\n", "issue": "SSIM update does not work for the last batch (if batch size is different)\n## \ud83d\udc1b Bug description\r\n\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n<!-- Please, add steps on how to reproduce it. -->\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## Environment\r\n\r\n- PyTorch Version :1.10.1\r\n- Ignite Version ():0.4.8\r\n- OS : Ubuntu\r\n- How you installed Ignite (`conda`, `pip`, source): \r\n- Python version: 3.9\r\n- Any other relevant information:\r\n\r\nIf the previous batch and the current batch are different then it throws the error. Attaching the screen shot of error. \r\n\r\nScreenshot of the basic code is also attached.\r\n\r\n\n", "before_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"SSIM\"]\n\n\nclass SSIM(Metric):\n \"\"\"\n Computes Structual Similarity Index Measure\n\n Args:\n data_range: Range of the image. Typically, ``1.0`` or ``255``.\n kernel_size: Size of the kernel. Default: (11, 11)\n sigma: Standard deviation of the gaussian kernel.\n Argument is used if ``gaussian=True``. Default: (1.5, 1.5)\n k1: Parameter of SSIM. Default: 0.01\n k2: Parameter of SSIM. Default: 0.03\n gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel\n output_transform: A callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in the format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need\n to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n metric = SSIM(data_range=1.0)\n metric.attach(default_evaluator, 'ssim')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['ssim'])\n\n .. testoutput::\n\n 0.9218971...\n\n .. versionadded:: 0.4.2\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n kernel_size: Union[int, Sequence[int]] = (11, 11),\n sigma: Union[float, Sequence[float]] = (1.5, 1.5),\n k1: float = 0.01,\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if isinstance(kernel_size, int):\n self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]\n elif isinstance(kernel_size, Sequence):\n self.kernel_size = kernel_size\n else:\n raise ValueError(\"Argument kernel_size should be either int or a sequence of int.\")\n\n if isinstance(sigma, float):\n self.sigma = [sigma, sigma] # type: Sequence[float]\n elif isinstance(sigma, Sequence):\n self.sigma = sigma\n else:\n raise ValueError(\"Argument sigma should be either float or a sequence of float.\")\n\n if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):\n raise ValueError(f\"Expected kernel_size to have odd positive number. Got {kernel_size}.\")\n\n if any(y <= 0 for y in self.sigma):\n raise ValueError(f\"Expected sigma to have positive number. 
Got {sigma}.\")\n\n super(SSIM, self).__init__(output_transform=output_transform, device=device)\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n @reinit__is_reduced\n def reset(self) -> None:\n # Not a tensor because batch size is not known in advance.\n self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n def _uniform(self, kernel_size: int) -> torch.Tensor:\n max, min = 2.5, -2.5\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n else:\n kernel[i] = 0\n\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:\n if self.gaussian:\n kernel_x = self._gaussian(kernel_size[0], sigma[0])\n kernel_y = self._gaussian(kernel_size[1], sigma[1])\n else:\n kernel_x = self._uniform(kernel_size[0])\n kernel_y = self._uniform(kernel_size[1])\n\n return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n if len(y_pred.shape) != 4 or len(y.shape) != 4:\n raise ValueError(\n f\"Expected y_pred and y to have BxCxHxW shape. 
Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n channel = y_pred.size(1)\n if len(self._kernel.shape) < 4:\n self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)\n\n y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n\n input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n outputs = F.conv2d(input_list, self._kernel, groups=channel)\n\n output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n\n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n mu_pred_target = output_list[0] * output_list[1]\n\n sigma_pred_sq = output_list[2] - mu_pred_sq\n sigma_target_sq = output_list[3] - mu_target_sq\n sigma_pred_target = output_list[4] - mu_pred_target\n\n a1 = 2 * mu_pred_target + self.c1\n a2 = 2 * sigma_pred_target + self.c2\n b1 = mu_pred_sq + mu_target_sq + self.c1\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n\n ssim_idx = (a1 * a2) / (b1 * b2)\n self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_batchwise_ssim\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]\n", "path": "ignite/metrics/ssim.py"}]}
| 3,477 | 418 |
gh_patches_debug_21461
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1902
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unhandled exception in wit.ai matcher (wrong wit.ai response parsing)
# Description
Opsdroid doesn't understand wit.ai time range response
## Steps to Reproduce
1. Connect with wit.ai app
2. Create a new intent
3. Train the intent to understand `wit/datetime`
4. Send a message like `aws cost since december`
5. Opsdroid will fail to understand wit.ai api response
```
INFO opsdroid.parsers.witai.call_witai(): wit.ai response - {"_text": "aws cost since december", "entities": {"intent": [{"confidence": 0.99986626506986, "value": "aws_cost"}], "datetime": [{"confidence": 0.9995, "type": "interval", "from": {"grain": "month", "value": "2022-12-01T00:00:00.000-08:00"}, "values": [{"type": "interval", "from": {"grain": "month", "value": "2022-12-01T00:00:00.000-08:00"}}, {"type": "interval", "from": {"grain": "month", "value": "2023-12-01T00:00:00.000-08:00"}}, {"type": "interval", "from": {"grain": "month", "value": "2024-12-01T00:00:00.000-08:00"}}]}]}, "WARNING": "DEPRECATED", "msg_id": "0eanvH01TmiwU0Era"}.
ERROR aiohttp.server.log_exception(): Error handling request
Traceback (most recent call last):
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\aiohttp\web_protocol.py", line 435, in _handle_request
resp = await request_handler(request)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\aiohttp\web_app.py", line 504, in _handle
resp = await handler(request)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\connector\websocket\__init__.py", line 99, in websocket_handler
await self.opsdroid.parse(message)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\core.py", line 627, in parse
ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\core.py", line 516, in get_ranked_skills
ranked_skills += await parse_witai(self, skills, message, witai)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\parsers\witai.py", line 72, in parse_witai
key, entity[0]["value"], entity[0]["confidence"]
KeyError: 'value'
```
## Expected Functionality
There should be no exception
## Experienced Functionality
Thrown exception
## Versions
- **Opsdroid version:** 0.25.0
- **Python version:** 3.9.5
- **OS/Docker version:** Windows 10
## Configuration File
Please include your version of the configuration file below.
```yaml
# Your code goes here.
parsers:
- name: witai
access-token: [REDACTED]
min-score: 0.6
```
## Additional Details
Raw wit.ai response for `aws cost since december`
```json
{
"_text": "aws cost since december",
"entities": {
"intent": [
{
"confidence": 0.99965322126667,
"value": "aws_cost"
}
],
"datetime": [
{
"confidence": 0.9995,
"type": "interval",
"from": {
"grain": "month",
"value": "2022-12-01T00:00:00.000-08:00"
},
"values": [
{
"type": "interval",
"from": {
"grain": "month",
"value": "2022-12-01T00:00:00.000-08:00"
}
},
{
"type": "interval",
"from": {
"grain": "month",
"value": "2023-12-01T00:00:00.000-08:00"
}
},
{
"type": "interval",
"from": {
"grain": "month",
"value": "2024-12-01T00:00:00.000-08:00"
}
}
]
}
]
},
"WARNING": "DEPRECATED",
"msg_id": "051qg0BBGn4O7xZDj"
}
```
Here we can see that wit.ai sends `values` in `datetime[0]` dict.
Opsdroid expects it to be `value` (without **s**):
https://github.com/opsdroid/opsdroid/blob/c5dad210fe3d9068c75cd4fac9762fcc353335d3/opsdroid/parsers/witai.py#L69-L73
This parsing is fine for a simple response that contains no matched entities other than the intent (the intent itself is excluded by the `if` on L70):
```json
{
"_text": "aws",
"entities": {
"intent": [
{
"confidence": 0.99692494474705,
"value": "aws_cost"
}
]
},
"WARNING": "DEPRECATED",
"msg_id": "0lbTZJcwDL5RoT2Wi"
}
```
For the simple query `aws cost today`, where `values` holds only a single entry, wit.ai also flattens that entry into a top-level `value` field, so the existing parsing still works:
```json
{
"_text": "aws cost today",
"entities": {
"intent": [
{
"confidence": 0.99965536553564,
"value": "aws_cost"
}
],
"datetime": [
{
"confidence": 0.9995,
"type": "value",
"grain": "day",
"value": "2022-04-16T00:00:00.000-07:00",
"values": [
{
"type": "value",
"grain": "day",
"value": "2022-04-16T00:00:00.000-07:00"
}
]
}
]
},
"WARNING": "DEPRECATED",
"msg_id": "05vACT9WHhDUmAy9u"
}
```
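As a purely illustrative sketch (not the project's actual fix — the helper name below is made up for this example), the parser could tolerate both response shapes by falling back from `value` to `values`:

```python
def extract_entity_value(entity):
    """Pick something usable out of a wit.ai entity dict.

    Simple matches carry a flat "value"; interval-style datetime
    matches may only carry a "values" list, so check both.
    """
    if "value" in entity:
        return entity["value"]
    if "values" in entity:
        # Return the whole list and let the skill decide what it needs.
        return entity["values"]
    return None


# Example with the interval-style datetime entity from the issue:
datetime_entity = {
    "confidence": 0.9995,
    "type": "interval",
    "from": {"grain": "month", "value": "2022-12-01T00:00:00.000-08:00"},
    "values": [
        {"type": "interval",
         "from": {"grain": "month", "value": "2022-12-01T00:00:00.000-08:00"}},
    ],
}
print(extract_entity_value(datetime_entity))  # -> the "values" list
```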
</issue>
<code>
[start of opsdroid/parsers/witai.py]
1 """A helper function for parsing and executing wit.ai skills."""
2
3 import logging
4 import json
5
6 import aiohttp
7
8 from voluptuous import Required
9
10 from opsdroid.const import WITAI_DEFAULT_VERSION
11 from opsdroid.const import WITAI_API_ENDPOINT
12
13 _LOGGER = logging.getLogger(__name__)
14 CONFIG_SCHEMA = {Required("token"): str, "min-score": float}
15
16
17 async def call_witai(message, config):
18 """Call the wit.ai api and return the response."""
19 async with aiohttp.ClientSession(trust_env=True) as session:
20 headers = {"Authorization": "Bearer " + config["token"]}
21 payload = {"v": WITAI_DEFAULT_VERSION, "q": message.text}
22 resp = await session.get(
23 WITAI_API_ENDPOINT + "v={}&q={}".format(payload["v"], payload["q"]),
24 headers=headers,
25 )
26 result = await resp.json()
27 _LOGGER.info(_("wit.ai response - %s."), json.dumps(result))
28 return result
29
30
31 async def parse_witai(opsdroid, skills, message, config):
32 """Parse a message against all witai skills."""
33 matched_skills = []
34 if "token" in config:
35 try:
36 result = await call_witai(message, config)
37 except aiohttp.ClientOSError:
38 _LOGGER.error(_("No response from wit.ai, check your network."))
39 return matched_skills
40
41 if "code" in result:
42 _LOGGER.error(
43 _("wit.ai error - %s %s"), str(result["code"]), str(result["error"])
44 )
45 return matched_skills
46
47 if result["entities"] == {}:
48 _LOGGER.error(
49 _("wit.ai error - No intent found. Did you forget to create one?")
50 )
51 return matched_skills
52
53 try:
54 confidence = result["entities"]["intent"][0]["confidence"]
55 except KeyError:
56 confidence = 0.0
57 if "min-score" in config and confidence < config["min-score"]:
58 _LOGGER.info(_("wit.ai score lower than min-score."))
59 return matched_skills
60
61 if result:
62 for skill in skills:
63 for matcher in skill.matchers:
64 if "witai_intent" in matcher:
65 if matcher["witai_intent"] in [
66 i["value"] for i in result["entities"]["intent"]
67 ]:
68 message.witai = result
69 for key, entity in result["entities"].items():
70 if key != "intent":
71 message.update_entity(
72 key, entity[0]["value"], entity[0]["confidence"]
73 )
74 matched_skills.append(
75 {
76 "score": confidence,
77 "skill": skill,
78 "config": skill.config,
79 "message": message,
80 }
81 )
82 return matched_skills
83
[end of opsdroid/parsers/witai.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/parsers/witai.py b/opsdroid/parsers/witai.py
--- a/opsdroid/parsers/witai.py
+++ b/opsdroid/parsers/witai.py
@@ -68,8 +68,16 @@
message.witai = result
for key, entity in result["entities"].items():
if key != "intent":
+ witai_entity_value = ""
+ if "value" in entity[0]:
+ witai_entity_value = entity[0]["value"]
+ elif "values" in entity[0]:
+ # we never know which data are important for user,
+ # so we return list with all values
+ witai_entity_value = entity[0]["values"]
+
message.update_entity(
- key, entity[0]["value"], entity[0]["confidence"]
+ key, witai_entity_value, entity[0]["confidence"]
)
matched_skills.append(
{
|
{"golden_diff": "diff --git a/opsdroid/parsers/witai.py b/opsdroid/parsers/witai.py\n--- a/opsdroid/parsers/witai.py\n+++ b/opsdroid/parsers/witai.py\n@@ -68,8 +68,16 @@\n message.witai = result\n for key, entity in result[\"entities\"].items():\n if key != \"intent\":\n+ witai_entity_value = \"\"\n+ if \"value\" in entity[0]:\n+ witai_entity_value = entity[0][\"value\"]\n+ elif \"values\" in entity[0]:\n+ # we never know which data are important for user,\n+ # so we return list with all values\n+ witai_entity_value = entity[0][\"values\"]\n+\n message.update_entity(\n- key, entity[0][\"value\"], entity[0][\"confidence\"]\n+ key, witai_entity_value, entity[0][\"confidence\"]\n )\n matched_skills.append(\n {\n", "issue": "Unhandled exception in wit.ai matcher (wrong wit.ai response parsing)\n# Description\r\nOpsdroid doesn't understand wit.ai time range response\r\n\r\n\r\n## Steps to Reproduce\r\nPlease also include relevant information and steps to reproduce the bug/issue.\r\n1. Connect with wit.ai app\r\n2. Create new indent\r\n3. Train indent to understand `wit/datetime`\r\n4. Send message like `aws cost since december`\r\n5. Opsdroid will fail to understand wit.ai api response\r\n\r\n```\r\nINFO opsdroid.parsers.witai.call_witai(): wit.ai response - {\"_text\": \"aws cost since december\", \"entities\": {\"intent\": [{\"confidence\": 0.99986626506986, \"value\": \"aws_cost\"}], \"datetime\": [{\"confidence\": 0.9995, \"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2022-12-01T00:00:00.000-08:00\"}, \"values\": [{\"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2022-12-01T00:00:00.000-08:00\"}}, {\"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2023-12-01T00:00:00.000-08:00\"}}, {\"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2024-12-01T00:00:00.000-08:00\"}}]}]}, \"WARNING\": \"DEPRECATED\", \"msg_id\": \"0eanvH01TmiwU0Era\"}.\r\nERROR aiohttp.server.log_exception(): Error handling request\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\aiohttp\\web_protocol.py\", line 435, in _handle_request\r\n resp = await request_handler(request)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\aiohttp\\web_app.py\", line 504, in _handle\r\n resp = await handler(request)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\connector\\websocket\\__init__.py\", line 99, in websocket_handler\r\n await self.opsdroid.parse(message)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\core.py\", line 627, in parse\r\n ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\core.py\", line 516, in get_ranked_skills\r\n ranked_skills += await parse_witai(self, skills, message, witai)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\parsers\\witai.py\", line 72, in parse_witai\r\n key, entity[0][\"value\"], entity[0][\"confidence\"]\r\nKeyError: 'value'\r\n```\r\n\r\n## Expected Functionality\r\nThere should be no exception\r\n\r\n\r\n## Experienced Functionality\r\nThrown exception\r\n\r\n## Versions\r\n- **Opsdroid version:** 0.25.0\r\n- **Python version:** 3.9.5 \r\n- **OS/Docker version:** Windows 10\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\n# Your code 
goes here.\r\nparsers:\r\n - name: witai\r\n access-token: [REDACTED]\r\n min-score: 0.6\r\n```\r\n\r\n## Additional Details\r\n\r\nRaw wit.ai response for `aws cost since december`\r\n\r\n```json\r\n{\r\n \"_text\": \"aws cost since december\",\r\n \"entities\": {\r\n \"intent\": [\r\n {\r\n \"confidence\": 0.99965322126667,\r\n \"value\": \"aws_cost\"\r\n }\r\n ],\r\n \"datetime\": [\r\n {\r\n \"confidence\": 0.9995,\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2022-12-01T00:00:00.000-08:00\"\r\n },\r\n \"values\": [\r\n {\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2022-12-01T00:00:00.000-08:00\"\r\n }\r\n },\r\n {\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2023-12-01T00:00:00.000-08:00\"\r\n }\r\n },\r\n {\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2024-12-01T00:00:00.000-08:00\"\r\n }\r\n }\r\n ]\r\n }\r\n ]\r\n },\r\n \"WARNING\": \"DEPRECATED\",\r\n \"msg_id\": \"051qg0BBGn4O7xZDj\"\r\n}\r\n\r\n```\r\n\r\nHere we can see that wit.ai sends `values` in `datetime[0]` dict. \r\n\r\nOpsdroid expects it to be `value` (without **s**):\r\n\r\nhttps://github.com/opsdroid/opsdroid/blob/c5dad210fe3d9068c75cd4fac9762fcc353335d3/opsdroid/parsers/witai.py#L69-L73\r\n\r\nWhich is fine for simple response without any matched intents (excluded by `if` in L70):\r\n\r\n```json\r\n{\r\n \"_text\": \"aws\",\r\n \"entities\": {\r\n \"intent\": [\r\n {\r\n \"confidence\": 0.99692494474705,\r\n \"value\": \"aws_cost\"\r\n }\r\n ]\r\n },\r\n \"WARNING\": \"DEPRECATED\",\r\n \"msg_id\": \"0lbTZJcwDL5RoT2Wi\"\r\n}\r\n```\r\n\r\nSimple query `aws cost today`. If there is only one field in `values` wit.ai will rewrite it to short version to `values`\r\n\r\n```json\r\n{\r\n \"_text\": \"aws cost today\",\r\n \"entities\": {\r\n \"intent\": [\r\n {\r\n \"confidence\": 0.99965536553564,\r\n \"value\": \"aws_cost\"\r\n }\r\n ],\r\n \"datetime\": [\r\n {\r\n \"confidence\": 0.9995,\r\n \"type\": \"value\",\r\n \"grain\": \"day\",\r\n \"value\": \"2022-04-16T00:00:00.000-07:00\",\r\n \"values\": [\r\n {\r\n \"type\": \"value\",\r\n \"grain\": \"day\",\r\n \"value\": \"2022-04-16T00:00:00.000-07:00\"\r\n }\r\n ]\r\n }\r\n ]\r\n },\r\n \"WARNING\": \"DEPRECATED\",\r\n \"msg_id\": \"05vACT9WHhDUmAy9u\"\r\n}\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing wit.ai skills.\"\"\"\n\nimport logging\nimport json\n\nimport aiohttp\n\nfrom voluptuous import Required\n\nfrom opsdroid.const import WITAI_DEFAULT_VERSION\nfrom opsdroid.const import WITAI_API_ENDPOINT\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {Required(\"token\"): str, \"min-score\": float}\n\n\nasync def call_witai(message, config):\n \"\"\"Call the wit.ai api and return the response.\"\"\"\n async with aiohttp.ClientSession(trust_env=True) as session:\n headers = {\"Authorization\": \"Bearer \" + config[\"token\"]}\n payload = {\"v\": WITAI_DEFAULT_VERSION, \"q\": message.text}\n resp = await session.get(\n WITAI_API_ENDPOINT + \"v={}&q={}\".format(payload[\"v\"], payload[\"q\"]),\n headers=headers,\n )\n result = await resp.json()\n _LOGGER.info(_(\"wit.ai response - %s.\"), json.dumps(result))\n return result\n\n\nasync def parse_witai(opsdroid, skills, message, config):\n \"\"\"Parse a message against all witai skills.\"\"\"\n matched_skills = []\n if \"token\" in config:\n try:\n result = await call_witai(message, config)\n 
except aiohttp.ClientOSError:\n _LOGGER.error(_(\"No response from wit.ai, check your network.\"))\n return matched_skills\n\n if \"code\" in result:\n _LOGGER.error(\n _(\"wit.ai error - %s %s\"), str(result[\"code\"]), str(result[\"error\"])\n )\n return matched_skills\n\n if result[\"entities\"] == {}:\n _LOGGER.error(\n _(\"wit.ai error - No intent found. Did you forget to create one?\")\n )\n return matched_skills\n\n try:\n confidence = result[\"entities\"][\"intent\"][0][\"confidence\"]\n except KeyError:\n confidence = 0.0\n if \"min-score\" in config and confidence < config[\"min-score\"]:\n _LOGGER.info(_(\"wit.ai score lower than min-score.\"))\n return matched_skills\n\n if result:\n for skill in skills:\n for matcher in skill.matchers:\n if \"witai_intent\" in matcher:\n if matcher[\"witai_intent\"] in [\n i[\"value\"] for i in result[\"entities\"][\"intent\"]\n ]:\n message.witai = result\n for key, entity in result[\"entities\"].items():\n if key != \"intent\":\n message.update_entity(\n key, entity[0][\"value\"], entity[0][\"confidence\"]\n )\n matched_skills.append(\n {\n \"score\": confidence,\n \"skill\": skill,\n \"config\": skill.config,\n \"message\": message,\n }\n )\n return matched_skills\n", "path": "opsdroid/parsers/witai.py"}]}
| 3,014 | 218 |
gh_patches_debug_8374
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1810
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup.py uses https://cocotb.org which doesn't work (yet)
We have
https://github.com/cocotb/cocotb/blob/e74d508e30027c16778b95ef0985b6bcbc5207c2/setup.py#L90
which doesn't work (yet). Do we also want to use https://docs.cocotb.org temporarily like in GitHub?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 import sys
32 if sys.version_info[:2] < (3, 5):
33 msg = [
34 "This version of cocotb requires at least Python 3.5,",
35 "you are running Python %d.%d.%d." % (
36 sys.version_info[0], sys.version_info[1], sys.version_info[2])
37 ]
38 if sys.version_info[0] == 2:
39 msg += [
40 "If you have Python 3 installed on your machine try ",
41 "using 'python3 -m pip' instead of 'pip' to install cocotb."
42 ]
43 msg += [
44 "For more information please refer to the documentation at ",
45 "https://cocotb.readthedocs.io."
46 ]
47
48 raise SystemExit("\n".join(msg))
49
50 import logging
51 from setuptools import setup
52 from setuptools import find_packages
53 from os import path, walk
54 from io import StringIO
55
56 # Note: cocotb is not installed properly yet and is missing dependencies and binaries
57 # We can still import other files next to setup.py, as long as they're in MANIFEST.in
58 from cocotb_build_libs import get_ext, build_ext
59
60
61 def read_file(fname):
62 with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:
63 return f.read()
64
65
66 def package_files(directory):
67 paths = []
68 for (fpath, directories, filenames) in walk(directory):
69 for filename in filenames:
70 paths.append(path.join('..', fpath, filename))
71 return paths
72
73
74 # this sets the __version__ variable
75 exec(read_file(path.join('cocotb', '_version.py')))
76
77 # store log from build_libs and display at the end in verbose mode
78 # see https://github.com/pypa/pip/issues/6634
79 log_stream = StringIO()
80 handler = logging.StreamHandler(log_stream)
81 log = logging.getLogger("cocotb._build_libs")
82 log.setLevel(logging.INFO)
83 log.addHandler(handler)
84
85 setup(
86 name='cocotb',
87 cmdclass={'build_ext': build_ext},
88 version=__version__, # noqa: F821
89 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
90 url='https://cocotb.org',
91 license='BSD',
92 long_description=read_file('README.md'),
93 long_description_content_type='text/markdown',
94 author='Chris Higgs, Stuart Hodgson',
95 maintainer='cocotb contributors',
96 maintainer_email='[email protected]',
97 install_requires=[],
98 python_requires='>=3.5',
99 packages=find_packages(),
100 include_package_data=True,
101 package_data={'cocotb': package_files('cocotb/share')},
102 ext_modules=get_ext(),
103 entry_points={
104 'console_scripts': [
105 'cocotb-config=cocotb.config:main',
106 ]
107 },
108 platforms='any',
109 classifiers=[
110 "Programming Language :: Python :: 3",
111 "Programming Language :: Python :: 3.5",
112 "Programming Language :: Python :: 3.6",
113 "Programming Language :: Python :: 3.7",
114 "Programming Language :: Python :: 3.8",
115 "License :: OSI Approved :: BSD License",
116 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
117 ],
118
119 # these appear in the sidebar on PyPI
120 project_urls={
121 "Bug Tracker": "https://github.com/cocotb/cocotb/issues",
122 "Source Code": "https://github.com/cocotb/cocotb",
123 "Documentation": "https://docs.cocotb.org",
124 },
125 )
126
127 print(log_stream.getvalue())
128
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -87,7 +87,7 @@
cmdclass={'build_ext': build_ext},
version=__version__, # noqa: F821
description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
- url='https://cocotb.org',
+ url='https://docs.cocotb.org',
license='BSD',
long_description=read_file('README.md'),
long_description_content_type='text/markdown',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -87,7 +87,7 @@\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n- url='https://cocotb.org',\n+ url='https://docs.cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n", "issue": "setup.py uses https://cocotb.org which doesn't work (yet)\nWe have \r\nhttps://github.com/cocotb/cocotb/blob/e74d508e30027c16778b95ef0985b6bcbc5207c2/setup.py#L90\r\nwhich doesn't work (yet). Do we also want to use https://docs.cocotb.org temporarily like in GitHub?\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\nif sys.version_info[:2] < (3, 5):\n msg = [\n \"This version of cocotb requires at least Python 3.5,\",\n \"you are running Python %d.%d.%d.\" % (\n sys.version_info[0], sys.version_info[1], sys.version_info[2])\n ]\n if sys.version_info[0] == 2:\n msg += [\n \"If you have Python 3 installed on your machine try \",\n \"using 'python3 -m pip' instead of 'pip' to install cocotb.\"\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\"\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\nfrom io import StringIO\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\nfrom cocotb_build_libs import get_ext, build_ext\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\n\n# this sets the __version__ variable\nexec(read_file(path.join('cocotb', '_version.py')))\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n ext_modules=get_ext(),\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n 
\"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}]}
| 2,061 | 130 |
gh_patches_debug_35091
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2330
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
common_data should contain the current user data
## Description
`common_data` should contain the information of the currently logged-in user.
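A hedged sketch of one way to do this, reusing the `UserSerializer` that already exists under `mathesar/api/ui/serializers/users.py` (the `get_user_data` helper name is an assumption for illustration, not the actual implementation):

```python
# Illustrative sketch only.
from mathesar.api.ui.serializers.users import UserSerializer


def get_user_data(request):
    # Serialize the logged-in user, if any, for inclusion in common_data.
    user = request.user
    if user and user.is_authenticated:
        return UserSerializer(user, context={'request': request}).data
    return None


# get_common_data(...) could then gain an extra key, e.g.:
#     'user': get_user_data(request),
```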
</issue>
<code>
[start of mathesar/views.py]
1 from django.conf import settings
2 from django.contrib.auth.decorators import login_required
3 from django.shortcuts import render, redirect, get_object_or_404
4 from rest_framework import status
5 from rest_framework.decorators import api_view
6 from rest_framework.response import Response
7
8 from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer
9 from mathesar.api.serializers.schemas import SchemaSerializer
10 from mathesar.api.serializers.tables import TableSerializer
11 from mathesar.api.serializers.queries import QuerySerializer
12 from mathesar.database.types import UIType
13 from mathesar.models.base import Database, Schema, Table
14 from mathesar.models.query import UIQuery
15 from mathesar.state import reset_reflection
16
17
18 def get_schema_list(request, database):
19 schema_serializer = SchemaSerializer(
20 Schema.objects.filter(database=database),
21 many=True,
22 context={'request': request}
23 )
24 return schema_serializer.data
25
26
27 def get_database_list(request):
28 database_serializer = DatabaseSerializer(
29 Database.objects.all(),
30 many=True,
31 context={'request': request}
32 )
33 return database_serializer.data
34
35
36 def get_table_list(request, schema):
37 if schema is None:
38 return []
39 table_serializer = TableSerializer(
40 Table.objects.filter(schema=schema),
41 many=True,
42 context={'request': request}
43 )
44 return table_serializer.data
45
46
47 def get_queries_list(request, schema):
48 if schema is None:
49 return []
50 query_serializer = QuerySerializer(
51 UIQuery.objects.filter(base_table__schema=schema),
52 many=True,
53 context={'request': request}
54 )
55 return query_serializer.data
56
57
58 def get_ui_type_list(request, database):
59 if database is None:
60 return []
61 type_serializer = TypeSerializer(
62 UIType,
63 many=True,
64 context={'request': request}
65 )
66 return type_serializer.data
67
68
69 def get_common_data(request, database, schema=None):
70 return {
71 'current_db': database.name if database else None,
72 'current_schema': schema.id if schema else None,
73 'schemas': get_schema_list(request, database),
74 'databases': get_database_list(request),
75 'tables': get_table_list(request, schema),
76 'queries': get_queries_list(request, schema),
77 'abstract_types': get_ui_type_list(request, database),
78 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),
79 }
80
81
82 def get_current_database(request, db_name):
83 """Get database from passed name, with fall back behavior."""
84 if db_name is not None:
85 current_database = get_object_or_404(Database, name=db_name)
86 else:
87 request_database_name = request.GET.get('database')
88 try:
89 if request_database_name is not None:
90 # Try to get the database named specified in the request
91 current_database = Database.objects.get(name=request_database_name)
92 else:
93 # Try to get the first database available
94 current_database = Database.objects.order_by('id').first()
95 except Database.DoesNotExist:
96 current_database = None
97 return current_database
98
99
100 def get_current_schema(request, schema_id, database):
101 # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.
102 if schema_id is not None:
103 return get_object_or_404(Schema, id=schema_id)
104 else:
105 try:
106 # Try to get the first schema in the DB
107 return Schema.objects.filter(database=database).order_by('id').first()
108 except Schema.DoesNotExist:
109 return None
110
111
112 def render_schema(request, database, schema):
113 # if there's no schema available, redirect to the schemas page.
114 if not schema:
115 return redirect('schemas', db_name=database.name)
116 else:
117 # We are redirecting so that the correct URL is passed to the frontend.
118 return redirect('schema_home', db_name=database.name, schema_id=schema.id)
119
120
121 @login_required
122 @api_view(['POST'])
123 def reflect_all(_):
124 reset_reflection()
125 return Response(status=status.HTTP_200_OK)
126
127
128 @login_required
129 def home(request):
130 database = get_current_database(request, None)
131 return redirect('schemas', db_name=database.name)
132
133
134 @login_required
135 def schema_home(request, db_name, schema_id, **kwargs):
136 database = get_current_database(request, db_name)
137 schema = get_current_schema(request, schema_id, database)
138 return render(request, 'mathesar/index.html', {
139 'common_data': get_common_data(request, database, schema)
140 })
141
142
143 @login_required
144 def schemas(request, db_name):
145 database = get_current_database(request, db_name)
146 return render(request, 'mathesar/index.html', {
147 'common_data': get_common_data(request, database, None)
148 })
149
[end of mathesar/views.py]
[start of mathesar/api/ui/viewsets/users.py]
1 from django.contrib.auth import get_user_model
2 from rest_access_policy import AccessViewSetMixin
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.exceptions import MethodNotAllowed
6 from rest_framework.generics import get_object_or_404
7 from rest_framework.response import Response
8
9 from mathesar.api.ui.permissions.database_role import DatabaseRoleAccessPolicy
10 from mathesar.api.ui.permissions.schema_role import SchemaRoleAccessPolicy
11 from mathesar.api.ui.serializers.users import (
12 ChangePasswordSerializer, PasswordResetSerializer, UserSerializer, DatabaseRoleSerializer,
13 SchemaRoleSerializer,
14 )
15 from mathesar.api.pagination import DefaultLimitOffsetPagination
16 from mathesar.api.ui.permissions.users import UserAccessPolicy
17 from mathesar.models.users import User, DatabaseRole, SchemaRole
18
19
20 class UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
21 queryset = User.objects.all().order_by('id')
22 serializer_class = UserSerializer
23 pagination_class = DefaultLimitOffsetPagination
24 access_policy = UserAccessPolicy
25
26 @action(methods=['post'], detail=True)
27 def password_reset(self, request, pk=None):
28 serializer = PasswordResetSerializer(data=request.data, context={'request': request})
29 serializer.is_valid(raise_exception=True)
30 user = get_object_or_404(get_user_model(), pk=pk)
31 password = serializer.validated_data["password"]
32 user.set_password(password)
33 # Make sure we redirect user to change password set by the admin on login
34 user.password_change_needed = True
35 user.save()
36 return Response(status=status.HTTP_200_OK)
37
38 @action(methods=['post'], detail=False)
39 def password_change(self, request):
40 serializer = ChangePasswordSerializer(
41 instance=request.user,
42 data=request.data,
43 context={'request': request}
44 )
45 serializer.is_valid(raise_exception=True)
46 serializer.save()
47 return Response(status=status.HTTP_200_OK)
48
49
50 class DatabaseRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
51 queryset = DatabaseRole.objects.all().order_by('id')
52 serializer_class = DatabaseRoleSerializer
53 pagination_class = DefaultLimitOffsetPagination
54 access_policy = DatabaseRoleAccessPolicy
55
56 def get_queryset(self):
57 return self.access_policy.scope_queryset(
58 self.request, super().get_queryset()
59 )
60
61 def update(self, request, pk=None):
62 raise MethodNotAllowed(request.method)
63
64 def partial_update(self, request, pk=None):
65 raise MethodNotAllowed(request.method)
66
67
68 class SchemaRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
69 queryset = SchemaRole.objects.all().order_by('id')
70 serializer_class = SchemaRoleSerializer
71 pagination_class = DefaultLimitOffsetPagination
72 access_policy = SchemaRoleAccessPolicy
73
74 def get_queryset(self):
75 return self.access_policy.scope_queryset(
76 self.request, super().get_queryset()
77 )
78
79 def update(self, request, pk=None):
80 raise MethodNotAllowed(request.method)
81
82 def partial_update(self, request, pk=None):
83 raise MethodNotAllowed(request.method)
84
[end of mathesar/api/ui/viewsets/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mathesar/api/ui/viewsets/users.py b/mathesar/api/ui/viewsets/users.py
--- a/mathesar/api/ui/viewsets/users.py
+++ b/mathesar/api/ui/viewsets/users.py
@@ -14,11 +14,11 @@
)
from mathesar.api.pagination import DefaultLimitOffsetPagination
from mathesar.api.ui.permissions.users import UserAccessPolicy
-from mathesar.models.users import User, DatabaseRole, SchemaRole
+from mathesar.models.users import DatabaseRole, SchemaRole
class UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
- queryset = User.objects.all().order_by('id')
+ queryset = get_user_model().objects.all().order_by('id')
serializer_class = UserSerializer
pagination_class = DefaultLimitOffsetPagination
access_policy = UserAccessPolicy
diff --git a/mathesar/views.py b/mathesar/views.py
--- a/mathesar/views.py
+++ b/mathesar/views.py
@@ -9,6 +9,7 @@
from mathesar.api.serializers.schemas import SchemaSerializer
from mathesar.api.serializers.tables import TableSerializer
from mathesar.api.serializers.queries import QuerySerializer
+from mathesar.api.ui.serializers.users import UserSerializer
from mathesar.database.types import UIType
from mathesar.models.base import Database, Schema, Table
from mathesar.models.query import UIQuery
@@ -66,6 +67,15 @@
return type_serializer.data
+def get_user_data(request):
+ user_serializer = UserSerializer(
+ request.user,
+ many=False,
+ context={'request': request}
+ )
+ return user_serializer.data
+
+
def get_common_data(request, database, schema=None):
return {
'current_db': database.name if database else None,
@@ -75,6 +85,7 @@
'tables': get_table_list(request, schema),
'queries': get_queries_list(request, schema),
'abstract_types': get_ui_type_list(request, database),
+ 'user': get_user_data(request),
'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),
}
|
{"golden_diff": "diff --git a/mathesar/api/ui/viewsets/users.py b/mathesar/api/ui/viewsets/users.py\n--- a/mathesar/api/ui/viewsets/users.py\n+++ b/mathesar/api/ui/viewsets/users.py\n@@ -14,11 +14,11 @@\n )\n from mathesar.api.pagination import DefaultLimitOffsetPagination\n from mathesar.api.ui.permissions.users import UserAccessPolicy\n-from mathesar.models.users import User, DatabaseRole, SchemaRole\n+from mathesar.models.users import DatabaseRole, SchemaRole\n \n \n class UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n- queryset = User.objects.all().order_by('id')\n+ queryset = get_user_model().objects.all().order_by('id')\n serializer_class = UserSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = UserAccessPolicy\ndiff --git a/mathesar/views.py b/mathesar/views.py\n--- a/mathesar/views.py\n+++ b/mathesar/views.py\n@@ -9,6 +9,7 @@\n from mathesar.api.serializers.schemas import SchemaSerializer\n from mathesar.api.serializers.tables import TableSerializer\n from mathesar.api.serializers.queries import QuerySerializer\n+from mathesar.api.ui.serializers.users import UserSerializer\n from mathesar.database.types import UIType\n from mathesar.models.base import Database, Schema, Table\n from mathesar.models.query import UIQuery\n@@ -66,6 +67,15 @@\n return type_serializer.data\n \n \n+def get_user_data(request):\n+ user_serializer = UserSerializer(\n+ request.user,\n+ many=False,\n+ context={'request': request}\n+ )\n+ return user_serializer.data\n+\n+\n def get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n@@ -75,6 +85,7 @@\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database),\n+ 'user': get_user_data(request),\n 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),\n }\n", "issue": "common_data should contain the current user data\n## Description\r\n`common_data` should contain the information of the current logged in user.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.models.query import UIQuery\nfrom mathesar.state import reset_reflection\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return 
[]\n query_serializer = QuerySerializer(\n UIQuery.objects.filter(base_table__schema=schema),\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database),\n 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),\n }\n\n\ndef get_current_database(request, db_name):\n \"\"\"Get database from passed name, with fall back behavior.\"\"\"\n if db_name is not None:\n current_database = get_object_or_404(Database, name=db_name)\n else:\n request_database_name = request.GET.get('database')\n try:\n if request_database_name is not None:\n # Try to get the database named specified in the request\n current_database = Database.objects.get(name=request_database_name)\n else:\n # Try to get the first database available\n current_database = Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n current_database = None\n return current_database\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\n@login_required\n@api_view(['POST'])\ndef reflect_all(_):\n reset_reflection()\n return Response(status=status.HTTP_200_OK)\n\n\n@login_required\ndef home(request):\n database = get_current_database(request, None)\n return redirect('schemas', db_name=database.name)\n\n\n@login_required\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\n@login_required\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py"}, {"content": "from django.contrib.auth import get_user_model\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import MethodNotAllowed\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\n\nfrom mathesar.api.ui.permissions.database_role import DatabaseRoleAccessPolicy\nfrom 
mathesar.api.ui.permissions.schema_role import SchemaRoleAccessPolicy\nfrom mathesar.api.ui.serializers.users import (\n ChangePasswordSerializer, PasswordResetSerializer, UserSerializer, DatabaseRoleSerializer,\n SchemaRoleSerializer,\n)\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.ui.permissions.users import UserAccessPolicy\nfrom mathesar.models.users import User, DatabaseRole, SchemaRole\n\n\nclass UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = User.objects.all().order_by('id')\n serializer_class = UserSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = UserAccessPolicy\n\n @action(methods=['post'], detail=True)\n def password_reset(self, request, pk=None):\n serializer = PasswordResetSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n user = get_object_or_404(get_user_model(), pk=pk)\n password = serializer.validated_data[\"password\"]\n user.set_password(password)\n # Make sure we redirect user to change password set by the admin on login\n user.password_change_needed = True\n user.save()\n return Response(status=status.HTTP_200_OK)\n\n @action(methods=['post'], detail=False)\n def password_change(self, request):\n serializer = ChangePasswordSerializer(\n instance=request.user,\n data=request.data,\n context={'request': request}\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_200_OK)\n\n\nclass DatabaseRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = DatabaseRole.objects.all().order_by('id')\n serializer_class = DatabaseRoleSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = DatabaseRoleAccessPolicy\n\n def get_queryset(self):\n return self.access_policy.scope_queryset(\n self.request, super().get_queryset()\n )\n\n def update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n def partial_update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n\nclass SchemaRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = SchemaRole.objects.all().order_by('id')\n serializer_class = SchemaRoleSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = SchemaRoleAccessPolicy\n\n def get_queryset(self):\n return self.access_policy.scope_queryset(\n self.request, super().get_queryset()\n )\n\n def update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n def partial_update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n", "path": "mathesar/api/ui/viewsets/users.py"}]}
| 2,747 | 463 |
gh_patches_debug_28171
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5129
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Correct exception for quantity < max order in tickets schema
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
Validation check message for quantity to be more than max-order needs to be corrected.
**To Reproduce**
A validation check for quantity and max-order is:
```
if 'quantity' in data and 'max_order' in data:
if data['quantity'] < data['max_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
"quantity should be lesser than max-order")
```
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
The message should be: "quantity should not be lesser than max-order".
</issue>
<code>
[start of app/api/schema/tickets.py]
1 from marshmallow import validates_schema
2 from marshmallow_jsonapi import fields
3 from marshmallow_jsonapi.flask import Relationship
4 from sqlalchemy.orm.exc import NoResultFound
5
6 from app.api.helpers.exceptions import UnprocessableEntity
7 from app.api.helpers.utilities import dasherize
8 from app.api.schema.base import SoftDeletionSchema
9 from app.models.discount_code import DiscountCode
10 from app.models.ticket import Ticket
11 from utils.common import use_defaults
12
13
14 @use_defaults()
15 class TicketSchemaPublic(SoftDeletionSchema):
16 class Meta:
17 type_ = 'ticket'
18 self_view = 'v1.ticket_detail'
19 self_view_kwargs = {'id': '<id>'}
20 inflect = dasherize
21
22 @validates_schema(pass_original=True)
23 def validate_date(self, data, original_data):
24 if 'id' in original_data['data']:
25 ticket = Ticket.query.filter_by(id=original_data['data']['id']).one()
26
27 if 'sales_starts_at' not in data:
28 data['sales_starts_at'] = ticket.sales_starts_at
29
30 if 'sales_ends_at' not in data:
31 data['sales_ends_at'] = ticket.sales_ends_at
32
33 if data['sales_starts_at'] >= data['sales_ends_at']:
34 raise UnprocessableEntity({'pointer': '/data/attributes/sales-ends-at'},
35 "sales-ends-at should be after sales-starts-at")
36
37 @validates_schema
38 def validate_quantity(self, data):
39 if 'max_order' in data and 'min_order' in data:
40 if data['max_order'] < data['min_order']:
41 raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},
42 "max-order should be greater than min-order")
43
44 if 'quantity' in data and 'min_order' in data:
45 if data['quantity'] < data['min_order']:
46 raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
47 "quantity should be greater than min-order")
48
49 if 'quantity' in data and 'max_order' in data:
50 if data['quantity'] < data['max_order']:
51 raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
52 "quantity should be lesser than max-order")
53
54 @validates_schema(pass_original=True)
55 def validate_discount_code(self, data, original_data):
56 if 'relationships' in original_data and 'discount-codes' in original_data['data']['relationships']:
57 discount_codes = original_data['data']['relationships']['discount-codes']
58 for code in discount_codes['data']:
59 try:
60 DiscountCode.query.filter_by(id=code['id']).one()
61 except NoResultFound:
62 raise UnprocessableEntity(
63 {'pointer': '/data/relationships/discount-codes'}, "Discount code does not exist")
64
65 id = fields.Str(dump_only=True)
66 name = fields.Str(required=True)
67 description = fields.Str(allow_none=True)
68 type = fields.Str(required=True)
69 price = fields.Float(validate=lambda n: n >= 0, allow_none=True)
70 quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
71 is_description_visible = fields.Boolean(default=False)
72 position = fields.Integer(allow_none=True)
73 is_fee_absorbed = fields.Boolean()
74 sales_starts_at = fields.DateTime(required=True)
75 sales_ends_at = fields.DateTime(required=True)
76 is_hidden = fields.Boolean(default=False)
77 min_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
78 max_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
79 is_checkin_restricted = fields.Boolean(default=True)
80 auto_checkin_enabled = fields.Boolean(default=False)
81 event = Relationship(attribute='event',
82 self_view='v1.ticket_event',
83 self_view_kwargs={'id': '<id>'},
84 related_view='v1.event_detail',
85 related_view_kwargs={'ticket_id': '<id>'},
86 schema='EventSchemaPublic',
87 type_='event')
88
89 ticket_tags = Relationship(attribute='tags',
90 self_view='v1.ticket_ticket_tag',
91 self_view_kwargs={'id': '<id>'},
92 related_view='v1.ticket_tag_list',
93 related_view_kwargs={'ticket_id': '<id>'},
94 schema='TicketTagSchema',
95 many=True,
96 type_='ticket-tag')
97
98 discount_codes = Relationship(
99 attribute='discount_codes',
100 self_view='v1.ticket_discount_codes',
101 self_view_kwargs={'id': '<id>'},
102 related_view='v1.discount_code_list',
103 related_view_kwargs={'ticket_id': '<id>'},
104 schema='DiscountCodeSchemaTicket',
105 many=True,
106 type_='discount-code')
107
108
109 class TicketSchema(TicketSchemaPublic):
110 class Meta:
111 type_ = 'ticket'
112 self_view = 'v1.ticket_detail'
113 self_view_kwargs = {'id': '<id>'}
114 inflect = dasherize
115
116 access_codes = Relationship(attribute='access_codes',
117 self_view='v1.ticket_access_code',
118 self_view_kwargs={'id': '<id>'},
119 related_view='v1.access_code_list',
120 related_view_kwargs={'ticket_id': '<id>'},
121 schema='AccessCodeSchema',
122 many=True,
123 type_='access-code')
124 attendees = Relationship(attribute='ticket_holders',
125 self_view='v1.ticket_attendees',
126 self_view_kwargs={'id': '<id>'},
127 related_view='v1.attendee_list_post',
128 related_view_kwargs={'ticket_id': '<id>'},
129 schema='AttendeeSchema',
130 many=True,
131 type_='attendee')
132
[end of app/api/schema/tickets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/schema/tickets.py b/app/api/schema/tickets.py
--- a/app/api/schema/tickets.py
+++ b/app/api/schema/tickets.py
@@ -39,17 +39,17 @@
if 'max_order' in data and 'min_order' in data:
if data['max_order'] < data['min_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},
- "max-order should be greater than min-order")
+ "max-order should be greater than or equal to min-order")
if 'quantity' in data and 'min_order' in data:
if data['quantity'] < data['min_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
- "quantity should be greater than min-order")
+ "quantity should be greater than or equal to min-order")
if 'quantity' in data and 'max_order' in data:
if data['quantity'] < data['max_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
- "quantity should be lesser than max-order")
+ "quantity should be greater than or equal to max-order")
@validates_schema(pass_original=True)
def validate_discount_code(self, data, original_data):
|
{"golden_diff": "diff --git a/app/api/schema/tickets.py b/app/api/schema/tickets.py\n--- a/app/api/schema/tickets.py\n+++ b/app/api/schema/tickets.py\n@@ -39,17 +39,17 @@\n if 'max_order' in data and 'min_order' in data:\n if data['max_order'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},\n- \"max-order should be greater than min-order\")\n+ \"max-order should be greater than or equal to min-order\")\n \n if 'quantity' in data and 'min_order' in data:\n if data['quantity'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n- \"quantity should be greater than min-order\")\n+ \"quantity should be greater than or equal to min-order\")\n \n if 'quantity' in data and 'max_order' in data:\n if data['quantity'] < data['max_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n- \"quantity should be lesser than max-order\")\n+ \"quantity should be greater than or equal to max-order\")\n \n @validates_schema(pass_original=True)\n def validate_discount_code(self, data, original_data):\n", "issue": "Correct exception for quantity < max order in tickets schema\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nValidation check message for quantity to be more than max-order needs to be corrected.\r\n\r\n**To Reproduce**\r\nA validation check for quantity and max-order is:\r\n```\r\n if 'quantity' in data and 'max_order' in data:\r\n if data['quantity'] < data['max_order']:\r\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\r\n \"quantity should be lesser than max-order\")\r\n```\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe message should be: \"quantity should not be lesser than max-order\".\n", "before_files": [{"content": "from marshmallow import validates_schema\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom app.models.discount_code import DiscountCode\nfrom app.models.ticket import Ticket\nfrom utils.common import use_defaults\n\n\n@use_defaults()\nclass TicketSchemaPublic(SoftDeletionSchema):\n class Meta:\n type_ = 'ticket'\n self_view = 'v1.ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n ticket = Ticket.query.filter_by(id=original_data['data']['id']).one()\n\n if 'sales_starts_at' not in data:\n data['sales_starts_at'] = ticket.sales_starts_at\n\n if 'sales_ends_at' not in data:\n data['sales_ends_at'] = ticket.sales_ends_at\n\n if data['sales_starts_at'] >= data['sales_ends_at']:\n raise UnprocessableEntity({'pointer': '/data/attributes/sales-ends-at'},\n \"sales-ends-at should be after sales-starts-at\")\n\n @validates_schema\n def validate_quantity(self, data):\n if 'max_order' in data and 'min_order' in data:\n if data['max_order'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},\n \"max-order should be greater than min-order\")\n\n if 'quantity' in data and 'min_order' in data:\n if data['quantity'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n \"quantity should 
be greater than min-order\")\n\n if 'quantity' in data and 'max_order' in data:\n if data['quantity'] < data['max_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n \"quantity should be lesser than max-order\")\n\n @validates_schema(pass_original=True)\n def validate_discount_code(self, data, original_data):\n if 'relationships' in original_data and 'discount-codes' in original_data['data']['relationships']:\n discount_codes = original_data['data']['relationships']['discount-codes']\n for code in discount_codes['data']:\n try:\n DiscountCode.query.filter_by(id=code['id']).one()\n except NoResultFound:\n raise UnprocessableEntity(\n {'pointer': '/data/relationships/discount-codes'}, \"Discount code does not exist\")\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n description = fields.Str(allow_none=True)\n type = fields.Str(required=True)\n price = fields.Float(validate=lambda n: n >= 0, allow_none=True)\n quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n is_description_visible = fields.Boolean(default=False)\n position = fields.Integer(allow_none=True)\n is_fee_absorbed = fields.Boolean()\n sales_starts_at = fields.DateTime(required=True)\n sales_ends_at = fields.DateTime(required=True)\n is_hidden = fields.Boolean(default=False)\n min_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n max_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n is_checkin_restricted = fields.Boolean(default=True)\n auto_checkin_enabled = fields.Boolean(default=False)\n event = Relationship(attribute='event',\n self_view='v1.ticket_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='EventSchemaPublic',\n type_='event')\n\n ticket_tags = Relationship(attribute='tags',\n self_view='v1.ticket_ticket_tag',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_tag_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='TicketTagSchema',\n many=True,\n type_='ticket-tag')\n\n discount_codes = Relationship(\n attribute='discount_codes',\n self_view='v1.ticket_discount_codes',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.discount_code_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='DiscountCodeSchemaTicket',\n many=True,\n type_='discount-code')\n\n\nclass TicketSchema(TicketSchemaPublic):\n class Meta:\n type_ = 'ticket'\n self_view = 'v1.ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n access_codes = Relationship(attribute='access_codes',\n self_view='v1.ticket_access_code',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.access_code_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='AccessCodeSchema',\n many=True,\n type_='access-code')\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.ticket_attendees',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.attendee_list_post',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='AttendeeSchema',\n many=True,\n type_='attendee')\n", "path": "app/api/schema/tickets.py"}]}
| 2,155 | 279 |
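Note on gh_patches_debug_28171 (tickets schema): the golden diff above only rewords the three `UnprocessableEntity` messages to "greater than or equal to"; the `<` comparisons themselves are unchanged. A standalone sketch of the same schema-level check, using plain marshmallow's `ValidationError` in place of the project's `UnprocessableEntity` wrapper (illustration only, not the project's code):

```python
from marshmallow import Schema, fields, validates_schema, ValidationError

class TicketQuantitySketch(Schema):
    # Illustrative subset of the real Ticket schema fields.
    quantity = fields.Integer(allow_none=True)
    min_order = fields.Integer(allow_none=True)
    max_order = fields.Integer(allow_none=True)

    @validates_schema
    def validate_quantity(self, data, **kwargs):
        if 'max_order' in data and 'min_order' in data and data['max_order'] < data['min_order']:
            raise ValidationError("max-order should be greater than or equal to min-order")
        if 'quantity' in data and 'min_order' in data and data['quantity'] < data['min_order']:
            raise ValidationError("quantity should be greater than or equal to min-order")
        if 'quantity' in data and 'max_order' in data and data['quantity'] < data['max_order']:
            raise ValidationError("quantity should be greater than or equal to max-order")
```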
gh_patches_debug_55113
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-9819
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Circuit termination on interface not working when accessed from interface table
### NetBox version
v3.3-beta1
### Python version
3.9
### Steps to Reproduce
1. Create device with interfaces
2. Create circuit with Z-side on same site as device
3. Try to connect interface to circuit termination from interface table
4. Select B Side Circuit
5. Try to select Side for circuit
### Expected Behavior
On B Side you are able to select side for circuit
### Observed Behavior
Side option menu is empty.
This is caused by the missing `$` in `$termination_{cable_end}_circuit`:
https://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141
Circuit termination on interface not working when accessed from interface table
### NetBox version
v3.3-beta1
### Python version
3.9
### Steps to Reproduce
1. Create device with interfaces
2. Create circuit with Z-side on same site as device
3. Try to connect interface to circuit termination from interface table
4. Select B Side Circuit
5. Try to select Side for circuit
### Expected Behavior
On B Side you are able to select side for circuit
### Observed Behavior
Side option menu is empty.
This is caused by the missing `$` in `$termination_{cable_end}_circuit`:
https://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141
</issue>
<code>
[start of netbox/dcim/forms/connections.py]
1 from django import forms
2
3 from circuits.models import Circuit, CircuitTermination, Provider
4 from dcim.models import *
5 from utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField
6 from .models import CableForm
7
8
9 def get_cable_form(a_type, b_type):
10
11 class FormMetaclass(forms.models.ModelFormMetaclass):
12
13 def __new__(mcs, name, bases, attrs):
14
15 for cable_end, term_cls in (('a', a_type), ('b', b_type)):
16
17 attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(
18 queryset=Region.objects.all(),
19 label='Region',
20 required=False,
21 initial_params={
22 'sites': f'$termination_{cable_end}_site'
23 }
24 )
25 attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(
26 queryset=SiteGroup.objects.all(),
27 label='Site group',
28 required=False,
29 initial_params={
30 'sites': f'$termination_{cable_end}_site'
31 }
32 )
33 attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(
34 queryset=Site.objects.all(),
35 label='Site',
36 required=False,
37 query_params={
38 'region_id': f'$termination_{cable_end}_region',
39 'group_id': f'$termination_{cable_end}_sitegroup',
40 }
41 )
42 attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(
43 queryset=Location.objects.all(),
44 label='Location',
45 required=False,
46 null_option='None',
47 query_params={
48 'site_id': f'$termination_{cable_end}_site'
49 }
50 )
51
52 # Device component
53 if hasattr(term_cls, 'device'):
54
55 attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(
56 queryset=Rack.objects.all(),
57 label='Rack',
58 required=False,
59 null_option='None',
60 initial_params={
61 'devices': f'$termination_{cable_end}_device'
62 },
63 query_params={
64 'site_id': f'$termination_{cable_end}_site',
65 'location_id': f'$termination_{cable_end}_location',
66 }
67 )
68 attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(
69 queryset=Device.objects.all(),
70 label='Device',
71 required=False,
72 initial_params={
73 f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'
74 },
75 query_params={
76 'site_id': f'$termination_{cable_end}_site',
77 'location_id': f'$termination_{cable_end}_location',
78 'rack_id': f'$termination_{cable_end}_rack',
79 }
80 )
81 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
82 queryset=term_cls.objects.all(),
83 label=term_cls._meta.verbose_name.title(),
84 disabled_indicator='_occupied',
85 query_params={
86 'device_id': f'$termination_{cable_end}_device',
87 }
88 )
89
90 # PowerFeed
91 elif term_cls == PowerFeed:
92
93 attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(
94 queryset=PowerPanel.objects.all(),
95 label='Power Panel',
96 required=False,
97 initial_params={
98 'powerfeeds__in': f'${cable_end}_terminations'
99 },
100 query_params={
101 'site_id': f'$termination_{cable_end}_site',
102 'location_id': f'$termination_{cable_end}_location',
103 }
104 )
105 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
106 queryset=term_cls.objects.all(),
107 label='Power Feed',
108 disabled_indicator='_occupied',
109 query_params={
110 'powerpanel_id': f'$termination_{cable_end}_powerpanel',
111 }
112 )
113
114 # CircuitTermination
115 elif term_cls == CircuitTermination:
116
117 attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(
118 queryset=Provider.objects.all(),
119 label='Provider',
120 initial_params={
121 'circuits': f'$termination_{cable_end}_circuit'
122 },
123 required=False
124 )
125 attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(
126 queryset=Circuit.objects.all(),
127 label='Circuit',
128 initial_params={
129 'terminations__in': f'${cable_end}_terminations'
130 },
131 query_params={
132 'provider_id': f'$termination_{cable_end}_provider',
133 'site_id': f'$termination_{cable_end}_site',
134 }
135 )
136 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
137 queryset=term_cls.objects.all(),
138 label='Side',
139 disabled_indicator='_occupied',
140 query_params={
141 'circuit_id': f'termination_{cable_end}_circuit',
142 }
143 )
144
145 return super().__new__(mcs, name, bases, attrs)
146
147 class _CableForm(CableForm, metaclass=FormMetaclass):
148
149 def __init__(self, *args, **kwargs):
150
151 # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()
152 for field_name in ('a_terminations', 'b_terminations'):
153 if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:
154 kwargs['initial'][field_name] = [kwargs['initial'][field_name]]
155
156 super().__init__(*args, **kwargs)
157
158 if self.instance and self.instance.pk:
159 # Initialize A/B terminations when modifying an existing Cable instance
160 self.initial['a_terminations'] = self.instance.a_terminations
161 self.initial['b_terminations'] = self.instance.b_terminations
162
163 def clean(self):
164 super().clean()
165
166 # Set the A/B terminations on the Cable instance
167 self.instance.a_terminations = self.cleaned_data['a_terminations']
168 self.instance.b_terminations = self.cleaned_data['b_terminations']
169
170 return _CableForm
171
[end of netbox/dcim/forms/connections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py
--- a/netbox/dcim/forms/connections.py
+++ b/netbox/dcim/forms/connections.py
@@ -138,7 +138,7 @@
label='Side',
disabled_indicator='_occupied',
query_params={
- 'circuit_id': f'termination_{cable_end}_circuit',
+ 'circuit_id': f'$termination_{cable_end}_circuit',
}
)
|
{"golden_diff": "diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py\n--- a/netbox/dcim/forms/connections.py\n+++ b/netbox/dcim/forms/connections.py\n@@ -138,7 +138,7 @@\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n- 'circuit_id': f'termination_{cable_end}_circuit',\n+ 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n", "issue": "Circuit termination on interface not working when accesssed from interface table\n### NetBox version\n\nv3.3-beta1\n\n### Python version\n\n3.9\n\n### Steps to Reproduce\n\n1. Create device with interfaces\r\n2. Create circuit with Z-side on same site as device\r\n3. Try to connect interface to circuit termination from interface table\r\n4. Select B Side Cicuit\r\n5. Try to select Side for circuit\n\n### Expected Behavior\n\nOn B Side you are able to select side for circuit\n\n### Observed Behavior\n\nSide option menu is empty because.\r\n\r\nThis is caused because $ is missing for `$termination_{cable_end}_circuit`\r\n\r\nhttps://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141\nCircuit termination on interface not working when accesssed from interface table\n### NetBox version\n\nv3.3-beta1\n\n### Python version\n\n3.9\n\n### Steps to Reproduce\n\n1. Create device with interfaces\r\n2. Create circuit with Z-side on same site as device\r\n3. Try to connect interface to circuit termination from interface table\r\n4. Select B Side Cicuit\r\n5. Try to select Side for circuit\n\n### Expected Behavior\n\nOn B Side you are able to select side for circuit\n\n### Observed Behavior\n\nSide option menu is empty because.\r\n\r\nThis is caused because $ is missing for `$termination_{cable_end}_circuit`\r\n\r\nhttps://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141\n", "before_files": [{"content": "from django import forms\n\nfrom circuits.models import Circuit, CircuitTermination, Provider\nfrom dcim.models import *\nfrom utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .models import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n label='Region',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n label='Site group',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n label='Site',\n required=False,\n query_params={\n 'region_id': f'$termination_{cable_end}_region',\n 'group_id': f'$termination_{cable_end}_sitegroup',\n }\n )\n attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n label='Location',\n required=False,\n null_option='None',\n query_params={\n 'site_id': f'$termination_{cable_end}_site'\n }\n )\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n label='Rack',\n required=False,\n null_option='None',\n initial_params={\n 
'devices': f'$termination_{cable_end}_device'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label='Device',\n required=False,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n 'rack_id': f'$termination_{cable_end}_rack',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n disabled_indicator='_occupied',\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label='Power Panel',\n required=False,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Power Feed',\n disabled_indicator='_occupied',\n query_params={\n 'powerpanel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(\n queryset=Provider.objects.all(),\n label='Provider',\n initial_params={\n 'circuits': f'$termination_{cable_end}_circuit'\n },\n required=False\n )\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label='Circuit',\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n },\n query_params={\n 'provider_id': f'$termination_{cable_end}_provider',\n 'site_id': f'$termination_{cable_end}_site',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n 'circuit_id': f'termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, **kwargs):\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:\n kwargs['initial'][field_name] = [kwargs['initial'][field_name]]\n\n super().__init__(*args, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n self.initial['a_terminations'] = self.instance.a_terminations\n self.initial['b_terminations'] = self.instance.b_terminations\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n self.instance.a_terminations = self.cleaned_data['a_terminations']\n self.instance.b_terminations = self.cleaned_data['b_terminations']\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}]}
| 2,671 | 114 |
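Note on gh_patches_debug_55113 (NetBox cable form): the one-character fix works because of how `query_params` values are resolved in these dynamic form fields — a value prefixed with `$` is treated as a reference to a sibling form field (its current selection is substituted into the API query), whereas an unprefixed value is sent literally, so the filter `circuit_id='termination_b_circuit'` matched nothing and the "Side" menu stayed empty. A minimal sketch of the corrected field with `cable_end = 'b'` hard-coded for concreteness (illustration only, outside the metaclass loop used in the real module):

```python
from circuits.models import CircuitTermination
from utilities.forms import DynamicModelMultipleChoiceField

# Mirrors the corrected attrs[f'{cable_end}_terminations'] definition for cable_end = 'b'.
b_terminations = DynamicModelMultipleChoiceField(
    queryset=CircuitTermination.objects.all(),
    label='Side',
    disabled_indicator='_occupied',
    query_params={
        # The leading '$' makes the filter follow the currently selected
        # 'termination_b_circuit' form field instead of sending the literal string.
        'circuit_id': '$termination_b_circuit',
    },
)
```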
gh_patches_debug_37978
|
rasdani/github-patches
|
git_diff
|
AnalogJ__lexicon-336
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Memset provider: TypeError: string indices must be integers
Hi,
When using the Memset provider with the default table formatting I get this error:
```bash
$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300
Traceback (most recent call last):
File "/usr/local/bin/lexicon", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 133, in main
handle_output(results, parsed_args.output)
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 109, in handle_output
table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER')
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 75, in generate_table_result
array = [[row['id'], row['type'], row['name'], row['content'], row['ttl']] for row in output]
TypeError: string indices must be integers
```
I think this is because `output` is a string not an array - when I added `print output` I got a string like `969f9caabe19859c11249333dd80aa15`.
When I use `--output JSON` I get the same ID plus quotes:
```bash
$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300 --output JSON
"969f9caabe19859c11249333dd80aa15"
```
I know Memset's not public so if you need any help to test it just let me know. For now I'll work around it with `--output QUIET` since I don't really care about the output here.
Thanks!
Dave
</issue>
<code>
[start of lexicon/cli.py]
1 #!/usr/bin/env python
2 """Module for Lexicon command-line interface"""
3 from __future__ import absolute_import, print_function
4 import json
5 import logging
6 import os
7 import sys
8
9 from lexicon.client import Client
10 from lexicon.config import ConfigResolver
11 from lexicon.parser import generate_cli_main_parser
12
13
14 logger = logging.getLogger(__name__) # pylint: disable=C0103
15
16
17 def generate_table_result(lexicon_logger, output=None, without_header=None):
18 """Convert returned JSON into a nice table for command line usage"""
19 try:
20 _ = (entry for entry in output)
21 except TypeError:
22 lexicon_logger.debug('Command output is not iterable, and then cannot '
23 'be printed with --quiet parameter not enabled.')
24 return None
25
26 array = [[
27 row.get('id', ''),
28 row.get('type', ''),
29 row.get('name', ''),
30 row.get('content', ''),
31 row.get('ttl', '')] for row in output]
32
33 # Insert header (insert before calculating the max width of each column
34 # to take headers size into account)
35 if not without_header:
36 headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL']
37 array.insert(0, headers)
38
39 column_widths = [0, 0, 0, 0, 0]
40 # Find max width for each column
41 for row in array:
42 for idx, col in enumerate(row):
43 width = len(str(col))
44 if width > column_widths[idx]:
45 column_widths[idx] = width
46
47 # Add a 'nice' separator
48 if not without_header:
49 array.insert(1, ['-' * column_widths[idx]
50 for idx in range(len(column_widths))])
51
52 # Construct table to be printed
53 table = []
54 for row in array:
55 row_list = []
56 for idx, col in enumerate(row):
57 row_list.append(str(col).ljust(column_widths[idx]))
58 table.append(' '.join(row_list))
59
60 # Return table
61 return '\n'.join(table)
62
63
64 def handle_output(results, output_type):
65 """Print the relevant output for given output_type"""
66 if not output_type == 'QUIET':
67 if not output_type == 'JSON':
68 table = generate_table_result(
69 logger, results, output_type == 'TABLE-NO-HEADER')
70 if table:
71 print(table)
72 else:
73 try:
74 _ = (entry for entry in results)
75 json_str = json.dumps(results)
76 if json_str:
77 print(json_str)
78 except TypeError:
79 logger.debug('Output is not a JSON, and then cannot '
80 'be printed with --output=JSON parameter.')
81
82
83 def main():
84 """Main function of Lexicon."""
85 # Dynamically determine all the providers available and gather command line arguments.
86 parsed_args = generate_cli_main_parser().parse_args()
87
88 log_level = logging.getLevelName(parsed_args.log_level)
89 logging.basicConfig(stream=sys.stdout, level=log_level,
90 format='%(message)s')
91 logger.debug('Arguments: %s', parsed_args)
92
93 # In the CLI context, will get configuration interactively:
94 # * from the command line
95 # * from the environment variables
96 # * from lexicon configuration files in working directory
97 config = ConfigResolver()
98 config.with_args(parsed_args).with_env().with_config_dir(os.getcwd())
99
100 client = Client(config)
101
102 results = client.execute()
103
104 handle_output(results, parsed_args.output)
105
106
107 if __name__ == '__main__':
108 main()
109
[end of lexicon/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lexicon/cli.py b/lexicon/cli.py
--- a/lexicon/cli.py
+++ b/lexicon/cli.py
@@ -14,12 +14,10 @@
logger = logging.getLogger(__name__) # pylint: disable=C0103
-def generate_table_result(lexicon_logger, output=None, without_header=None):
- """Convert returned JSON into a nice table for command line usage"""
- try:
- _ = (entry for entry in output)
- except TypeError:
- lexicon_logger.debug('Command output is not iterable, and then cannot '
+def generate_list_table_result(lexicon_logger, output=None, without_header=None):
+ """Convert returned data from list actions into a nice table for command line usage"""
+ if not isinstance(output, list):
+ lexicon_logger.debug('Command output is not a list, and then cannot '
'be printed with --quiet parameter not enabled.')
return None
@@ -58,26 +56,43 @@
table.append(' '.join(row_list))
# Return table
- return '\n'.join(table)
+ return os.linesep.join(table)
-def handle_output(results, output_type):
+def generate_table_results(output=None, without_header=None):
+ """Convert returned data from non-list actions into a nice table for command line usage"""
+ array = []
+ str_output = str(output)
+
+ if not without_header:
+ array.append('RESULT')
+ array.append('-' * max(6, len(str_output)))
+
+ array.append(str_output)
+ return os.linesep.join(array)
+
+
+def handle_output(results, output_type, action):
"""Print the relevant output for given output_type"""
- if not output_type == 'QUIET':
- if not output_type == 'JSON':
- table = generate_table_result(
+ if output_type == 'QUIET':
+ return
+
+ if not output_type == 'JSON':
+ if action == 'list':
+ table = generate_list_table_result(
logger, results, output_type == 'TABLE-NO-HEADER')
- if table:
- print(table)
else:
- try:
- _ = (entry for entry in results)
- json_str = json.dumps(results)
- if json_str:
- print(json_str)
- except TypeError:
- logger.debug('Output is not a JSON, and then cannot '
- 'be printed with --output=JSON parameter.')
+ table = generate_table_results(results, output_type == 'TABLE-NO-HEADER')
+ if table:
+ print(table)
+ else:
+ try:
+ json_str = json.dumps(results)
+ if json_str:
+ print(json_str)
+ except TypeError:
+ logger.debug('Output is not JSON serializable, and then cannot '
+ 'be printed with --output=JSON parameter.')
def main():
@@ -101,7 +116,7 @@
results = client.execute()
- handle_output(results, parsed_args.output)
+ handle_output(results, parsed_args.output, config.resolve('lexicon:action'))
if __name__ == '__main__':
|
{"golden_diff": "diff --git a/lexicon/cli.py b/lexicon/cli.py\n--- a/lexicon/cli.py\n+++ b/lexicon/cli.py\n@@ -14,12 +14,10 @@\n logger = logging.getLogger(__name__) # pylint: disable=C0103\n \n \n-def generate_table_result(lexicon_logger, output=None, without_header=None):\n- \"\"\"Convert returned JSON into a nice table for command line usage\"\"\"\n- try:\n- _ = (entry for entry in output)\n- except TypeError:\n- lexicon_logger.debug('Command output is not iterable, and then cannot '\n+def generate_list_table_result(lexicon_logger, output=None, without_header=None):\n+ \"\"\"Convert returned data from list actions into a nice table for command line usage\"\"\"\n+ if not isinstance(output, list):\n+ lexicon_logger.debug('Command output is not a list, and then cannot '\n 'be printed with --quiet parameter not enabled.')\n return None\n \n@@ -58,26 +56,43 @@\n table.append(' '.join(row_list))\n \n # Return table\n- return '\\n'.join(table)\n+ return os.linesep.join(table)\n \n \n-def handle_output(results, output_type):\n+def generate_table_results(output=None, without_header=None):\n+ \"\"\"Convert returned data from non-list actions into a nice table for command line usage\"\"\"\n+ array = []\n+ str_output = str(output)\n+\n+ if not without_header:\n+ array.append('RESULT')\n+ array.append('-' * max(6, len(str_output)))\n+\n+ array.append(str_output)\n+ return os.linesep.join(array)\n+\n+\n+def handle_output(results, output_type, action):\n \"\"\"Print the relevant output for given output_type\"\"\"\n- if not output_type == 'QUIET':\n- if not output_type == 'JSON':\n- table = generate_table_result(\n+ if output_type == 'QUIET':\n+ return\n+\n+ if not output_type == 'JSON':\n+ if action == 'list':\n+ table = generate_list_table_result(\n logger, results, output_type == 'TABLE-NO-HEADER')\n- if table:\n- print(table)\n else:\n- try:\n- _ = (entry for entry in results)\n- json_str = json.dumps(results)\n- if json_str:\n- print(json_str)\n- except TypeError:\n- logger.debug('Output is not a JSON, and then cannot '\n- 'be printed with --output=JSON parameter.')\n+ table = generate_table_results(results, output_type == 'TABLE-NO-HEADER')\n+ if table:\n+ print(table)\n+ else:\n+ try:\n+ json_str = json.dumps(results)\n+ if json_str:\n+ print(json_str)\n+ except TypeError:\n+ logger.debug('Output is not JSON serializable, and then cannot '\n+ 'be printed with --output=JSON parameter.')\n \n \n def main():\n@@ -101,7 +116,7 @@\n \n results = client.execute()\n \n- handle_output(results, parsed_args.output)\n+ handle_output(results, parsed_args.output, config.resolve('lexicon:action'))\n \n \n if __name__ == '__main__':\n", "issue": "Memset provider: TypeError: string indices must be integers\nHi,\r\n\r\nWhen using the Memset provider with the default table formatting I get this error:\r\n\r\n```bash\r\n$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/lexicon\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py\", line 133, in main\r\n handle_output(results, parsed_args.output)\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py\", line 109, in handle_output\r\n table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER')\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py\", line 75, in generate_table_result\r\n array = [[row['id'], row['type'], row['name'], 
row['content'], row['ttl']] for row in output]\r\nTypeError: string indices must be integers\r\n```\r\n\r\nI think this is because `output` is a string not an array - when I added `print output` I got a string like `969f9caabe19859c11249333dd80aa15`.\r\n\r\nWhen I use `--output JSON` I get the same ID plus quotes:\r\n\r\n```bash\r\n$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300 --output JSON\r\n\"969f9caabe19859c11249333dd80aa15\"\r\n```\r\n\r\nI know Memset's not public so if you need any help to test it just let me know. For now I'll work around it with `--output QUIET` since I don't really care about the output here.\r\n\r\nThanks!\r\nDave\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Module for Lexicon command-line interface\"\"\"\nfrom __future__ import absolute_import, print_function\nimport json\nimport logging\nimport os\nimport sys\n\nfrom lexicon.client import Client\nfrom lexicon.config import ConfigResolver\nfrom lexicon.parser import generate_cli_main_parser\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=C0103\n\n\ndef generate_table_result(lexicon_logger, output=None, without_header=None):\n \"\"\"Convert returned JSON into a nice table for command line usage\"\"\"\n try:\n _ = (entry for entry in output)\n except TypeError:\n lexicon_logger.debug('Command output is not iterable, and then cannot '\n 'be printed with --quiet parameter not enabled.')\n return None\n\n array = [[\n row.get('id', ''),\n row.get('type', ''),\n row.get('name', ''),\n row.get('content', ''),\n row.get('ttl', '')] for row in output]\n\n # Insert header (insert before calculating the max width of each column\n # to take headers size into account)\n if not without_header:\n headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL']\n array.insert(0, headers)\n\n column_widths = [0, 0, 0, 0, 0]\n # Find max width for each column\n for row in array:\n for idx, col in enumerate(row):\n width = len(str(col))\n if width > column_widths[idx]:\n column_widths[idx] = width\n\n # Add a 'nice' separator\n if not without_header:\n array.insert(1, ['-' * column_widths[idx]\n for idx in range(len(column_widths))])\n\n # Construct table to be printed\n table = []\n for row in array:\n row_list = []\n for idx, col in enumerate(row):\n row_list.append(str(col).ljust(column_widths[idx]))\n table.append(' '.join(row_list))\n\n # Return table\n return '\\n'.join(table)\n\n\ndef handle_output(results, output_type):\n \"\"\"Print the relevant output for given output_type\"\"\"\n if not output_type == 'QUIET':\n if not output_type == 'JSON':\n table = generate_table_result(\n logger, results, output_type == 'TABLE-NO-HEADER')\n if table:\n print(table)\n else:\n try:\n _ = (entry for entry in results)\n json_str = json.dumps(results)\n if json_str:\n print(json_str)\n except TypeError:\n logger.debug('Output is not a JSON, and then cannot '\n 'be printed with --output=JSON parameter.')\n\n\ndef main():\n \"\"\"Main function of Lexicon.\"\"\"\n # Dynamically determine all the providers available and gather command line arguments.\n parsed_args = generate_cli_main_parser().parse_args()\n\n log_level = logging.getLevelName(parsed_args.log_level)\n logging.basicConfig(stream=sys.stdout, level=log_level,\n format='%(message)s')\n logger.debug('Arguments: %s', parsed_args)\n\n # In the CLI context, will get configuration interactively:\n # * from the command line\n # * from the environment variables\n # * from lexicon configuration files in working 
directory\n config = ConfigResolver()\n config.with_args(parsed_args).with_env().with_config_dir(os.getcwd())\n\n client = Client(config)\n\n results = client.execute()\n\n handle_output(results, parsed_args.output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lexicon/cli.py"}]}
| 1,962 | 703 |
gh_patches_debug_31143 | rasdani/github-patches | git_diff | pantsbuild__pants-17490 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Coarsened target calculation for `pylint` pulls in more files than it should on v2.15.0a0
**Describe the bug**
While testing `v2.15.0a0`, I saw that `./pants lint` would consistently freeze / be OOM killed. Through `py-spy` and logging I found that:
* Much time was being spent merging together source digests when setting up `pylint` runs
* The number of source digests in each of those setups was much larger than expected (tens of thousands of files for an input batch size of a few hundred)
* `pylint` batches of different sizes always ended up with the same number of source digests
While looking through the `pylint` changes in v2.15, I found that coarsened target calculation is currently running in the "partitioner" rule (see [here](https://github.com/pantsbuild/pants/blob/main/src/python/pants/backend/python/lint/pylint/rules.py#L89-L92)). This will result in too many targets being associated with each `pylint` batch, because the partitions returned by that rule are re-batched into smaller chunks according to `[lint].batch_size`, and there's no support in the re-batching logic for subsetting the partition metadata.
We should push the calculation of coarsened targets into the "runner" rule for `pylint`, so we only compute & hydrate the transitive dependencies that are relevant for the specific inputs used in each batch.
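A minimal sketch of that direction, assuming the runner rule still receives the partition's full `CoarsenedTargets` as metadata and the batch's field sets as `request.elements` (names follow the existing `rules.py` shown below):

```python
# Sketch only: subset the partition-level coarsened targets to this batch inside
# the runner rule, so only the relevant transitive closure gets hydrated.
all_cts_by_address = request.partition_metadata.coarsened_targets.by_address()
batch_cts = CoarsenedTargets(
    all_cts_by_address[field_set.address] for field_set in request.elements
)
batch_closure = tuple(batch_cts.closure())
# ...then build RequirementsPexRequest / PythonSourceFilesRequest from
# `batch_closure` instead of the whole partition's closure.
```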
**Pants version**
v2.15.0a0
**OS**
Both
</issue>
<code>
[start of src/python/pants/backend/python/lint/pylint/rules.py]
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7 from typing import Tuple
8
9 from pants.backend.python.lint.pylint.subsystem import (
10 Pylint,
11 PylintFieldSet,
12 PylintFirstPartyPlugins,
13 )
14 from pants.backend.python.subsystems.setup import PythonSetup
15 from pants.backend.python.util_rules import pex_from_targets
16 from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
17 from pants.backend.python.util_rules.partition import (
18 _partition_by_interpreter_constraints_and_resolve,
19 )
20 from pants.backend.python.util_rules.pex import (
21 Pex,
22 PexRequest,
23 VenvPex,
24 VenvPexProcess,
25 VenvPexRequest,
26 )
27 from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
28 from pants.backend.python.util_rules.python_sources import (
29 PythonSourceFiles,
30 PythonSourceFilesRequest,
31 )
32 from pants.core.goals.lint import REPORT_DIR, LintResult, LintTargetsRequest, Partitions
33 from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
34 from pants.core.util_rules.partitions import Partition
35 from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix
36 from pants.engine.process import FallibleProcessResult
37 from pants.engine.rules import Get, MultiGet, collect_rules, rule
38 from pants.engine.target import CoarsenedTargets, CoarsenedTargetsRequest
39 from pants.util.logging import LogLevel
40 from pants.util.strutil import pluralize
41
42
43 @dataclass(frozen=True)
44 class PartitionMetadata:
45 coarsened_targets: CoarsenedTargets
46 # NB: These are the same across every element in a partition
47 resolve_description: str | None
48 interpreter_constraints: InterpreterConstraints
49
50 @property
51 def description(self) -> str:
52 ics = str(sorted(str(c) for c in self.interpreter_constraints))
53 return f"{self.resolve_description}, {ics}" if self.resolve_description else ics
54
55
56 class PylintRequest(LintTargetsRequest):
57 field_set_type = PylintFieldSet
58 tool_subsystem = Pylint
59
60
61 def generate_argv(field_sets: tuple[PylintFieldSet, ...], pylint: Pylint) -> Tuple[str, ...]:
62 args = []
63 if pylint.config is not None:
64 args.append(f"--rcfile={pylint.config}")
65 args.append("--jobs={pants_concurrency}")
66 args.extend(pylint.args)
67 args.extend(field_set.source.file_path for field_set in field_sets)
68 return tuple(args)
69
70
71 @rule(desc="Determine if necessary to partition Pylint input", level=LogLevel.DEBUG)
72 async def partition_pylint(
73 request: PylintRequest.PartitionRequest[PylintFieldSet],
74 pylint: Pylint,
75 python_setup: PythonSetup,
76 first_party_plugins: PylintFirstPartyPlugins,
77 ) -> Partitions[PylintFieldSet, PartitionMetadata]:
78 if pylint.skip:
79 return Partitions()
80
81 first_party_ics = InterpreterConstraints.create_from_compatibility_fields(
82 first_party_plugins.interpreter_constraints_fields, python_setup
83 )
84
85 resolve_and_interpreter_constraints_to_field_sets = (
86 _partition_by_interpreter_constraints_and_resolve(request.field_sets, python_setup)
87 )
88
89 coarsened_targets = await Get(
90 CoarsenedTargets,
91 CoarsenedTargetsRequest(field_set.address for field_set in request.field_sets),
92 )
93 coarsened_targets_by_address = coarsened_targets.by_address()
94
95 return Partitions(
96 Partition(
97 tuple(field_sets),
98 PartitionMetadata(
99 CoarsenedTargets(
100 coarsened_targets_by_address[field_set.address] for field_set in field_sets
101 ),
102 resolve if len(python_setup.resolves) > 1 else None,
103 InterpreterConstraints.merge((interpreter_constraints, first_party_ics)),
104 ),
105 )
106 for (
107 resolve,
108 interpreter_constraints,
109 ), field_sets, in resolve_and_interpreter_constraints_to_field_sets.items()
110 )
111
112
113 @rule(desc="Lint using Pylint", level=LogLevel.DEBUG)
114 async def run_pylint(
115 request: PylintRequest.Batch[PylintFieldSet, PartitionMetadata],
116 pylint: Pylint,
117 first_party_plugins: PylintFirstPartyPlugins,
118 ) -> LintResult:
119 assert request.partition_metadata is not None
120
121 requirements_pex_get = Get(
122 Pex,
123 RequirementsPexRequest(
124 (target.address for target in request.partition_metadata.coarsened_targets.closure()),
125 # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
126 # a different version for the requirements than the other two PEXes, which can result
127 # in a PEX runtime error about missing dependencies.
128 hardcoded_interpreter_constraints=request.partition_metadata.interpreter_constraints,
129 ),
130 )
131
132 pylint_pex_get = Get(
133 Pex,
134 PexRequest,
135 pylint.to_pex_request(
136 interpreter_constraints=request.partition_metadata.interpreter_constraints,
137 extra_requirements=first_party_plugins.requirement_strings,
138 ),
139 )
140
141 sources_get = Get(
142 PythonSourceFiles,
143 PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),
144 )
145 # Ensure that the empty report dir exists.
146 report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
147
148 (pylint_pex, requirements_pex, sources, report_directory,) = await MultiGet(
149 pylint_pex_get,
150 requirements_pex_get,
151 sources_get,
152 report_directory_digest_get,
153 )
154
155 pylint_runner_pex, config_files = await MultiGet(
156 Get(
157 VenvPex,
158 VenvPexRequest(
159 PexRequest(
160 output_filename="pylint_runner.pex",
161 interpreter_constraints=request.partition_metadata.interpreter_constraints,
162 main=pylint.main,
163 internal_only=True,
164 pex_path=[pylint_pex, requirements_pex],
165 ),
166 # TODO(John Sirois): Remove this (change to the default of symlinks) when we can
167 # upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470
168 # resolved.
169 site_packages_copies=True,
170 ),
171 ),
172 Get(
173 ConfigFiles,
174 ConfigFilesRequest,
175 pylint.config_request(sources.source_files.snapshot.dirs),
176 ),
177 )
178
179 pythonpath = list(sources.source_roots)
180 if first_party_plugins:
181 pythonpath.append(first_party_plugins.PREFIX)
182
183 input_digest = await Get(
184 Digest,
185 MergeDigests(
186 (
187 config_files.snapshot.digest,
188 first_party_plugins.sources_digest,
189 sources.source_files.snapshot.digest,
190 report_directory,
191 )
192 ),
193 )
194
195 result = await Get(
196 FallibleProcessResult,
197 VenvPexProcess(
198 pylint_runner_pex,
199 argv=generate_argv(request.elements, pylint),
200 input_digest=input_digest,
201 output_directories=(REPORT_DIR,),
202 extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
203 concurrency_available=len(request.elements),
204 description=f"Run Pylint on {pluralize(len(request.elements), 'target')}.",
205 level=LogLevel.DEBUG,
206 ),
207 )
208 report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
209 return LintResult.create(request, result, report=report)
210
211
212 def rules():
213 return [
214 *collect_rules(),
215 *PylintRequest.rules(),
216 *pex_from_targets.rules(),
217 ]
218
[end of src/python/pants/backend/python/lint/pylint/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/backend/python/lint/pylint/rules.py b/src/python/pants/backend/python/lint/pylint/rules.py
--- a/src/python/pants/backend/python/lint/pylint/rules.py
+++ b/src/python/pants/backend/python/lint/pylint/rules.py
@@ -118,10 +118,20 @@
) -> LintResult:
assert request.partition_metadata is not None
+ # The coarsened targets in the incoming request are for all targets in the request's original
+ # partition. Since the core `lint` logic re-batches inputs according to `[lint].batch_size`,
+ # this could be many more targets than are actually needed to lint the specific batch of files
+ # received by this rule. Subset the CTs one more time here to only those that are relevant.
+ all_coarsened_targets_by_address = request.partition_metadata.coarsened_targets.by_address()
+ coarsened_targets = CoarsenedTargets(
+ all_coarsened_targets_by_address[field_set.address] for field_set in request.elements
+ )
+ coarsened_closure = tuple(coarsened_targets.closure())
+
requirements_pex_get = Get(
Pex,
RequirementsPexRequest(
- (target.address for target in request.partition_metadata.coarsened_targets.closure()),
+ (target.address for target in coarsened_closure),
# NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
# a different version for the requirements than the other two PEXes, which can result
# in a PEX runtime error about missing dependencies.
@@ -140,7 +150,7 @@
sources_get = Get(
PythonSourceFiles,
- PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),
+ PythonSourceFilesRequest(coarsened_closure),
)
# Ensure that the empty report dir exists.
report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
|
{"golden_diff": "diff --git a/src/python/pants/backend/python/lint/pylint/rules.py b/src/python/pants/backend/python/lint/pylint/rules.py\n--- a/src/python/pants/backend/python/lint/pylint/rules.py\n+++ b/src/python/pants/backend/python/lint/pylint/rules.py\n@@ -118,10 +118,20 @@\n ) -> LintResult:\n assert request.partition_metadata is not None\n \n+ # The coarsened targets in the incoming request are for all targets in the request's original\n+ # partition. Since the core `lint` logic re-batches inputs according to `[lint].batch_size`,\n+ # this could be many more targets than are actually needed to lint the specific batch of files\n+ # received by this rule. Subset the CTs one more time here to only those that are relevant.\n+ all_coarsened_targets_by_address = request.partition_metadata.coarsened_targets.by_address()\n+ coarsened_targets = CoarsenedTargets(\n+ all_coarsened_targets_by_address[field_set.address] for field_set in request.elements\n+ )\n+ coarsened_closure = tuple(coarsened_targets.closure())\n+\n requirements_pex_get = Get(\n Pex,\n RequirementsPexRequest(\n- (target.address for target in request.partition_metadata.coarsened_targets.closure()),\n+ (target.address for target in coarsened_closure),\n # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using\n # a different version for the requirements than the other two PEXes, which can result\n # in a PEX runtime error about missing dependencies.\n@@ -140,7 +150,7 @@\n \n sources_get = Get(\n PythonSourceFiles,\n- PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),\n+ PythonSourceFilesRequest(coarsened_closure),\n )\n # Ensure that the empty report dir exists.\n report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))\n", "issue": "Coarsened target calculation for `pylint` pulls in more files than it should on v2.15.0a0\n**Describe the bug**\r\n\r\nWhile testing `v2.15.0a0`, I saw that `./pants lint` would consistently freeze / be OOM killed. Through `py-spy` and logging I found that:\r\n\r\n* Much time was being spent merging together source digests when setting up `pylint` runs\r\n* The number of source digests in each of ^^^ setups was much larger than expected (tens of thousands of files for an input batch size of a few hundred)\r\n* `pylint` batches of different sizes always ended up with the same number of source digests\r\n\r\nWhile looking through the `pylint` changes in v2.15, I found that coarsened target calculation is currently running in the \"partitioner\" rule (see [here](https://github.com/pantsbuild/pants/blob/main/src/python/pants/backend/python/lint/pylint/rules.py#L89-L92)). 
This will result in too many targets being associated with each `pylint` batch, because the partitions returned by that rule are re-batched into smaller chunks according to `[lint].batch_size`, and there's no support in the re-batching logic for subsetting the partition metadata.\r\n\r\nWe should push the calculation of coarsened targets into the \"runner\" rule for `pylint`, so we only compute & hydrate the transitive dependencies that are relevant for the specific inputs used in each batch.\r\n\r\n**Pants version**\r\n\r\nv2.15.0a0\r\n\r\n**OS**\r\n\r\nBoth\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Tuple\n\nfrom pants.backend.python.lint.pylint.subsystem import (\n Pylint,\n PylintFieldSet,\n PylintFirstPartyPlugins,\n)\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.util_rules import pex_from_targets\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.backend.python.util_rules.partition import (\n _partition_by_interpreter_constraints_and_resolve,\n)\nfrom pants.backend.python.util_rules.pex import (\n Pex,\n PexRequest,\n VenvPex,\n VenvPexProcess,\n VenvPexRequest,\n)\nfrom pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.lint import REPORT_DIR, LintResult, LintTargetsRequest, Partitions\nfrom pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest\nfrom pants.core.util_rules.partitions import Partition\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix\nfrom pants.engine.process import FallibleProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import CoarsenedTargets, CoarsenedTargetsRequest\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\n@dataclass(frozen=True)\nclass PartitionMetadata:\n coarsened_targets: CoarsenedTargets\n # NB: These are the same across every element in a partition\n resolve_description: str | None\n interpreter_constraints: InterpreterConstraints\n\n @property\n def description(self) -> str:\n ics = str(sorted(str(c) for c in self.interpreter_constraints))\n return f\"{self.resolve_description}, {ics}\" if self.resolve_description else ics\n\n\nclass PylintRequest(LintTargetsRequest):\n field_set_type = PylintFieldSet\n tool_subsystem = Pylint\n\n\ndef generate_argv(field_sets: tuple[PylintFieldSet, ...], pylint: Pylint) -> Tuple[str, ...]:\n args = []\n if pylint.config is not None:\n args.append(f\"--rcfile={pylint.config}\")\n args.append(\"--jobs={pants_concurrency}\")\n args.extend(pylint.args)\n args.extend(field_set.source.file_path for field_set in field_sets)\n return tuple(args)\n\n\n@rule(desc=\"Determine if necessary to partition Pylint input\", level=LogLevel.DEBUG)\nasync def partition_pylint(\n request: PylintRequest.PartitionRequest[PylintFieldSet],\n pylint: Pylint,\n python_setup: PythonSetup,\n first_party_plugins: PylintFirstPartyPlugins,\n) -> Partitions[PylintFieldSet, PartitionMetadata]:\n if pylint.skip:\n return Partitions()\n\n first_party_ics = InterpreterConstraints.create_from_compatibility_fields(\n 
first_party_plugins.interpreter_constraints_fields, python_setup\n )\n\n resolve_and_interpreter_constraints_to_field_sets = (\n _partition_by_interpreter_constraints_and_resolve(request.field_sets, python_setup)\n )\n\n coarsened_targets = await Get(\n CoarsenedTargets,\n CoarsenedTargetsRequest(field_set.address for field_set in request.field_sets),\n )\n coarsened_targets_by_address = coarsened_targets.by_address()\n\n return Partitions(\n Partition(\n tuple(field_sets),\n PartitionMetadata(\n CoarsenedTargets(\n coarsened_targets_by_address[field_set.address] for field_set in field_sets\n ),\n resolve if len(python_setup.resolves) > 1 else None,\n InterpreterConstraints.merge((interpreter_constraints, first_party_ics)),\n ),\n )\n for (\n resolve,\n interpreter_constraints,\n ), field_sets, in resolve_and_interpreter_constraints_to_field_sets.items()\n )\n\n\n@rule(desc=\"Lint using Pylint\", level=LogLevel.DEBUG)\nasync def run_pylint(\n request: PylintRequest.Batch[PylintFieldSet, PartitionMetadata],\n pylint: Pylint,\n first_party_plugins: PylintFirstPartyPlugins,\n) -> LintResult:\n assert request.partition_metadata is not None\n\n requirements_pex_get = Get(\n Pex,\n RequirementsPexRequest(\n (target.address for target in request.partition_metadata.coarsened_targets.closure()),\n # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using\n # a different version for the requirements than the other two PEXes, which can result\n # in a PEX runtime error about missing dependencies.\n hardcoded_interpreter_constraints=request.partition_metadata.interpreter_constraints,\n ),\n )\n\n pylint_pex_get = Get(\n Pex,\n PexRequest,\n pylint.to_pex_request(\n interpreter_constraints=request.partition_metadata.interpreter_constraints,\n extra_requirements=first_party_plugins.requirement_strings,\n ),\n )\n\n sources_get = Get(\n PythonSourceFiles,\n PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),\n )\n # Ensure that the empty report dir exists.\n report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))\n\n (pylint_pex, requirements_pex, sources, report_directory,) = await MultiGet(\n pylint_pex_get,\n requirements_pex_get,\n sources_get,\n report_directory_digest_get,\n )\n\n pylint_runner_pex, config_files = await MultiGet(\n Get(\n VenvPex,\n VenvPexRequest(\n PexRequest(\n output_filename=\"pylint_runner.pex\",\n interpreter_constraints=request.partition_metadata.interpreter_constraints,\n main=pylint.main,\n internal_only=True,\n pex_path=[pylint_pex, requirements_pex],\n ),\n # TODO(John Sirois): Remove this (change to the default of symlinks) when we can\n # upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470\n # resolved.\n site_packages_copies=True,\n ),\n ),\n Get(\n ConfigFiles,\n ConfigFilesRequest,\n pylint.config_request(sources.source_files.snapshot.dirs),\n ),\n )\n\n pythonpath = list(sources.source_roots)\n if first_party_plugins:\n pythonpath.append(first_party_plugins.PREFIX)\n\n input_digest = await Get(\n Digest,\n MergeDigests(\n (\n config_files.snapshot.digest,\n first_party_plugins.sources_digest,\n sources.source_files.snapshot.digest,\n report_directory,\n )\n ),\n )\n\n result = await Get(\n FallibleProcessResult,\n VenvPexProcess(\n pylint_runner_pex,\n argv=generate_argv(request.elements, pylint),\n input_digest=input_digest,\n output_directories=(REPORT_DIR,),\n extra_env={\"PEX_EXTRA_SYS_PATH\": \":\".join(pythonpath)},\n 
concurrency_available=len(request.elements),\n description=f\"Run Pylint on {pluralize(len(request.elements), 'target')}.\",\n level=LogLevel.DEBUG,\n ),\n )\n report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))\n return LintResult.create(request, result, report=report)\n\n\ndef rules():\n return [\n *collect_rules(),\n *PylintRequest.rules(),\n *pex_from_targets.rules(),\n ]\n", "path": "src/python/pants/backend/python/lint/pylint/rules.py"}]}
| 3,078 | 439 |
gh_patches_debug_3258 | rasdani/github-patches | git_diff | ManimCommunity__manim-755 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"manim -" is not working
I broke this when revamping the config system. Thanks @naveen521kk for reporting it.
</issue>
<code>
[start of manim/utils/module_ops.py]
1 from .. import constants, logger, console, config
2 import importlib.util
3 import inspect
4 import os
5 from pathlib import Path
6 import sys
7 import types
8 import re
9
10
11 def get_module(file_name):
12 if file_name == "-":
13 module = types.ModuleType("input_scenes")
14 logger.info(
15 "Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):"
16 )
17 code = sys.stdin.read()
18 if not code.startswith("from manim import"):
19 logger.warn(
20 "Didn't find an import statement for Manim. Importing automatically..."
21 )
22 code = "from manim import *\n" + code
23 logger.info("Rendering animation from typed code...")
24 try:
25 exec(code, module.__dict__)
26 return module
27 except Exception as e:
28 logger.error(f"Failed to render scene: {str(e)}")
29 sys.exit(2)
30 else:
31 if Path(file_name).exists():
32 ext = file_name.suffix
33 if ext != ".py":
34 raise ValueError(f"{file_name} is not a valid Manim python script.")
35 module_name = ext.replace(os.sep, ".").split(".")[-1]
36 spec = importlib.util.spec_from_file_location(module_name, file_name)
37 module = importlib.util.module_from_spec(spec)
38 sys.modules[module_name] = module
39 spec.loader.exec_module(module)
40 return module
41 else:
42 raise FileNotFoundError(f"{file_name} not found")
43
44
45 def get_scene_classes_from_module(module):
46 from ..scene.scene import Scene
47
48 def is_child_scene(obj, module):
49 return (
50 inspect.isclass(obj)
51 and issubclass(obj, Scene)
52 and obj != Scene
53 and obj.__module__.startswith(module.__name__)
54 )
55
56 return [
57 member[1]
58 for member in inspect.getmembers(module, lambda x: is_child_scene(x, module))
59 ]
60
61
62 def get_scenes_to_render(scene_classes):
63 if not scene_classes:
64 logger.error(constants.NO_SCENE_MESSAGE)
65 return []
66 if config["write_all"]:
67 return scene_classes
68 result = []
69 for scene_name in config["scene_names"]:
70 found = False
71 for scene_class in scene_classes:
72 if scene_class.__name__ == scene_name:
73 result.append(scene_class)
74 found = True
75 break
76 if not found and (scene_name != ""):
77 logger.error(constants.SCENE_NOT_FOUND_MESSAGE.format(scene_name))
78 if result:
79 return result
80 return (
81 [scene_classes[0]]
82 if len(scene_classes) == 1
83 else prompt_user_for_choice(scene_classes)
84 )
85
86
87 def prompt_user_for_choice(scene_classes):
88 num_to_class = {}
89 for count, scene_class in enumerate(scene_classes):
90 count += 1 # start with 1 instead of 0
91 name = scene_class.__name__
92 console.print(f"{count}: {name}", style="logging.level.info")
93 num_to_class[count] = scene_class
94 try:
95 user_input = console.input(
96 f"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]"
97 )
98 return [
99 num_to_class[int(num_str)]
100 for num_str in re.split(r"\s*,\s*", user_input.strip())
101 ]
102 except KeyError:
103 logger.error(constants.INVALID_NUMBER_MESSAGE)
104 sys.exit(2)
105 except EOFError:
106 sys.exit(1)
107
[end of manim/utils/module_ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/manim/utils/module_ops.py b/manim/utils/module_ops.py
--- a/manim/utils/module_ops.py
+++ b/manim/utils/module_ops.py
@@ -9,7 +9,7 @@
def get_module(file_name):
- if file_name == "-":
+ if str(file_name) == "-":
module = types.ModuleType("input_scenes")
logger.info(
"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):"
|
{"golden_diff": "diff --git a/manim/utils/module_ops.py b/manim/utils/module_ops.py\n--- a/manim/utils/module_ops.py\n+++ b/manim/utils/module_ops.py\n@@ -9,7 +9,7 @@\n \n \n def get_module(file_name):\n- if file_name == \"-\":\n+ if str(file_name) == \"-\":\n module = types.ModuleType(\"input_scenes\")\n logger.info(\n \"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):\"\n", "issue": "\"manim -\" is not working\nI broke this when revamping the config system. Thanks @naveen521kk for reporting\n", "before_files": [{"content": "from .. import constants, logger, console, config\nimport importlib.util\nimport inspect\nimport os\nfrom pathlib import Path\nimport sys\nimport types\nimport re\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n logger.info(\n \"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):\"\n )\n code = sys.stdin.read()\n if not code.startswith(\"from manim import\"):\n logger.warn(\n \"Didn't find an import statement for Manim. Importing automatically...\"\n )\n code = \"from manim import *\\n\" + code\n logger.info(\"Rendering animation from typed code...\")\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if Path(file_name).exists():\n ext = file_name.suffix\n if ext != \".py\":\n raise ValueError(f\"{file_name} is not a valid Manim python script.\")\n module_name = ext.replace(os.sep, \".\").split(\".\")[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = module\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f\"{file_name} not found\")\n\n\ndef get_scene_classes_from_module(module):\n from ..scene.scene import Scene\n\n def is_child_scene(obj, module):\n return (\n inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__)\n )\n\n return [\n member[1]\n for member in inspect.getmembers(module, lambda x: is_child_scene(x, module))\n ]\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(constants.SCENE_NOT_FOUND_MESSAGE.format(scene_name))\n if result:\n return result\n return (\n [scene_classes[0]]\n if len(scene_classes) == 1\n else prompt_user_for_choice(scene_classes)\n )\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n console.print(f\"{count}: {name}\", style=\"logging.level.info\")\n num_to_class[count] = scene_class\n try:\n user_input = console.input(\n f\"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]\"\n )\n return [\n num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())\n ]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n", "path": "manim/utils/module_ops.py"}]}
| 1,519 | 114 |
gh_patches_debug_12818 | rasdani/github-patches | git_diff | replicate__cog-620 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
What should `cog predict` do if you don't pass an input name?
The syntax of `cog predict` is this:
cog predict -i [email protected]
But you can also do this:
cog predict -i @image.jpg
Which implicitly means an input name of `input`. This is a neat shorthand but a bit weird for a few reasons:
- `input` is a Python built-in, so we shouldn't really be encouraging its use as a variable name.
- It is a magic name.
- For a sufficiently complex model, you probably don't want to call it `input`.
What could we do that is better here? Maybe if you don't pass a name, it defaults to the _first_ input defined, rather than a magic name? This is vaguely backwards compatible, which is neat.
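A rough sketch of the "default to the first input" idea, in Python purely for illustration — the real argument handling lives in the CLI, and `resolve_input_name` is a hypothetical helper, not existing Cog code:

```python
import inspect

def resolve_input_name(predict_fn, given_name=None):
    # Pick the input name for a bare `-i @image.jpg` style argument (sketch).
    if given_name:
        return given_name
    params = [name for name in inspect.signature(predict_fn).parameters if name != "self"]
    # Fall back to the first declared input rather than a magic "input" name.
    return params[0] if params else "input"
```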
</issue>
<code>
[start of pkg/cli/init-templates/predict.py]
1 # Prediction interface for Cog ⚙️
2 # https://github.com/replicate/cog/blob/main/docs/python.md
3
4 from cog import BasePredictor, Input, Path
5
6
7 class Predictor(BasePredictor):
8 def setup(self):
9 """Load the model into memory to make running multiple predictions efficient"""
10 # self.model = torch.load("./weights.pth")
11
12 def predict(
13 self,
14 input: Path = Input(description="Grayscale input image"),
15 scale: float = Input(
16 description="Factor to scale image by", ge=0, le=10, default=1.5
17 ),
18 ) -> Path:
19 """Run a single prediction on the model"""
20 # processed_input = preprocess(input)
21 # output = self.model(processed_input, scale)
22 # return postprocess(output)
23
[end of pkg/cli/init-templates/predict.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pkg/cli/init-templates/predict.py b/pkg/cli/init-templates/predict.py
--- a/pkg/cli/init-templates/predict.py
+++ b/pkg/cli/init-templates/predict.py
@@ -11,12 +11,12 @@
def predict(
self,
- input: Path = Input(description="Grayscale input image"),
+ image: Path = Input(description="Grayscale input image"),
scale: float = Input(
description="Factor to scale image by", ge=0, le=10, default=1.5
),
) -> Path:
"""Run a single prediction on the model"""
- # processed_input = preprocess(input)
- # output = self.model(processed_input, scale)
+ # processed_input = preprocess(image)
+ # output = self.model(processed_image, scale)
# return postprocess(output)
|
{"golden_diff": "diff --git a/pkg/cli/init-templates/predict.py b/pkg/cli/init-templates/predict.py\n--- a/pkg/cli/init-templates/predict.py\n+++ b/pkg/cli/init-templates/predict.py\n@@ -11,12 +11,12 @@\n \n def predict(\n self,\n- input: Path = Input(description=\"Grayscale input image\"),\n+ image: Path = Input(description=\"Grayscale input image\"),\n scale: float = Input(\n description=\"Factor to scale image by\", ge=0, le=10, default=1.5\n ),\n ) -> Path:\n \"\"\"Run a single prediction on the model\"\"\"\n- # processed_input = preprocess(input)\n- # output = self.model(processed_input, scale)\n+ # processed_input = preprocess(image)\n+ # output = self.model(processed_image, scale)\n # return postprocess(output)\n", "issue": "What should `cog predict` do if you don't pass an input name?\nThe syntax of `cog predict` is this:\r\n\r\n cog predict -i [email protected]\r\n\r\nBut you can also do this:\r\n\r\n cog predict -i @image.jpg\r\n\r\nWhich implicitly means an input name of `input`. This is a neat short hand but a bit weird for a few reasons:\r\n\r\n- `input` is a Python built-in, so we should really be encouraging using that as a variable name.\r\n- It is a magic name.\r\n- For a sufficiently complex model, you probably don't want to call it `input`.\r\n\r\nWhat could we do that is better here? Maybe if you don't pass a name, it defaults to the _first_ input defined, rather than a magic name? This is vaguely backwards compatible, which is neat.\n", "before_files": [{"content": "# Prediction interface for Cog \u2699\ufe0f\n# https://github.com/replicate/cog/blob/main/docs/python.md\n\nfrom cog import BasePredictor, Input, Path\n\n\nclass Predictor(BasePredictor):\n def setup(self):\n \"\"\"Load the model into memory to make running multiple predictions efficient\"\"\"\n # self.model = torch.load(\"./weights.pth\")\n\n def predict(\n self,\n input: Path = Input(description=\"Grayscale input image\"),\n scale: float = Input(\n description=\"Factor to scale image by\", ge=0, le=10, default=1.5\n ),\n ) -> Path:\n \"\"\"Run a single prediction on the model\"\"\"\n # processed_input = preprocess(input)\n # output = self.model(processed_input, scale)\n # return postprocess(output)\n", "path": "pkg/cli/init-templates/predict.py"}]}
| 923 | 193 |
gh_patches_debug_40554 | rasdani/github-patches | git_diff | ESMCI__cime-886 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bless_test_results - can not find create_test command
I am using tag:
https://github.com/CESM-Development/cime/tags/cime5.2.0-alpha.10
When I issue the command:
/glade/u/home/cacraig/cam5_4_94/cime/scripts/Tools/bless_test_results -t '' -c '' -r /glade/scratch/cacraig/aux_cam_20161123172411 -b cam5_4_94-copy6 -f
I get the following error:
FAILED TO BLESS TEST: SMS_Lm13.f09_f09.F2000_DEV.yellowstone_intel.cam-outfrq1m_clm5, reason Namelist regen failed: '/bin/sh: create_test: command not found'
The baseline directory created by this script has the netCDF files in it, but no namelists are copied. I've tried running this command from various locations, including the cime/scripts directory where create_test resides, and cannot eliminate this error.
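For reference, here is a sketch of how the blessing step could regenerate namelists in-process instead of shelling out to `create_test`. It assumes `case` is an already-constructed CIME `Case` object for the test directory; `bless_namelists` is a hypothetical helper, and `case_cmpgen_namelists` is the function shown in the code below:

```python
from CIME.case_cmpgen_namelists import case_cmpgen_namelists

def bless_namelists(case, baseline_name):
    # Sketch: generate=True copies CaseDocs and user_nl* files into the named
    # baseline directly, so no external `create_test` invocation is needed.
    return case_cmpgen_namelists(case, generate=True, generate_name=baseline_name)
```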
</issue>
<code>
[start of utils/python/CIME/case_cmpgen_namelists.py]
1 """
2 Library for case.cmpgen_namelists.
3 """
4
5 from CIME.XML.standard_module_setup import *
6
7 from CIME.preview_namelists import create_namelists
8 from CIME.compare_namelists import is_namelist_file, compare_namelist_files
9 from CIME.simple_compare import compare_files
10 from CIME.utils import get_current_branch, append_status
11 from CIME.test_status import *
12
13 import os, shutil, traceback, stat, glob
14
15 logger = logging.getLogger(__name__)
16
17 def _do_full_nl_comp(case, test, compare_name):
18 test_dir = case.get_value("CASEROOT")
19 casedoc_dir = os.path.join(test_dir, "CaseDocs")
20 baseline_root = case.get_value("BASELINE_ROOT")
21
22 all_match = True
23 baseline_dir = os.path.join(baseline_root, compare_name, test)
24 baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")
25
26 # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!)
27 # TODO: Namelist files should have consistent suffix
28 all_items_to_compare = [item for item in glob.glob("%s/*" % casedoc_dir)\
29 if "README" not in os.path.basename(item)\
30 and not item.endswith("doc")\
31 and not item.endswith("prescribed")\
32 and not os.path.basename(item).startswith(".")] + \
33 glob.glob("%s/*user_nl*" % test_dir)
34
35 comments = ""
36 for item in all_items_to_compare:
37 baseline_counterpart = os.path.join(baseline_casedocs \
38 if os.path.dirname(item).endswith("CaseDocs") \
39 else baseline_dir,os.path.basename(item))
40 if not os.path.exists(baseline_counterpart):
41 comments += "Missing baseline namelist '%s'\n" % baseline_counterpart
42 all_match = False
43 else:
44 if is_namelist_file(item):
45 success, current_comments = compare_namelist_files(baseline_counterpart, item, test)
46 else:
47 success, current_comments = compare_files(baseline_counterpart, item, test)
48
49 all_match &= success
50 comments += current_comments
51
52 logging.info(comments)
53 return all_match, comments
54
55 def _do_full_nl_gen(case, test, generate_name):
56 test_dir = case.get_value("CASEROOT")
57 casedoc_dir = os.path.join(test_dir, "CaseDocs")
58 baseline_root = case.get_value("BASELINE_ROOT")
59
60 baseline_dir = os.path.join(baseline_root, generate_name, test)
61 baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")
62
63 if not os.path.isdir(baseline_dir):
64 os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)
65
66 if os.path.isdir(baseline_casedocs):
67 shutil.rmtree(baseline_casedocs)
68
69 shutil.copytree(casedoc_dir, baseline_casedocs)
70 os.chmod(baseline_casedocs, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)
71 for item in glob.glob("%s/*" % baseline_casedocs):
72 os.chmod(item, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
73
74 for item in glob.glob(os.path.join(test_dir, "user_nl*")):
75 preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item))
76 if (os.path.exists(preexisting_baseline)):
77 os.remove(preexisting_baseline)
78
79 shutil.copy2(item, baseline_dir)
80 os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
81
82 def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):
83 expect(case.get_value("TEST"), "Only makes sense to run this for a test case")
84
85 caseroot, casebaseid = case.get_value("CASEROOT"), case.get_value("CASEBASEID")
86
87 if not compare:
88 compare = case.get_value("COMPARE_BASELINE")
89 if not generate:
90 generate = case.get_value("GENERATE_BASELINE")
91
92 if not compare and not generate:
93 logging.info("Nothing to do")
94 return True
95
96 # create namelists for case if they haven't been already
97 casedocs = os.path.join(caseroot, "CaseDocs")
98 if not os.path.exists(os.path.join(casedocs, "drv_in")):
99 create_namelists(case)
100
101 test_name = casebaseid if casebaseid is not None else case.get_value("CASE")
102 with TestStatus(test_dir=caseroot, test_name=test_name) as ts:
103 try:
104 # Inside this try are where we catch non-fatal errors, IE errors involving
105 # baseline operations which may not directly impact the functioning of the viability of this case
106 if compare and not compare_name:
107 compare_name = case.get_value("BASELINE_NAME_CMP")
108 compare_name = get_current_branch() if compare_name is None else compare_name
109 expect(compare_name, "Was asked to do baseline compare but unable to determine baseline name")
110 logging.info("Comparing namelists with baselines '%s'" % compare_name)
111 if generate and not generate_name:
112 generate_name = case.get_value("BASELINE_NAME_GEN")
113 generate_name = get_current_branch() if generate_name is None else generate_name
114 expect(generate_name, "Was asked to do baseline generation but unable to determine baseline name")
115 logging.info("Generating namelists to baselines '%s'" % generate_name)
116
117 success = True
118 output = ""
119 if compare:
120 success, output = _do_full_nl_comp(case, test_name, compare_name)
121 if generate:
122 _do_full_nl_gen(case, test_name, generate_name)
123 except:
124 ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)
125 success = False
126 warn = "Exception during namelist operations:\n%s\n%s" % (sys.exc_info()[1], traceback.format_exc())
127 output += warn
128 logging.warning(warn)
129 finally:
130 ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)
131 append_status(output, caseroot=caseroot, sfile="TestStatus.log")
132
133 return success
134
135
[end of utils/python/CIME/case_cmpgen_namelists.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/utils/python/CIME/case_cmpgen_namelists.py b/utils/python/CIME/case_cmpgen_namelists.py
--- a/utils/python/CIME/case_cmpgen_namelists.py
+++ b/utils/python/CIME/case_cmpgen_namelists.py
@@ -14,10 +14,10 @@
logger = logging.getLogger(__name__)
-def _do_full_nl_comp(case, test, compare_name):
+def _do_full_nl_comp(case, test, compare_name, baseline_root=None):
test_dir = case.get_value("CASEROOT")
casedoc_dir = os.path.join(test_dir, "CaseDocs")
- baseline_root = case.get_value("BASELINE_ROOT")
+ baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root
all_match = True
baseline_dir = os.path.join(baseline_root, compare_name, test)
@@ -52,10 +52,10 @@
logging.info(comments)
return all_match, comments
-def _do_full_nl_gen(case, test, generate_name):
+def _do_full_nl_gen(case, test, generate_name, baseline_root=None):
test_dir = case.get_value("CASEROOT")
casedoc_dir = os.path.join(test_dir, "CaseDocs")
- baseline_root = case.get_value("BASELINE_ROOT")
+ baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root
baseline_dir = os.path.join(baseline_root, generate_name, test)
baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")
@@ -79,7 +79,7 @@
shutil.copy2(item, baseline_dir)
os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
-def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):
+def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None, baseline_root=None, logfile_name="TestStatus.log"):
expect(case.get_value("TEST"), "Only makes sense to run this for a test case")
caseroot, casebaseid = case.get_value("CASEROOT"), case.get_value("CASEBASEID")
@@ -117,9 +117,9 @@
success = True
output = ""
if compare:
- success, output = _do_full_nl_comp(case, test_name, compare_name)
+ success, output = _do_full_nl_comp(case, test_name, compare_name, baseline_root)
if generate:
- _do_full_nl_gen(case, test_name, generate_name)
+ _do_full_nl_gen(case, test_name, generate_name, baseline_root)
except:
ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)
success = False
@@ -128,7 +128,7 @@
logging.warning(warn)
finally:
ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)
- append_status(output, caseroot=caseroot, sfile="TestStatus.log")
+ append_status(output, caseroot=caseroot, sfile=logfile_name)
return success
|
{"golden_diff": "diff --git a/utils/python/CIME/case_cmpgen_namelists.py b/utils/python/CIME/case_cmpgen_namelists.py\n--- a/utils/python/CIME/case_cmpgen_namelists.py\n+++ b/utils/python/CIME/case_cmpgen_namelists.py\n@@ -14,10 +14,10 @@\n \n logger = logging.getLogger(__name__)\n \n-def _do_full_nl_comp(case, test, compare_name):\n+def _do_full_nl_comp(case, test, compare_name, baseline_root=None):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n- baseline_root = case.get_value(\"BASELINE_ROOT\")\n+ baseline_root = case.get_value(\"BASELINE_ROOT\") if baseline_root is None else baseline_root\n \n all_match = True\n baseline_dir = os.path.join(baseline_root, compare_name, test)\n@@ -52,10 +52,10 @@\n logging.info(comments)\n return all_match, comments\n \n-def _do_full_nl_gen(case, test, generate_name):\n+def _do_full_nl_gen(case, test, generate_name, baseline_root=None):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n- baseline_root = case.get_value(\"BASELINE_ROOT\")\n+ baseline_root = case.get_value(\"BASELINE_ROOT\") if baseline_root is None else baseline_root\n \n baseline_dir = os.path.join(baseline_root, generate_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n@@ -79,7 +79,7 @@\n shutil.copy2(item, baseline_dir)\n os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n \n-def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):\n+def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None, baseline_root=None, logfile_name=\"TestStatus.log\"):\n expect(case.get_value(\"TEST\"), \"Only makes sense to run this for a test case\")\n \n caseroot, casebaseid = case.get_value(\"CASEROOT\"), case.get_value(\"CASEBASEID\")\n@@ -117,9 +117,9 @@\n success = True\n output = \"\"\n if compare:\n- success, output = _do_full_nl_comp(case, test_name, compare_name)\n+ success, output = _do_full_nl_comp(case, test_name, compare_name, baseline_root)\n if generate:\n- _do_full_nl_gen(case, test_name, generate_name)\n+ _do_full_nl_gen(case, test_name, generate_name, baseline_root)\n except:\n ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)\n success = False\n@@ -128,7 +128,7 @@\n logging.warning(warn)\n finally:\n ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)\n- append_status(output, caseroot=caseroot, sfile=\"TestStatus.log\")\n+ append_status(output, caseroot=caseroot, sfile=logfile_name)\n \n return success\n", "issue": "bless_test_results - can not find create_test command\nI am using tag:\r\n https://github.com/CESM-Development/cime/tags/cime5.2.0-alpha.10\r\n\r\nWhen I issue the command:\r\n /glade/u/home/cacraig/cam5_4_94/cime/scripts/Tools/bless_test_results -t '' -c '' -r /glade/scratch/cacraig/aux_cam_20161123172411 -b cam5_4_94-copy6 -f\r\n\r\nI get the following error:\r\nFAILED TO BLESS TEST: SMS_Lm13.f09_f09.F2000_DEV.yellowstone_intel.cam-outfrq1m_clm5, reason Namelist regen failed: '/bin/sh: create_test: command not found'\r\n\r\nThe baseline directory created by this script has the netCDF files in it, but no namelists are copied. 
I've tried running this command from various locations including the cime/scripts directory where create_test resides, and can not eliminate this error.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nLibrary for case.cmpgen_namelists.\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.compare_namelists import is_namelist_file, compare_namelist_files\nfrom CIME.simple_compare import compare_files\nfrom CIME.utils import get_current_branch, append_status\nfrom CIME.test_status import *\n\nimport os, shutil, traceback, stat, glob\n\nlogger = logging.getLogger(__name__)\n\ndef _do_full_nl_comp(case, test, compare_name):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n baseline_root = case.get_value(\"BASELINE_ROOT\")\n\n all_match = True\n baseline_dir = os.path.join(baseline_root, compare_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n\n # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!)\n # TODO: Namelist files should have consistent suffix\n all_items_to_compare = [item for item in glob.glob(\"%s/*\" % casedoc_dir)\\\n if \"README\" not in os.path.basename(item)\\\n and not item.endswith(\"doc\")\\\n and not item.endswith(\"prescribed\")\\\n and not os.path.basename(item).startswith(\".\")] + \\\n glob.glob(\"%s/*user_nl*\" % test_dir)\n\n comments = \"\"\n for item in all_items_to_compare:\n baseline_counterpart = os.path.join(baseline_casedocs \\\n if os.path.dirname(item).endswith(\"CaseDocs\") \\\n else baseline_dir,os.path.basename(item))\n if not os.path.exists(baseline_counterpart):\n comments += \"Missing baseline namelist '%s'\\n\" % baseline_counterpart\n all_match = False\n else:\n if is_namelist_file(item):\n success, current_comments = compare_namelist_files(baseline_counterpart, item, test)\n else:\n success, current_comments = compare_files(baseline_counterpart, item, test)\n\n all_match &= success\n comments += current_comments\n\n logging.info(comments)\n return all_match, comments\n\ndef _do_full_nl_gen(case, test, generate_name):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n baseline_root = case.get_value(\"BASELINE_ROOT\")\n\n baseline_dir = os.path.join(baseline_root, generate_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n\n if not os.path.isdir(baseline_dir):\n os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)\n\n if os.path.isdir(baseline_casedocs):\n shutil.rmtree(baseline_casedocs)\n\n shutil.copytree(casedoc_dir, baseline_casedocs)\n os.chmod(baseline_casedocs, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)\n for item in glob.glob(\"%s/*\" % baseline_casedocs):\n os.chmod(item, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n\n for item in glob.glob(os.path.join(test_dir, \"user_nl*\")):\n preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item))\n if (os.path.exists(preexisting_baseline)):\n os.remove(preexisting_baseline)\n\n shutil.copy2(item, baseline_dir)\n os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n\ndef case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):\n expect(case.get_value(\"TEST\"), \"Only makes sense to run this for a test case\")\n\n caseroot, casebaseid = case.get_value(\"CASEROOT\"), case.get_value(\"CASEBASEID\")\n\n if 
not compare:\n compare = case.get_value(\"COMPARE_BASELINE\")\n if not generate:\n generate = case.get_value(\"GENERATE_BASELINE\")\n\n if not compare and not generate:\n logging.info(\"Nothing to do\")\n return True\n\n # create namelists for case if they haven't been already\n casedocs = os.path.join(caseroot, \"CaseDocs\")\n if not os.path.exists(os.path.join(casedocs, \"drv_in\")):\n create_namelists(case)\n\n test_name = casebaseid if casebaseid is not None else case.get_value(\"CASE\")\n with TestStatus(test_dir=caseroot, test_name=test_name) as ts:\n try:\n # Inside this try are where we catch non-fatal errors, IE errors involving\n # baseline operations which may not directly impact the functioning of the viability of this case\n if compare and not compare_name:\n compare_name = case.get_value(\"BASELINE_NAME_CMP\")\n compare_name = get_current_branch() if compare_name is None else compare_name\n expect(compare_name, \"Was asked to do baseline compare but unable to determine baseline name\")\n logging.info(\"Comparing namelists with baselines '%s'\" % compare_name)\n if generate and not generate_name:\n generate_name = case.get_value(\"BASELINE_NAME_GEN\")\n generate_name = get_current_branch() if generate_name is None else generate_name\n expect(generate_name, \"Was asked to do baseline generation but unable to determine baseline name\")\n logging.info(\"Generating namelists to baselines '%s'\" % generate_name)\n\n success = True\n output = \"\"\n if compare:\n success, output = _do_full_nl_comp(case, test_name, compare_name)\n if generate:\n _do_full_nl_gen(case, test_name, generate_name)\n except:\n ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)\n success = False\n warn = \"Exception during namelist operations:\\n%s\\n%s\" % (sys.exc_info()[1], traceback.format_exc())\n output += warn\n logging.warning(warn)\n finally:\n ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)\n append_status(output, caseroot=caseroot, sfile=\"TestStatus.log\")\n\n return success\n\n", "path": "utils/python/CIME/case_cmpgen_namelists.py"}]}
| 2,465 | 728 |
gh_patches_debug_57235 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-411 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use pycryptodome instead of pycrypto.
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus
-->
### Versions
* Python: 2.7.12
* OS: Ubuntu 18.04
* Pymodbus: 2.1.0 [twisted]
* Modbus Hardware (if used):
### Pymodbus Specific
* Server: tcp - async
### Description
I am trying to use the Modbus server over TCP, but when I installed pymodbus I saw that it installed pycrypto, which is deprecated and dead software.
I already have pycryptodome installed in my application, and it conflicts with pycrypto;
we can't have both pycrypto and pycryptodome installed at the same time.
Can we have a pymodbus[twisted] release that uses pycryptodome instead of pycrypto?
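For what it's worth, one possible direction is simply swapping the pin inside the `twisted` extra — a sketch only; the exact package (`pycryptodome` vs. `pycryptodomex`) and the version bound are assumptions on my part, not the project's decision:

```python
# setup.py (sketch): replace the deprecated pycrypto entry in the twisted extra
extras_require = {
    'twisted': [
        'twisted >= 12.2.0',
        'pyasn1 >= 0.1.4',
        'pycryptodomex >= 3.6.1',  # assumed drop-in replacement for 'pycrypto >= 2.6'
    ],
    # ... other extras unchanged
}
```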
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """
3 Installs pymodbus using distutils
4
5 Run:
6 python setup.py install
7 to install the package from the source archive.
8
9 For information about setuptools
10 http://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords
11 """
12
13 # --------------------------------------------------------------------------- #
14 # initialization
15 # --------------------------------------------------------------------------- #
16 try: # if not installed, install and proceed
17 from setuptools import setup, find_packages
18 except ImportError:
19 from ez_setup import use_setuptools
20 use_setuptools()
21 from setuptools import setup, find_packages
22
23 try:
24 from setup_commands import command_classes
25 except ImportError:
26 command_classes={}
27 from pymodbus import __version__, __author__, __maintainer__
28
29 with open('requirements.txt') as reqs:
30 install_requires = [
31 line for line in reqs.read().split('\n')
32 if (line and not line.startswith('--'))
33 ]
34 install_requires.append("pyserial >= 3.4")
35 # --------------------------------------------------------------------------- #
36 # configuration
37 # --------------------------------------------------------------------------- #
38 setup(
39 name="pymodbus",
40 version=__version__,
41 description="A fully featured modbus protocol stack in python",
42 long_description="""
43 Pymodbus aims to be a fully implemented modbus protocol stack
44 implemented using twisted/asyncio/tornado.
45 Its orignal goal was to allow simulation of thousands of modbus devices
46 on a single machine for monitoring software testing.
47 """,
48 classifiers=[
49 'Development Status :: 4 - Beta',
50 'Environment :: Console',
51 'Environment :: X11 Applications :: GTK',
52 'Framework :: Twisted',
53 'Intended Audience :: Developers',
54 'License :: OSI Approved :: BSD License',
55 'Operating System :: POSIX :: Linux',
56 'Operating System :: Unix',
57 'Programming Language :: Python',
58 'Topic :: System :: Networking',
59 'Topic :: Utilities'
60 ],
61 keywords='modbus, twisted, scada',
62 author=__author__,
63 author_email='[email protected]',
64 maintainer=__maintainer__,
65 maintainer_email='[email protected]',
66 url='https://github.com/riptideio/pymodbus/',
67 license='BSD',
68 packages=find_packages(exclude=['examples', 'test']),
69 exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},
70 py_modules=['ez_setup'],
71 platforms=['Linux', 'Mac OS X', 'Win'],
72 include_package_data=True,
73 zip_safe=True,
74 install_requires=install_requires,
75 extras_require={
76 'quality': [
77 'coverage >= 3.5.3',
78 'nose >= 1.2.1',
79 'mock >= 1.0.0',
80 'pep8 >= 1.3.3'
81 ],
82 'documents': ['sphinx >= 1.1.3',
83 'sphinx_rtd_theme',
84 'humanfriendly'],
85 'twisted': [
86 'twisted >= 12.2.0',
87 'pyasn1 >= 0.1.4',
88 'pycrypto >= 2.6'
89 ],
90 'tornado': [
91 'tornado >= 4.5.3'
92 ],
93 'repl': [
94 'click>=6.7',
95 'prompt-toolkit==2.0.4',
96 'pygments==2.2.0'
97 ]
98 },
99 entry_points={
100 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],
101 },
102 test_suite='nose.collector',
103 cmdclass=command_classes,
104 )
105
106
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,7 +85,6 @@
'twisted': [
'twisted >= 12.2.0',
'pyasn1 >= 0.1.4',
- 'pycrypto >= 2.6'
],
'tornado': [
'tornado >= 4.5.3'
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,7 +85,6 @@\n 'twisted': [\n 'twisted >= 12.2.0',\n 'pyasn1 >= 0.1.4',\n- 'pycrypto >= 2.6'\n ],\n 'tornado': [\n 'tornado >= 4.5.3'\n", "issue": "Use pycryptodome instead of pycrypto.\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 2.7.12\r\n* OS: Ubuntu 18.04\r\n* Pymodbus: 2.1.0 [twisted]\r\n* Modbus Hardware (if used): \r\n\r\n### Pymodbus Specific\r\n* Server: tcp - async\r\n\r\n### Description\r\n\r\nI am trying to use Mod bus server on TCP protocol, but when I installed pymodbus and I saw it's installed pycrypto, which is deprecated and dead software. \r\n\r\nI already have installed pycryptodome in my application, which is a conflict with pycrypto, \r\nwe can't have both pycrypto and pycryptodome at the same time,\r\n\r\nCan we have pymodbus[twisted] release which can use pycryptodome instead of pycrypto?\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls pymodbus using distutils\n\nRun:\n python setup.py install\nto install the package from the source archive.\n\nFor information about setuptools\nhttp://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords\n\"\"\"\n\n# --------------------------------------------------------------------------- #\n# initialization\n# --------------------------------------------------------------------------- #\ntry: # if not installed, install and proceed\n from setuptools import setup, find_packages\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\ntry:\n from setup_commands import command_classes\nexcept ImportError:\n command_classes={}\nfrom pymodbus import __version__, __author__, __maintainer__\n\nwith open('requirements.txt') as reqs:\n install_requires = [\n line for line in reqs.read().split('\\n')\n if (line and not line.startswith('--'))\n ]\n install_requires.append(\"pyserial >= 3.4\")\n# --------------------------------------------------------------------------- #\n# configuration\n# --------------------------------------------------------------------------- #\nsetup(\n name=\"pymodbus\",\n version=__version__,\n description=\"A fully featured modbus protocol stack in python\",\n long_description=\"\"\"\n Pymodbus aims to be a fully implemented modbus protocol stack \n implemented using twisted/asyncio/tornado. 
\n Its orignal goal was to allow simulation of thousands of modbus devices\n on a single machine for monitoring software testing.\n \"\"\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: X11 Applications :: GTK',\n 'Framework :: Twisted',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Topic :: System :: Networking',\n 'Topic :: Utilities'\n ],\n keywords='modbus, twisted, scada',\n author=__author__,\n author_email='[email protected]',\n maintainer=__maintainer__,\n maintainer_email='[email protected]',\n url='https://github.com/riptideio/pymodbus/',\n license='BSD',\n packages=find_packages(exclude=['examples', 'test']),\n exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},\n py_modules=['ez_setup'],\n platforms=['Linux', 'Mac OS X', 'Win'],\n include_package_data=True,\n zip_safe=True,\n install_requires=install_requires,\n extras_require={\n 'quality': [\n 'coverage >= 3.5.3',\n 'nose >= 1.2.1',\n 'mock >= 1.0.0',\n 'pep8 >= 1.3.3'\n ],\n 'documents': ['sphinx >= 1.1.3',\n 'sphinx_rtd_theme',\n 'humanfriendly'],\n 'twisted': [\n 'twisted >= 12.2.0',\n 'pyasn1 >= 0.1.4',\n 'pycrypto >= 2.6'\n ],\n 'tornado': [\n 'tornado >= 4.5.3'\n ],\n 'repl': [\n 'click>=6.7',\n 'prompt-toolkit==2.0.4',\n 'pygments==2.2.0'\n ]\n },\n entry_points={\n 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],\n },\n test_suite='nose.collector',\n cmdclass=command_classes,\n)\n\n", "path": "setup.py"}]}
| 1,808 | 95 |
gh_patches_debug_15259
|
rasdani/github-patches
|
git_diff
|
facebookresearch__Mephisto-832
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make URLs in terminal output clickable on launch
<img width="1028" alt="CleanShot 2022-07-15 at 10 43 57@2x" src="https://user-images.githubusercontent.com/425059/179247049-927a78f7-d6fd-414c-8d60-5732cc6393a3.png">
It's annoying to have to copy and paste the URLs from the terminal output into a browser on task launch.
```
# change:
localhost:3000/?worker_id=x&assignment_id=1
# to:
http://localhost:3000/?worker_id=x&assignment_id=1
```
Adding a protocol (http:// or https://) before the URL will make it easy to simply click on it to open (in some terminals). We should add this.
---
Note: I'm not sure if we need to decide between http or https based on certain scenarios
</issue>
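As a minimal sketch (assuming http is the right scheme for local development), the `MockUnit.launch` messages shown later in this record would simply gain a scheme prefix:

```python
# Log lines with an explicit scheme so terminals render clickable links.
print(
    f"Mock task launched: http://localhost:{port} for preview, "
    f"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id}"
)
```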
<code>
[start of mephisto/abstractions/providers/mock/mock_unit.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from mephisto.data_model.unit import Unit
8 from mephisto.data_model.constants.assignment_state import AssignmentState
9 from mephisto.abstractions.blueprint import AgentState
10
11 from mephisto.abstractions.providers.mock.provider_type import PROVIDER_TYPE
12 from typing import List, Optional, Tuple, Dict, Mapping, Any, Type, TYPE_CHECKING
13
14 if TYPE_CHECKING:
15 from mephisto.abstractions.database import MephistoDB
16 from mephisto.data_model.assignment import Assignment
17 from mephisto.abstractions.providers.mock.mock_datastore import MockDatastore
18
19 from mephisto.utils.logger_core import get_logger
20
21 logger = get_logger(name=__name__)
22
23
24 class MockUnit(Unit):
25 """
26 This class tracks the status of an individual worker's contribution to a
27 higher level assignment. It is the smallest 'unit' of work to complete
28 the assignment, and this class is only responsible for checking
29 the status of that work itself being done.
30
31 It should be extended for usage with a specific crowd provider
32 """
33
34 def __init__(
35 self,
36 db: "MephistoDB",
37 db_id: str,
38 row: Optional[Mapping[str, Any]] = None,
39 _used_new_call: bool = False,
40 ):
41 super().__init__(db, db_id, row=row, _used_new_call=_used_new_call)
42 self.datastore: "MockDatastore" = db.get_datastore_for_provider(PROVIDER_TYPE)
43
44 def launch(self, task_url: str) -> None:
45 """Mock launches do nothing right now beyond updating state"""
46 self.set_db_status(status=AssignmentState.LAUNCHED)
47
48 # TODO(OWN) get this link to the frontend
49 port = task_url.split(":")[1].split("/")[0]
50 print(task_url)
51 print(
52 f"Mock task launched: localhost:{port} for preview, "
53 f"localhost:{port}/?worker_id=x&assignment_id={self.db_id}"
54 )
55 logger.info(
56 f"Mock task launched: localhost:{port} for preview, "
57 f"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}"
58 )
59
60 return None
61
62 def expire(self) -> float:
63 """Expiration is immediate on Mocks"""
64 if self.get_status() not in [
65 AssignmentState.EXPIRED,
66 AssignmentState.COMPLETED,
67 ]:
68 self.set_db_status(AssignmentState.EXPIRED)
69 self.datastore.set_unit_expired(self.db_id, True)
70 return 0.0
71
72 def is_expired(self) -> bool:
73 """Determine if this unit is expired as according to the vendor."""
74 return self.datastore.get_unit_expired(self.db_id)
75
76 @staticmethod
77 def new(
78 db: "MephistoDB", assignment: "Assignment", index: int, pay_amount: float
79 ) -> "Unit":
80 """Create a Unit for the given assignment"""
81 return MockUnit._register_unit(db, assignment, index, pay_amount, PROVIDER_TYPE)
82
[end of mephisto/abstractions/providers/mock/mock_unit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mephisto/abstractions/providers/mock/mock_unit.py b/mephisto/abstractions/providers/mock/mock_unit.py
--- a/mephisto/abstractions/providers/mock/mock_unit.py
+++ b/mephisto/abstractions/providers/mock/mock_unit.py
@@ -49,12 +49,12 @@
port = task_url.split(":")[1].split("/")[0]
print(task_url)
print(
- f"Mock task launched: localhost:{port} for preview, "
- f"localhost:{port}/?worker_id=x&assignment_id={self.db_id}"
+ f"Mock task launched: http://localhost:{port} for preview, "
+ f"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id}"
)
logger.info(
- f"Mock task launched: localhost:{port} for preview, "
- f"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}"
+ f"Mock task launched: http://localhost:{port} for preview, "
+ f"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}"
)
return None
|
{"golden_diff": "diff --git a/mephisto/abstractions/providers/mock/mock_unit.py b/mephisto/abstractions/providers/mock/mock_unit.py\n--- a/mephisto/abstractions/providers/mock/mock_unit.py\n+++ b/mephisto/abstractions/providers/mock/mock_unit.py\n@@ -49,12 +49,12 @@\n port = task_url.split(\":\")[1].split(\"/\")[0]\n print(task_url)\n print(\n- f\"Mock task launched: localhost:{port} for preview, \"\n- f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id}\"\n+ f\"Mock task launched: http://localhost:{port} for preview, \"\n+ f\"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id}\"\n )\n logger.info(\n- f\"Mock task launched: localhost:{port} for preview, \"\n- f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}\"\n+ f\"Mock task launched: http://localhost:{port} for preview, \"\n+ f\"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}\"\n )\n \n return None\n", "issue": "Make URLs in terminal output clickable on launch\n<img width=\"1028\" alt=\"CleanShot 2022-07-15 at 10 43 57@2x\" src=\"https://user-images.githubusercontent.com/425059/179247049-927a78f7-d6fd-414c-8d60-5732cc6393a3.png\">\r\n\r\nIt's annoying to have to copy and paste the URLs from the terminal output into a browesr on task launch.\r\n\r\n```\r\n# change:\r\n\r\nlocalhost:3000/?worker_id=x&assignment_id=1\r\n\r\n# to:\r\n\r\nhttp://localhost:3000/?worker_id=x&assignment_id=1\r\n```\r\n\r\nAdding a protocol (http: / https://) before the URL will make it easy to simply click on them to open (in some terminals). We should add this.\r\n\r\n---\r\n\r\nNote: I'm not sure if we need to decide between http or https based on certain scenarios\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom mephisto.data_model.unit import Unit\nfrom mephisto.data_model.constants.assignment_state import AssignmentState\nfrom mephisto.abstractions.blueprint import AgentState\n\nfrom mephisto.abstractions.providers.mock.provider_type import PROVIDER_TYPE\nfrom typing import List, Optional, Tuple, Dict, Mapping, Any, Type, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from mephisto.abstractions.database import MephistoDB\n from mephisto.data_model.assignment import Assignment\n from mephisto.abstractions.providers.mock.mock_datastore import MockDatastore\n\nfrom mephisto.utils.logger_core import get_logger\n\nlogger = get_logger(name=__name__)\n\n\nclass MockUnit(Unit):\n \"\"\"\n This class tracks the status of an individual worker's contribution to a\n higher level assignment. 
It is the smallest 'unit' of work to complete\n the assignment, and this class is only responsible for checking\n the status of that work itself being done.\n\n It should be extended for usage with a specific crowd provider\n \"\"\"\n\n def __init__(\n self,\n db: \"MephistoDB\",\n db_id: str,\n row: Optional[Mapping[str, Any]] = None,\n _used_new_call: bool = False,\n ):\n super().__init__(db, db_id, row=row, _used_new_call=_used_new_call)\n self.datastore: \"MockDatastore\" = db.get_datastore_for_provider(PROVIDER_TYPE)\n\n def launch(self, task_url: str) -> None:\n \"\"\"Mock launches do nothing right now beyond updating state\"\"\"\n self.set_db_status(status=AssignmentState.LAUNCHED)\n\n # TODO(OWN) get this link to the frontend\n port = task_url.split(\":\")[1].split(\"/\")[0]\n print(task_url)\n print(\n f\"Mock task launched: localhost:{port} for preview, \"\n f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id}\"\n )\n logger.info(\n f\"Mock task launched: localhost:{port} for preview, \"\n f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}\"\n )\n\n return None\n\n def expire(self) -> float:\n \"\"\"Expiration is immediate on Mocks\"\"\"\n if self.get_status() not in [\n AssignmentState.EXPIRED,\n AssignmentState.COMPLETED,\n ]:\n self.set_db_status(AssignmentState.EXPIRED)\n self.datastore.set_unit_expired(self.db_id, True)\n return 0.0\n\n def is_expired(self) -> bool:\n \"\"\"Determine if this unit is expired as according to the vendor.\"\"\"\n return self.datastore.get_unit_expired(self.db_id)\n\n @staticmethod\n def new(\n db: \"MephistoDB\", assignment: \"Assignment\", index: int, pay_amount: float\n ) -> \"Unit\":\n \"\"\"Create a Unit for the given assignment\"\"\"\n return MockUnit._register_unit(db, assignment, index, pay_amount, PROVIDER_TYPE)\n", "path": "mephisto/abstractions/providers/mock/mock_unit.py"}]}
| 1,654 | 274 |
gh_patches_debug_35951
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-1997
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django Admin: Indicate whether `creator` has other domains
### Issue description
Problem: an analyst would like to know whether the ~~**submitter**~~ **creator** is already associated with other domains in the system. This is an important indicator as to whether this submitter is trustworthy and established.
Solution: add an indicator near the **creator** name or email address, which shows how many other domains they are associated with in our system. This will be enhanced with a list of other requests or domains in a future ticket.
### Acceptance criteria
- [ ] a labeled indicator near the creator name, shows how many "Ready" domains they are a domain manager for, based on the user domain roles.
- [ ] a labeled indicator near the creator name, shows how many domain requests they have in progress.
- [ ] Labels are in plain english, as shown in Additional context.
### Additional context
Approved domains: nn
Active Requests: nn
Rejected or Ineligible: nn
Active requests will not include requests in the following states: started, approved, withdrawn.
Approved domains will not include deleted domains.
### Links to other issues
Blocked by #1852.
Related to: #1875 (1850 should be done first)
</issue>
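A rough sketch of the counts implied by the acceptance criteria, written against the Django models referenced elsewhere in this record (`Domain`, `DomainRequest`, `domain_requests_created`); the exact field, state, and relation names are assumptions and may differ in the real codebase:

```python
# Illustrative helpers for the labels shown next to the creator in the admin.
def approved_domains_count(user):
    # "Ready" domains the creator is a domain manager for.
    return user.domains.filter(state=Domain.State.READY).count()

def active_requests_count(user):
    # Requests still in flight; started, approved and withdrawn are excluded.
    active = [
        DomainRequest.DomainRequestStatus.SUBMITTED,
        DomainRequest.DomainRequestStatus.IN_REVIEW,
        DomainRequest.DomainRequestStatus.ACTION_NEEDED,
    ]
    return user.domain_requests_created.filter(status__in=active).count()
```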
<code>
[start of src/registrar/models/contact.py]
1 from django.db import models
2
3 from .utility.time_stamped_model import TimeStampedModel
4
5 from phonenumber_field.modelfields import PhoneNumberField # type: ignore
6
7
8 class Contact(TimeStampedModel):
9 """Contact information follows a similar pattern for each contact."""
10
11 user = models.OneToOneField(
12 "registrar.User",
13 null=True,
14 blank=True,
15 on_delete=models.SET_NULL,
16 )
17
18 first_name = models.CharField(
19 null=True,
20 blank=True,
21 verbose_name="first name / given name",
22 db_index=True,
23 )
24 middle_name = models.CharField(
25 null=True,
26 blank=True,
27 )
28 last_name = models.CharField(
29 null=True,
30 blank=True,
31 verbose_name="last name / family name",
32 db_index=True,
33 )
34 title = models.CharField(
35 null=True,
36 blank=True,
37 verbose_name="title or role in your organization",
38 )
39 email = models.EmailField(
40 null=True,
41 blank=True,
42 db_index=True,
43 max_length=320,
44 )
45 phone = PhoneNumberField(
46 null=True,
47 blank=True,
48 db_index=True,
49 )
50
51 def _get_all_relations(self):
52 """Returns an array of all fields which are relations"""
53 return [f.name for f in self._meta.get_fields() if f.is_relation]
54
55 def has_more_than_one_join(self, expected_relation):
56 """Helper for finding whether an object is joined more than once.
57 expected_relation is the one relation with one expected join"""
58 # all_relations is the list of all_relations (from contact) to be checked for existing joins
59 all_relations = self._get_all_relations()
60 return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)
61
62 def _has_more_than_one_join_per_relation(self, relation, expected_relation):
63 """Helper for finding whether an object is joined more than once."""
64 # threshold is the number of related objects that are acceptable
65 # when determining if related objects exist. threshold is 0 for most
66 # relationships. if the relationship is expected_relation, we know that
67 # there is already exactly 1 acceptable relationship (the one we are
68 # attempting to delete), so the threshold is 1
69 threshold = 1 if relation == expected_relation else 0
70
71 # Raise a KeyError if rel is not a defined field on the db_obj model
72 # This will help catch any errors in relation passed.
73 if relation not in [field.name for field in self._meta.get_fields()]:
74 raise KeyError(f"{relation} is not a defined field on the {self._meta.model_name} model.")
75
76 # if attr rel in db_obj is not None, then test if reference object(s) exist
77 if getattr(self, relation) is not None:
78 field = self._meta.get_field(relation)
79 if isinstance(field, models.OneToOneField):
80 # if the rel field is a OneToOne field, then we have already
81 # determined that the object exists (is not None)
82 # so return True unless the relation being tested is the expected_relation
83 is_not_expected_relation = relation != expected_relation
84 return is_not_expected_relation
85 elif isinstance(field, models.ForeignObjectRel):
86 # if the rel field is a ManyToOne or ManyToMany, then we need
87 # to determine if the count of related objects is greater than
88 # the threshold
89 return getattr(self, relation).count() > threshold
90 return False
91
92 def get_formatted_name(self):
93 """Returns the contact's name in Western order."""
94 names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]
95 return " ".join(names) if names else "Unknown"
96
97 def save(self, *args, **kwargs):
98 # Call the parent class's save method to perform the actual save
99 super().save(*args, **kwargs)
100
101 # Update the related User object's first_name and last_name
102 if self.user and (not self.user.first_name or not self.user.last_name):
103 self.user.first_name = self.first_name
104 self.user.last_name = self.last_name
105 self.user.save()
106
107 def __str__(self):
108 if self.first_name or self.last_name:
109 return self.get_formatted_name()
110 elif self.email:
111 return self.email
112 elif self.pk:
113 return str(self.pk)
114 else:
115 return ""
116
[end of src/registrar/models/contact.py]
[start of src/registrar/models/user.py]
1 import logging
2
3 from django.contrib.auth.models import AbstractUser
4 from django.db import models
5
6 from registrar.models.user_domain_role import UserDomainRole
7
8 from .domain_invitation import DomainInvitation
9 from .transition_domain import TransitionDomain
10 from .verified_by_staff import VerifiedByStaff
11 from .domain import Domain
12
13 from phonenumber_field.modelfields import PhoneNumberField # type: ignore
14
15
16 logger = logging.getLogger(__name__)
17
18
19 class User(AbstractUser):
20 """
21 A custom user model that performs identically to the default user model
22 but can be customized later.
23 """
24
25 # #### Constants for choice fields ####
26 RESTRICTED = "restricted"
27 STATUS_CHOICES = ((RESTRICTED, RESTRICTED),)
28
29 status = models.CharField(
30 max_length=10,
31 choices=STATUS_CHOICES,
32 default=None, # Set the default value to None
33 null=True, # Allow the field to be null
34 blank=True, # Allow the field to be blank
35 )
36
37 domains = models.ManyToManyField(
38 "registrar.Domain",
39 through="registrar.UserDomainRole",
40 related_name="users",
41 )
42
43 phone = PhoneNumberField(
44 null=True,
45 blank=True,
46 help_text="Phone",
47 db_index=True,
48 )
49
50 def __str__(self):
51 # this info is pulled from Login.gov
52 if self.first_name or self.last_name:
53 return f"{self.first_name or ''} {self.last_name or ''} {self.email or ''}"
54 elif self.email:
55 return self.email
56 else:
57 return self.username
58
59 def restrict_user(self):
60 self.status = self.RESTRICTED
61 self.save()
62
63 def unrestrict_user(self):
64 self.status = None
65 self.save()
66
67 def is_restricted(self):
68 return self.status == self.RESTRICTED
69
70 @classmethod
71 def needs_identity_verification(cls, email, uuid):
72 """A method used by our oidc classes to test whether a user needs email/uuid verification
73 or the full identity PII verification"""
74
75 # An existing user who is a domain manager of a domain (that is,
76 # they have an entry in UserDomainRole for their User)
77 try:
78 existing_user = cls.objects.get(username=uuid)
79 if existing_user and UserDomainRole.objects.filter(user=existing_user).exists():
80 return False
81 except cls.DoesNotExist:
82 # Do nothing when the user is not found, as we're checking for existence.
83 pass
84 except Exception as err:
85 raise err
86
87 # A new incoming user who is a domain manager for one of the domains
88 # that we inputted from Verisign (that is, their email address appears
89 # in the username field of a TransitionDomain)
90 if TransitionDomain.objects.filter(username=email).exists():
91 return False
92
93 # New users flagged by Staff to bypass ial2
94 if VerifiedByStaff.objects.filter(email=email).exists():
95 return False
96
97 # A new incoming user who is being invited to be a domain manager (that is,
98 # their email address is in DomainInvitation for an invitation that is not yet "retrieved").
99 invited = DomainInvitation.DomainInvitationStatus.INVITED
100 if DomainInvitation.objects.filter(email=email, status=invited).exists():
101 return False
102
103 return True
104
105 def check_domain_invitations_on_login(self):
106 """When a user first arrives on the site, we need to retrieve any domain
107 invitations that match their email address."""
108 for invitation in DomainInvitation.objects.filter(
109 email__iexact=self.email, status=DomainInvitation.DomainInvitationStatus.INVITED
110 ):
111 try:
112 invitation.retrieve()
113 invitation.save()
114 except RuntimeError:
115 # retrieving should not fail because of a missing user, but
116 # if it does fail, log the error so a new user can continue
117 # logging in
118 logger.warn("Failed to retrieve invitation %s", invitation, exc_info=True)
119
120 def create_domain_and_invite(self, transition_domain: TransitionDomain):
121 transition_domain_name = transition_domain.domain_name
122 transition_domain_status = transition_domain.status
123 transition_domain_email = transition_domain.username
124
125 # type safety check. name should never be none
126 if transition_domain_name is not None:
127 new_domain = Domain(name=transition_domain_name, state=transition_domain_status)
128 new_domain.save()
129 # check that a domain invitation doesn't already
130 # exist for this e-mail / Domain pair
131 domain_email_already_in_domain_invites = DomainInvitation.objects.filter(
132 email=transition_domain_email.lower(), domain=new_domain
133 ).exists()
134 if not domain_email_already_in_domain_invites:
135 # Create new domain invitation
136 new_domain_invitation = DomainInvitation(email=transition_domain_email.lower(), domain=new_domain)
137 new_domain_invitation.save()
138
139 def on_each_login(self):
140 """Callback each time the user is authenticated.
141
142 When a user arrives on the site each time, we need to retrieve any domain
143 invitations that match their email address.
144
145 We also need to check if they are logging in with the same e-mail
146 as a transition domain and update our domainInfo objects accordingly.
147 """
148
149 self.check_domain_invitations_on_login()
150
151 class Meta:
152 permissions = [
153 ("analyst_access_permission", "Analyst Access Permission"),
154 ("full_access_permission", "Full Access Permission"),
155 ]
156
[end of src/registrar/models/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py
--- a/src/registrar/models/contact.py
+++ b/src/registrar/models/contact.py
@@ -94,6 +94,9 @@
names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]
return " ".join(names) if names else "Unknown"
+ def has_contact_info(self):
+ return bool(self.title or self.email or self.phone)
+
def save(self, *args, **kwargs):
# Call the parent class's save method to perform the actual save
super().save(*args, **kwargs)
diff --git a/src/registrar/models/user.py b/src/registrar/models/user.py
--- a/src/registrar/models/user.py
+++ b/src/registrar/models/user.py
@@ -9,6 +9,7 @@
from .transition_domain import TransitionDomain
from .verified_by_staff import VerifiedByStaff
from .domain import Domain
+from .domain_request import DomainRequest
from phonenumber_field.modelfields import PhoneNumberField # type: ignore
@@ -67,6 +68,33 @@
def is_restricted(self):
return self.status == self.RESTRICTED
+ def get_approved_domains_count(self):
+ """Return count of approved domains"""
+ allowed_states = [Domain.State.UNKNOWN, Domain.State.DNS_NEEDED, Domain.State.READY, Domain.State.ON_HOLD]
+ approved_domains_count = self.domains.filter(state__in=allowed_states).count()
+ return approved_domains_count
+
+ def get_active_requests_count(self):
+ """Return count of active requests"""
+ allowed_states = [
+ DomainRequest.DomainRequestStatus.SUBMITTED,
+ DomainRequest.DomainRequestStatus.IN_REVIEW,
+ DomainRequest.DomainRequestStatus.ACTION_NEEDED,
+ ]
+ active_requests_count = self.domain_requests_created.filter(status__in=allowed_states).count()
+ return active_requests_count
+
+ def get_rejected_requests_count(self):
+ """Return count of rejected requests"""
+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.REJECTED).count()
+
+ def get_ineligible_requests_count(self):
+ """Return count of ineligible requests"""
+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.INELIGIBLE).count()
+
+ def has_contact_info(self):
+ return bool(self.contact.title or self.contact.email or self.contact.phone)
+
@classmethod
def needs_identity_verification(cls, email, uuid):
"""A method used by our oidc classes to test whether a user needs email/uuid verification
|
{"golden_diff": "diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py\n--- a/src/registrar/models/contact.py\n+++ b/src/registrar/models/contact.py\n@@ -94,6 +94,9 @@\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n \n+ def has_contact_info(self):\n+ return bool(self.title or self.email or self.phone)\n+\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\ndiff --git a/src/registrar/models/user.py b/src/registrar/models/user.py\n--- a/src/registrar/models/user.py\n+++ b/src/registrar/models/user.py\n@@ -9,6 +9,7 @@\n from .transition_domain import TransitionDomain\n from .verified_by_staff import VerifiedByStaff\n from .domain import Domain\n+from .domain_request import DomainRequest\n \n from phonenumber_field.modelfields import PhoneNumberField # type: ignore\n \n@@ -67,6 +68,33 @@\n def is_restricted(self):\n return self.status == self.RESTRICTED\n \n+ def get_approved_domains_count(self):\n+ \"\"\"Return count of approved domains\"\"\"\n+ allowed_states = [Domain.State.UNKNOWN, Domain.State.DNS_NEEDED, Domain.State.READY, Domain.State.ON_HOLD]\n+ approved_domains_count = self.domains.filter(state__in=allowed_states).count()\n+ return approved_domains_count\n+\n+ def get_active_requests_count(self):\n+ \"\"\"Return count of active requests\"\"\"\n+ allowed_states = [\n+ DomainRequest.DomainRequestStatus.SUBMITTED,\n+ DomainRequest.DomainRequestStatus.IN_REVIEW,\n+ DomainRequest.DomainRequestStatus.ACTION_NEEDED,\n+ ]\n+ active_requests_count = self.domain_requests_created.filter(status__in=allowed_states).count()\n+ return active_requests_count\n+\n+ def get_rejected_requests_count(self):\n+ \"\"\"Return count of rejected requests\"\"\"\n+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.REJECTED).count()\n+\n+ def get_ineligible_requests_count(self):\n+ \"\"\"Return count of ineligible requests\"\"\"\n+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.INELIGIBLE).count()\n+\n+ def has_contact_info(self):\n+ return bool(self.contact.title or self.contact.email or self.contact.phone)\n+\n @classmethod\n def needs_identity_verification(cls, email, uuid):\n \"\"\"A method used by our oidc classes to test whether a user needs email/uuid verification\n", "issue": "Django Admin: Indicate whether `creator` has other domains\n### Issue description\n\nProblem: an analyst would like to know whether the ~~**submitter**~~ **creator** is already associated with other domains in the system. This is an important indicator as to whether this submitter is trustworthy and established.\n\nSolution: add an indicator near the **creator** name or email address, which shows how many other domains they are associated with in our system. 
This will be enhanced with a list of other requests or domains in a future ticket.\n\n\n### Acceptance criteria\n\n- [ ] a labeled indicator near the creator name, shows how many \"Ready\" domains they are a domain manager for, based on the user domain roles.\n- [ ] a labeled indicator near the creator name, shows how many domain requests they have in progress.\n- [ ] Labels are in plain english, as shown in Additional context.\n\n\n### Additional context\nApproved domains: nn\nActive Requests : nn \nRejected or Ineligible: nn\n\nactive requests will not include requests in the following states:started, approved, withdrawn\napproved domains will not include deleted domains\n\n### Links to other issues\n\nBlocked by #1852.\nRelated to: #1875 (1850 should be done first)\n", "before_files": [{"content": "from django.db import models\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.CharField(\n null=True,\n blank=True,\n )\n last_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n db_index=True,\n max_length=320,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n db_index=True,\n )\n\n def _get_all_relations(self):\n \"\"\"Returns an array of all fields which are relations\"\"\"\n return [f.name for f in self._meta.get_fields() if f.is_relation]\n\n def has_more_than_one_join(self, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\n expected_relation is the one relation with one expected join\"\"\"\n # all_relations is the list of all_relations (from contact) to be checked for existing joins\n all_relations = self._get_all_relations()\n return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)\n\n def _has_more_than_one_join_per_relation(self, relation, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\"\"\"\n # threshold is the number of related objects that are acceptable\n # when determining if related objects exist. threshold is 0 for most\n # relationships. 
if the relationship is expected_relation, we know that\n # there is already exactly 1 acceptable relationship (the one we are\n # attempting to delete), so the threshold is 1\n threshold = 1 if relation == expected_relation else 0\n\n # Raise a KeyError if rel is not a defined field on the db_obj model\n # This will help catch any errors in relation passed.\n if relation not in [field.name for field in self._meta.get_fields()]:\n raise KeyError(f\"{relation} is not a defined field on the {self._meta.model_name} model.\")\n\n # if attr rel in db_obj is not None, then test if reference object(s) exist\n if getattr(self, relation) is not None:\n field = self._meta.get_field(relation)\n if isinstance(field, models.OneToOneField):\n # if the rel field is a OneToOne field, then we have already\n # determined that the object exists (is not None)\n # so return True unless the relation being tested is the expected_relation\n is_not_expected_relation = relation != expected_relation\n return is_not_expected_relation\n elif isinstance(field, models.ForeignObjectRel):\n # if the rel field is a ManyToOne or ManyToMany, then we need\n # to determine if the count of related objects is greater than\n # the threshold\n return getattr(self, relation).count() > threshold\n return False\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}, {"content": "import logging\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\nfrom registrar.models.user_domain_role import UserDomainRole\n\nfrom .domain_invitation import DomainInvitation\nfrom .transition_domain import TransitionDomain\nfrom .verified_by_staff import VerifiedByStaff\nfrom .domain import Domain\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass User(AbstractUser):\n \"\"\"\n A custom user model that performs identically to the default user model\n but can be customized later.\n \"\"\"\n\n # #### Constants for choice fields ####\n RESTRICTED = \"restricted\"\n STATUS_CHOICES = ((RESTRICTED, RESTRICTED),)\n\n status = models.CharField(\n max_length=10,\n choices=STATUS_CHOICES,\n default=None, # Set the default value to None\n null=True, # Allow the field to be null\n blank=True, # Allow the field to be blank\n )\n\n domains = models.ManyToManyField(\n \"registrar.Domain\",\n through=\"registrar.UserDomainRole\",\n related_name=\"users\",\n )\n\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def __str__(self):\n # this info is pulled from Login.gov\n if self.first_name or self.last_name:\n return f\"{self.first_name or ''} {self.last_name or ''} {self.email or ''}\"\n elif self.email:\n return self.email\n else:\n 
return self.username\n\n def restrict_user(self):\n self.status = self.RESTRICTED\n self.save()\n\n def unrestrict_user(self):\n self.status = None\n self.save()\n\n def is_restricted(self):\n return self.status == self.RESTRICTED\n\n @classmethod\n def needs_identity_verification(cls, email, uuid):\n \"\"\"A method used by our oidc classes to test whether a user needs email/uuid verification\n or the full identity PII verification\"\"\"\n\n # An existing user who is a domain manager of a domain (that is,\n # they have an entry in UserDomainRole for their User)\n try:\n existing_user = cls.objects.get(username=uuid)\n if existing_user and UserDomainRole.objects.filter(user=existing_user).exists():\n return False\n except cls.DoesNotExist:\n # Do nothing when the user is not found, as we're checking for existence.\n pass\n except Exception as err:\n raise err\n\n # A new incoming user who is a domain manager for one of the domains\n # that we inputted from Verisign (that is, their email address appears\n # in the username field of a TransitionDomain)\n if TransitionDomain.objects.filter(username=email).exists():\n return False\n\n # New users flagged by Staff to bypass ial2\n if VerifiedByStaff.objects.filter(email=email).exists():\n return False\n\n # A new incoming user who is being invited to be a domain manager (that is,\n # their email address is in DomainInvitation for an invitation that is not yet \"retrieved\").\n invited = DomainInvitation.DomainInvitationStatus.INVITED\n if DomainInvitation.objects.filter(email=email, status=invited).exists():\n return False\n\n return True\n\n def check_domain_invitations_on_login(self):\n \"\"\"When a user first arrives on the site, we need to retrieve any domain\n invitations that match their email address.\"\"\"\n for invitation in DomainInvitation.objects.filter(\n email__iexact=self.email, status=DomainInvitation.DomainInvitationStatus.INVITED\n ):\n try:\n invitation.retrieve()\n invitation.save()\n except RuntimeError:\n # retrieving should not fail because of a missing user, but\n # if it does fail, log the error so a new user can continue\n # logging in\n logger.warn(\"Failed to retrieve invitation %s\", invitation, exc_info=True)\n\n def create_domain_and_invite(self, transition_domain: TransitionDomain):\n transition_domain_name = transition_domain.domain_name\n transition_domain_status = transition_domain.status\n transition_domain_email = transition_domain.username\n\n # type safety check. 
name should never be none\n if transition_domain_name is not None:\n new_domain = Domain(name=transition_domain_name, state=transition_domain_status)\n new_domain.save()\n # check that a domain invitation doesn't already\n # exist for this e-mail / Domain pair\n domain_email_already_in_domain_invites = DomainInvitation.objects.filter(\n email=transition_domain_email.lower(), domain=new_domain\n ).exists()\n if not domain_email_already_in_domain_invites:\n # Create new domain invitation\n new_domain_invitation = DomainInvitation(email=transition_domain_email.lower(), domain=new_domain)\n new_domain_invitation.save()\n\n def on_each_login(self):\n \"\"\"Callback each time the user is authenticated.\n\n When a user arrives on the site each time, we need to retrieve any domain\n invitations that match their email address.\n\n We also need to check if they are logging in with the same e-mail\n as a transition domain and update our domainInfo objects accordingly.\n \"\"\"\n\n self.check_domain_invitations_on_login()\n\n class Meta:\n permissions = [\n (\"analyst_access_permission\", \"Analyst Access Permission\"),\n (\"full_access_permission\", \"Full Access Permission\"),\n ]\n", "path": "src/registrar/models/user.py"}]}
| 3,544 | 572 |
gh_patches_debug_7617
|
rasdani/github-patches
|
git_diff
|
larq__larq-39
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add test coverage report to Azure Pipelines
https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/python?view=azure-devops#test-with-pytest-and-collect-coverage-metrics-with-pytest-cov
</issue>
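The linked guide amounts to adding pytest-cov to the test dependencies and running pytest with coverage flags so the resulting XML report can be published. A minimal sketch (package path and exact pins are assumptions):

```python
# setup.py -- test extra with coverage collection added.
extras_require = {
    "test": ["absl-py>=0.7.0", "pytest>=4.3.1", "pytest-cov>=2.6.1"],
}

# The CI test step would then invoke something like:
#   pytest xquant --junitxml=junit/test-results.xml \
#          --cov=xquant --cov-report=xml --cov-report=html
```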
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3
4 def readme():
5 with open("README.md", "r") as f:
6 return f.read()
7
8
9 setup(
10 name="pl-xquant",
11 version="0.0.0",
12 author="Plumerai",
13 author_email="[email protected]",
14 description="An Open Source Machine Learning Framework for Training Extreme Quantized Neural Networks",
15 long_description=readme(),
16 long_description_content_type="text/markdown",
17 url="https://github.com/lgeiger/xquant",
18 packages=find_packages(),
19 license="Apache 2.0",
20 install_requires=["numpy >= 1.15.4, < 2.0"],
21 extras_require={
22 "tensorflow": ["tensorflow>=1.13.1"],
23 "tensorflow_gpu": ["tensorflow-gpu>=1.13.1"],
24 "test": ["absl-py>=0.7.0", "pytest>=4.3.1"],
25 "docs": [
26 "pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip",
27 "mkdocs-material>=4.1.0",
28 "pymdown-extensions>=6.0",
29 "mknotebooks>=0.1.5",
30 ],
31 },
32 classifiers=[
33 "Development Status :: 2 - Pre-Alpha",
34 "Intended Audience :: Developers",
35 "Intended Audience :: Education",
36 "Intended Audience :: Science/Research",
37 "License :: OSI Approved :: Apache Software License",
38 "Programming Language :: Python :: 3",
39 "Programming Language :: Python :: 3 :: Only",
40 "Programming Language :: Python :: 3.6",
41 "Programming Language :: Python :: 3.7",
42 "Topic :: Scientific/Engineering",
43 "Topic :: Scientific/Engineering :: Mathematics",
44 "Topic :: Scientific/Engineering :: Artificial Intelligence",
45 "Topic :: Software Development",
46 "Topic :: Software Development :: Libraries",
47 "Topic :: Software Development :: Libraries :: Python Modules",
48 ],
49 )
50
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
extras_require={
"tensorflow": ["tensorflow>=1.13.1"],
"tensorflow_gpu": ["tensorflow-gpu>=1.13.1"],
- "test": ["absl-py>=0.7.0", "pytest>=4.3.1"],
+ "test": ["absl-py>=0.7.0", "pytest>=4.3.1", "pytest-cov>=2.6.1"],
"docs": [
"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip",
"mkdocs-material>=4.1.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.13.1\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.13.1\"],\n- \"test\": [\"absl-py>=0.7.0\", \"pytest>=4.3.1\"],\n+ \"test\": [\"absl-py>=0.7.0\", \"pytest>=4.3.1\", \"pytest-cov>=2.6.1\"],\n \"docs\": [\n \"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip\",\n \"mkdocs-material>=4.1.0\",\n", "issue": "Add test coverage report to Azure Pipelines\nhttps://docs.microsoft.com/en-us/azure/devops/pipelines/languages/python?view=azure-devops#test-with-pytest-and-collect-coverage-metrics-with-pytest-cov\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"pl-xquant\",\n version=\"0.0.0\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Framework for Training Extreme Quantized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/lgeiger/xquant\",\n packages=find_packages(),\n license=\"Apache 2.0\",\n install_requires=[\"numpy >= 1.15.4, < 2.0\"],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.13.1\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.13.1\"],\n \"test\": [\"absl-py>=0.7.0\", \"pytest>=4.3.1\"],\n \"docs\": [\n \"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip\",\n \"mkdocs-material>=4.1.0\",\n \"pymdown-extensions>=6.0\",\n \"mknotebooks>=0.1.5\",\n ],\n },\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]}
| 1,108 | 169 |
gh_patches_debug_15952
|
rasdani/github-patches
|
git_diff
|
kevoreilly__CAPEv2-935
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CAPA rule path does not exist
# Expected Behavior
CAPA is able to load its rules.
# Current Behavior
CAPA is unable to load rules. It splits the file path string into a list of characters and loops through each one: it enumerates the `/` directory, then moves on to the next character, `o`, at which point it errors because that path does not exist:
OSError: rule path o does not exist or cannot be accessed
# Failure Information (for bugs)
The argument capa.main.RULES_PATH_DEFAULT_STRING passed to get_rules is a string, but should be a list of file paths:
https://github.com/kevoreilly/CAPEv2/blob/d9124712cabe5bf9a7a3a98da93cbdbd37a53da3/lib/cuckoo/common/integrations/capa.py#L44
## Steps to Reproduce
Enable CAPA in the config and run the web server.
## Context
commit 1bd0bf62055fc3741ea19a85d510d54052dbf431
Ubuntu 20.04.04 LTS
## Failure Logs
File "/opt/CAPEv2/web/../lib/cuckoo/common/integrations/parse_pe.py", line 74, in <module>
from lib.cuckoo.common.integrations.capa import HAVE_FLARE_CAPA, flare_capa_details
File "/opt/CAPEv2/web/../lib/cuckoo/common/integrations/capa.py", line 48, in <module>
rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)
File "/usr/local/lib/python3.8/dist-packages/capa/main.py", line 580, in get_rules
raise IOError("rule path %s does not exist or cannot be accessed" % rule_path)
OSError: rule path o does not exist or cannot be accessed
</issue>
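In short, the string default is being iterated character by character. A minimal sketch of the kind of call-site change the traceback points at, assuming this capa version's `get_rules` expects a list of rule paths rather than a single string:

```python
# Wrap the single rules directory in a list so get_rules iterates over
# paths instead of over the characters of the path string.
rules_path = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
rules = capa.main.get_rules([rules_path], disable_progress=True)
rules = capa.rules.RuleSet(rules)
```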
<code>
[start of lib/cuckoo/common/integrations/capa.py]
1 # Copyright (C) 2010-2015 Cuckoo Foundation.
2 # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
3 # See the file 'docs/LICENSE' for copying permission.
4
5 import collections
6 import logging
7 import os
8 from typing import Any, Dict, List
9
10 from lib.cuckoo.common.config import Config
11 from lib.cuckoo.common.constants import CUCKOO_ROOT
12
13 log = logging.getLogger(__name__)
14
15 processing_conf = Config("processing")
16
17 """
18 from lib.cuckoo.common.integrations.capa import flare_capa_details, HAVE_FLARE_CAPA
19 path = "/opt/CAPEv2/storage/binaries/da034c11f0c396f6cd11d22f833f9501dc75a33047ba3bd5870ff79e479bc004"
20 details = flare_capa_details(path, "static", on_demand=True)
21 """
22
23 HAVE_FLARE_CAPA = False
24 if processing_conf.flare_capa.enabled:
25 try:
26 from capa.version import __version__ as capa_version
27
28 if capa_version[0] != "3":
29 print("FLARE-CAPA missed, pip3 install -U flare-capa")
30 else:
31 import capa.main
32 import capa.render.utils as rutils
33 import capa.rules
34 from capa.main import UnsupportedRuntimeError
35 from capa.render.result_document import (
36 convert_capabilities_to_result_document as capa_convert_capabilities_to_result_document,
37 )
38 from capa.rules import InvalidRuleSet, InvalidRuleWithPath
39
40 rules_path = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
41 if os.path.exists(rules_path):
42 capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
43 try:
44 rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)
45 rules = capa.rules.RuleSet(rules)
46 HAVE_FLARE_CAPA = True
47 except InvalidRuleWithPath:
48 print("FLARE_CAPA InvalidRuleWithPath")
49 HAVE_FLARE_CAPA = False
50 except InvalidRuleSet:
51 print("FLARE_CAPA InvalidRuleSet")
52 HAVE_FLARE_CAPA = False
53 else:
54 print("FLARE CAPA rules missed! You can download them using python3 community.py -cr")
55 HAVE_FLARE_CAPA = False
56
57 signatures_path = os.path.join(CUCKOO_ROOT, "data", "capa-signatures")
58 if os.path.exists(signatures_path):
59 capa.main.SIGNATURES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, "data", "capa-signatures")
60 try:
61 signatures = capa.main.get_signatures(capa.main.SIGNATURES_PATH_DEFAULT_STRING)
62 HAVE_FLARE_CAPA = True
63 except IOError:
64 print("FLARE_CAPA InvalidSignatures")
65 else:
66 print("FLARE CAPA rules missed! You can download them using python3 community.py -cr")
67 HAVE_FLARE_CAPA = False
68 except ImportError as e:
69 HAVE_FLARE_CAPA = False
70 print(e)
71 print("FLARE-CAPA missed, pip3 install -U flare-capa")
72
73
74 def render_meta(doc: Dict[str, Any]) -> Dict[str, Any]:
75 return {
76 "md5": doc["meta"]["sample"]["md5"],
77 "sha1": doc["meta"]["sample"]["sha1"],
78 "sha256": doc["meta"]["sample"]["sha256"],
79 "path": doc["meta"]["sample"]["path"],
80 }
81
82
83 def find_subrule_matches(doc: Dict[str, Any]) -> set:
84 """
85 collect the rule names that have been matched as a subrule match.
86 this way we can avoid displaying entries for things that are too specific.
87 """
88
89 def rec(node: dict) -> set:
90 matches = set()
91 if not node["success"]:
92 # there's probably a bug here for rules that do `not: match: ...`
93 # but we don't have any examples of this yet
94 return
95
96 elif node["node"]["type"] == "statement":
97 for child in node["children"]:
98 rec(child)
99
100 elif node["node"]["type"] == "feature":
101 if node["node"]["feature"]["type"] == "match":
102 matches.add(node["node"]["feature"]["match"])
103 return matches
104
105 matches = set()
106
107 for rule in rutils.capability_rules(doc):
108 for node in rule["matches"].values():
109 matches = matches.union(rec(node))
110
111 return matches
112
113
114 def render_capabilities(doc: Dict[str, Any]) -> Dict[str, List[str]]:
115 """
116 example::
117 {'accept command line arguments': ['host-interaction/cli'],
118 'allocate thread local storage (2 matches)': ['host-interaction/process'],
119 'check for time delay via GetTickCount': ['anti-analysis/anti-debugging/debugger-detection'],
120 'check if process is running under wine': ['anti-analysis/anti-emulation/wine'],
121 'contain a resource (.rsrc) section': ['executable/pe/section/rsrc'],
122 'write file (3 matches)': ['host-interaction/file-system/write']
123 }
124 """
125 subrule_matches = find_subrule_matches(doc)
126
127 capability_dict = {}
128 for rule in rutils.capability_rules(doc):
129 if rule["meta"]["name"] in subrule_matches:
130 # rules that are also matched by other rules should not get rendered by default.
131 # this cuts down on the amount of output while giving approx the same detail.
132 # see #224
133 continue
134
135 count = len(rule["matches"])
136 if count == 1:
137 capability = rule["meta"]["name"]
138 else:
139 capability = f"{rule['meta']['name']} ({count} matches)"
140
141 capability_dict.setdefault(rule["meta"]["namespace"], []).append(capability)
142 return capability_dict
143
144
145 def render_attack(doc: Dict[str, Any]) -> Dict[str, List[str]]:
146 """
147 example::
148 {'COLLECTION': ['Input Capture::Keylogging [T1056.001]'],
149 'DEFENSE EVASION': ['Obfuscated Files or Information [T1027]',
150 'Virtualization/Sandbox Evasion::System Checks '
151 '[T1497.001]'],
152 'DISCOVERY': ['File and Directory Discovery [T1083]',
153 'Query Registry [T1012]',
154 'System Information Discovery [T1082]'],
155 'EXECUTION': ['Shared Modules [T1129]']
156 }
157 """
158 attck_dict = {}
159 tactics = collections.defaultdict(set)
160 for rule in rutils.capability_rules(doc):
161 for attack in rule["meta"].get("att&ck", {}):
162 tactics[attack["tactic"]].add((attack["technique"], attack.get("subtechnique"), attack["id"]))
163
164 for tactic, techniques in sorted(tactics.items()):
165 inner_rows = []
166 for technique, subtechnique, id in sorted(techniques):
167 if subtechnique is None:
168 inner_rows.append(f"{technique} {id}")
169 else:
170 inner_rows.append(f"{technique}::{subtechnique} {id}")
171 attck_dict.setdefault(tactic.upper(), inner_rows)
172 return attck_dict
173
174
175 def render_mbc(doc: Dict[str, Any]) -> Dict[str, List[str]]:
176 """
177 example::
178 {'ANTI-BEHAVIORAL ANALYSIS': ['Debugger Detection::Timing/Delay Check '
179 'GetTickCount [B0001.032]',
180 'Emulator Detection [B0004]',
181 'Virtual Machine Detection::Instruction '
182 'Testing [B0009.029]',
183 'Virtual Machine Detection [B0009]'],
184 'COLLECTION': ['Keylogging::Polling [F0002.002]'],
185 'CRYPTOGRAPHY': ['Encrypt Data::RC4 [C0027.009]',
186 'Generate Pseudo-random Sequence::RC4 PRGA '
187 '[C0021.004]']
188 }
189 """
190 mbc_dict = {}
191 objectives = collections.defaultdict(set)
192 for rule in rutils.capability_rules(doc):
193 for mbc in rule["meta"].get("mbc", {}):
194 objectives[mbc["objective"]].add((mbc["behavior"], mbc.get("method"), mbc["id"]))
195
196 for objective, behaviors in sorted(objectives.items()):
197 inner_rows = []
198 for behavior, method, id in sorted(behaviors):
199 if method is None:
200 inner_rows.append(f"{behavior} [{id}]")
201 else:
202 inner_rows.append(f"{behavior}::{method} [{id}]")
203 mbc_dict.setdefault(objective.upper(), inner_rows)
204 return mbc_dict
205
206
207 def render_dictionary(doc: Dict[str, Any]) -> Dict[str, Any]:
208 ostream = render_meta(doc)
209 ostream["ATTCK"] = render_attack(doc)
210 ostream["MBC"] = render_mbc(doc)
211 ostream["CAPABILITY"] = render_capabilities(doc)
212
213 return ostream
214
215
216 # ===== CAPA END
217 def flare_capa_details(file_path: str, category: str = False, on_demand=False, disable_progress=True) -> Dict[str, Any]:
218 capa_dictionary = {}
219 if (
220 HAVE_FLARE_CAPA
221 and processing_conf.flare_capa.enabled
222 and processing_conf.flare_capa.get(category, False)
223 and not processing_conf.flare_capa.on_demand
224 or on_demand
225 ):
226 try:
227 extractor = capa.main.get_extractor(
228 file_path, "auto", capa.main.BACKEND_VIV, signatures, disable_progress=disable_progress
229 )
230 meta = capa.main.collect_metadata("", file_path, capa.main.RULES_PATH_DEFAULT_STRING, extractor)
231 capabilities, counts = capa.main.find_capabilities(rules, extractor, disable_progress=True)
232 meta["analysis"].update(counts)
233 doc = capa_convert_capabilities_to_result_document(meta, rules, capabilities)
234 capa_dictionary = render_dictionary(doc)
235 except MemoryError:
236 log.warning("FLARE CAPA -> MemoryError")
237 except UnsupportedRuntimeError:
238 log.error("FLARE CAPA -> UnsupportedRuntimeError")
239 except Exception as e:
240 log.error(e, exc_info=True)
241 return capa_dictionary
242
[end of lib/cuckoo/common/integrations/capa.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/cuckoo/common/integrations/capa.py b/lib/cuckoo/common/integrations/capa.py
--- a/lib/cuckoo/common/integrations/capa.py
+++ b/lib/cuckoo/common/integrations/capa.py
@@ -40,8 +40,10 @@
rules_path = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
if os.path.exists(rules_path):
capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
+ rules_list = []
+ rules_list.append(rules_path)
try:
- rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)
+ rules = capa.main.get_rules(rules_list, disable_progress=True)
rules = capa.rules.RuleSet(rules)
HAVE_FLARE_CAPA = True
except InvalidRuleWithPath:
|
{"golden_diff": "diff --git a/lib/cuckoo/common/integrations/capa.py b/lib/cuckoo/common/integrations/capa.py\n--- a/lib/cuckoo/common/integrations/capa.py\n+++ b/lib/cuckoo/common/integrations/capa.py\n@@ -40,8 +40,10 @@\n rules_path = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n if os.path.exists(rules_path):\n capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n+ rules_list = []\n+ rules_list.append(rules_path)\n try:\n- rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)\n+ rules = capa.main.get_rules(rules_list, disable_progress=True)\n rules = capa.rules.RuleSet(rules)\n HAVE_FLARE_CAPA = True\n except InvalidRuleWithPath:\n", "issue": "CAPA rule path does not exist\n# Expected Behavior\r\n\r\nCAPA is able to load its rules.\r\n\r\n# Current Behavior\r\n\r\nCAPA is unable to load rules. Splits the file path into a list and loops through each character. Enumerates the / directory then moves to the next character o at which point it errors as the path does not exist:\r\nOSError: rule path o does not exist or cannot be accessed\r\n\r\n# Failure Information (for bugs)\r\n\r\nThe argument capa.main.RULES_PATH_DEFAULT_STRING passed to get_rules is a string, but should be a list of file paths:\r\nhttps://github.com/kevoreilly/CAPEv2/blob/d9124712cabe5bf9a7a3a98da93cbdbd37a53da3/lib/cuckoo/common/integrations/capa.py#L44\r\n\r\n## Steps to Reproduce\r\n\r\nEnable CAPA in the config and run web server.\r\n\r\n## Context\r\n\r\ncommit 1bd0bf62055fc3741ea19a85d510d54052dbf431\r\nUbuntu 20.04.04 LTS\r\n\r\n## Failure Logs\r\n\r\n File \"/opt/CAPEv2/web/../lib/cuckoo/common/integrations/parse_pe.py\", line 74, in <module>\r\n from lib.cuckoo.common.integrations.capa import HAVE_FLARE_CAPA, flare_capa_details\r\n File \"/opt/CAPEv2/web/../lib/cuckoo/common/integrations/capa.py\", line 48, in <module>\r\n rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)\r\n File \"/usr/local/lib/python3.8/dist-packages/capa/main.py\", line 580, in get_rules\r\n raise IOError(\"rule path %s does not exist or cannot be accessed\" % rule_path)\r\nOSError: rule path o does not exist or cannot be accessed\n", "before_files": [{"content": "# Copyright (C) 2010-2015 Cuckoo Foundation.\n# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org\n# See the file 'docs/LICENSE' for copying permission.\n\nimport collections\nimport logging\nimport os\nfrom typing import Any, Dict, List\n\nfrom lib.cuckoo.common.config import Config\nfrom lib.cuckoo.common.constants import CUCKOO_ROOT\n\nlog = logging.getLogger(__name__)\n\nprocessing_conf = Config(\"processing\")\n\n\"\"\"\nfrom lib.cuckoo.common.integrations.capa import flare_capa_details, HAVE_FLARE_CAPA\npath = \"/opt/CAPEv2/storage/binaries/da034c11f0c396f6cd11d22f833f9501dc75a33047ba3bd5870ff79e479bc004\"\ndetails = flare_capa_details(path, \"static\", on_demand=True)\n\"\"\"\n\nHAVE_FLARE_CAPA = False\nif processing_conf.flare_capa.enabled:\n try:\n from capa.version import __version__ as capa_version\n\n if capa_version[0] != \"3\":\n print(\"FLARE-CAPA missed, pip3 install -U flare-capa\")\n else:\n import capa.main\n import capa.render.utils as rutils\n import capa.rules\n from capa.main import UnsupportedRuntimeError\n from capa.render.result_document import (\n convert_capabilities_to_result_document as capa_convert_capabilities_to_result_document,\n )\n from capa.rules import InvalidRuleSet, InvalidRuleWithPath\n\n rules_path = 
os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n if os.path.exists(rules_path):\n capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n try:\n rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)\n rules = capa.rules.RuleSet(rules)\n HAVE_FLARE_CAPA = True\n except InvalidRuleWithPath:\n print(\"FLARE_CAPA InvalidRuleWithPath\")\n HAVE_FLARE_CAPA = False\n except InvalidRuleSet:\n print(\"FLARE_CAPA InvalidRuleSet\")\n HAVE_FLARE_CAPA = False\n else:\n print(\"FLARE CAPA rules missed! You can download them using python3 community.py -cr\")\n HAVE_FLARE_CAPA = False\n\n signatures_path = os.path.join(CUCKOO_ROOT, \"data\", \"capa-signatures\")\n if os.path.exists(signatures_path):\n capa.main.SIGNATURES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-signatures\")\n try:\n signatures = capa.main.get_signatures(capa.main.SIGNATURES_PATH_DEFAULT_STRING)\n HAVE_FLARE_CAPA = True\n except IOError:\n print(\"FLARE_CAPA InvalidSignatures\")\n else:\n print(\"FLARE CAPA rules missed! You can download them using python3 community.py -cr\")\n HAVE_FLARE_CAPA = False\n except ImportError as e:\n HAVE_FLARE_CAPA = False\n print(e)\n print(\"FLARE-CAPA missed, pip3 install -U flare-capa\")\n\n\ndef render_meta(doc: Dict[str, Any]) -> Dict[str, Any]:\n return {\n \"md5\": doc[\"meta\"][\"sample\"][\"md5\"],\n \"sha1\": doc[\"meta\"][\"sample\"][\"sha1\"],\n \"sha256\": doc[\"meta\"][\"sample\"][\"sha256\"],\n \"path\": doc[\"meta\"][\"sample\"][\"path\"],\n }\n\n\ndef find_subrule_matches(doc: Dict[str, Any]) -> set:\n \"\"\"\n collect the rule names that have been matched as a subrule match.\n this way we can avoid displaying entries for things that are too specific.\n \"\"\"\n\n def rec(node: dict) -> set:\n matches = set()\n if not node[\"success\"]:\n # there's probably a bug here for rules that do `not: match: ...`\n # but we don't have any examples of this yet\n return\n\n elif node[\"node\"][\"type\"] == \"statement\":\n for child in node[\"children\"]:\n rec(child)\n\n elif node[\"node\"][\"type\"] == \"feature\":\n if node[\"node\"][\"feature\"][\"type\"] == \"match\":\n matches.add(node[\"node\"][\"feature\"][\"match\"])\n return matches\n\n matches = set()\n\n for rule in rutils.capability_rules(doc):\n for node in rule[\"matches\"].values():\n matches = matches.union(rec(node))\n\n return matches\n\n\ndef render_capabilities(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'accept command line arguments': ['host-interaction/cli'],\n 'allocate thread local storage (2 matches)': ['host-interaction/process'],\n 'check for time delay via GetTickCount': ['anti-analysis/anti-debugging/debugger-detection'],\n 'check if process is running under wine': ['anti-analysis/anti-emulation/wine'],\n 'contain a resource (.rsrc) section': ['executable/pe/section/rsrc'],\n 'write file (3 matches)': ['host-interaction/file-system/write']\n }\n \"\"\"\n subrule_matches = find_subrule_matches(doc)\n\n capability_dict = {}\n for rule in rutils.capability_rules(doc):\n if rule[\"meta\"][\"name\"] in subrule_matches:\n # rules that are also matched by other rules should not get rendered by default.\n # this cuts down on the amount of output while giving approx the same detail.\n # see #224\n continue\n\n count = len(rule[\"matches\"])\n if count == 1:\n capability = rule[\"meta\"][\"name\"]\n else:\n capability = f\"{rule['meta']['name']} ({count} matches)\"\n\n 
capability_dict.setdefault(rule[\"meta\"][\"namespace\"], []).append(capability)\n return capability_dict\n\n\ndef render_attack(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'COLLECTION': ['Input Capture::Keylogging [T1056.001]'],\n 'DEFENSE EVASION': ['Obfuscated Files or Information [T1027]',\n 'Virtualization/Sandbox Evasion::System Checks '\n '[T1497.001]'],\n 'DISCOVERY': ['File and Directory Discovery [T1083]',\n 'Query Registry [T1012]',\n 'System Information Discovery [T1082]'],\n 'EXECUTION': ['Shared Modules [T1129]']\n }\n \"\"\"\n attck_dict = {}\n tactics = collections.defaultdict(set)\n for rule in rutils.capability_rules(doc):\n for attack in rule[\"meta\"].get(\"att&ck\", {}):\n tactics[attack[\"tactic\"]].add((attack[\"technique\"], attack.get(\"subtechnique\"), attack[\"id\"]))\n\n for tactic, techniques in sorted(tactics.items()):\n inner_rows = []\n for technique, subtechnique, id in sorted(techniques):\n if subtechnique is None:\n inner_rows.append(f\"{technique} {id}\")\n else:\n inner_rows.append(f\"{technique}::{subtechnique} {id}\")\n attck_dict.setdefault(tactic.upper(), inner_rows)\n return attck_dict\n\n\ndef render_mbc(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'ANTI-BEHAVIORAL ANALYSIS': ['Debugger Detection::Timing/Delay Check '\n 'GetTickCount [B0001.032]',\n 'Emulator Detection [B0004]',\n 'Virtual Machine Detection::Instruction '\n 'Testing [B0009.029]',\n 'Virtual Machine Detection [B0009]'],\n 'COLLECTION': ['Keylogging::Polling [F0002.002]'],\n 'CRYPTOGRAPHY': ['Encrypt Data::RC4 [C0027.009]',\n 'Generate Pseudo-random Sequence::RC4 PRGA '\n '[C0021.004]']\n }\n \"\"\"\n mbc_dict = {}\n objectives = collections.defaultdict(set)\n for rule in rutils.capability_rules(doc):\n for mbc in rule[\"meta\"].get(\"mbc\", {}):\n objectives[mbc[\"objective\"]].add((mbc[\"behavior\"], mbc.get(\"method\"), mbc[\"id\"]))\n\n for objective, behaviors in sorted(objectives.items()):\n inner_rows = []\n for behavior, method, id in sorted(behaviors):\n if method is None:\n inner_rows.append(f\"{behavior} [{id}]\")\n else:\n inner_rows.append(f\"{behavior}::{method} [{id}]\")\n mbc_dict.setdefault(objective.upper(), inner_rows)\n return mbc_dict\n\n\ndef render_dictionary(doc: Dict[str, Any]) -> Dict[str, Any]:\n ostream = render_meta(doc)\n ostream[\"ATTCK\"] = render_attack(doc)\n ostream[\"MBC\"] = render_mbc(doc)\n ostream[\"CAPABILITY\"] = render_capabilities(doc)\n\n return ostream\n\n\n# ===== CAPA END\ndef flare_capa_details(file_path: str, category: str = False, on_demand=False, disable_progress=True) -> Dict[str, Any]:\n capa_dictionary = {}\n if (\n HAVE_FLARE_CAPA\n and processing_conf.flare_capa.enabled\n and processing_conf.flare_capa.get(category, False)\n and not processing_conf.flare_capa.on_demand\n or on_demand\n ):\n try:\n extractor = capa.main.get_extractor(\n file_path, \"auto\", capa.main.BACKEND_VIV, signatures, disable_progress=disable_progress\n )\n meta = capa.main.collect_metadata(\"\", file_path, capa.main.RULES_PATH_DEFAULT_STRING, extractor)\n capabilities, counts = capa.main.find_capabilities(rules, extractor, disable_progress=True)\n meta[\"analysis\"].update(counts)\n doc = capa_convert_capabilities_to_result_document(meta, rules, capabilities)\n capa_dictionary = render_dictionary(doc)\n except MemoryError:\n log.warning(\"FLARE CAPA -> MemoryError\")\n except UnsupportedRuntimeError:\n log.error(\"FLARE CAPA -> UnsupportedRuntimeError\")\n except Exception as e:\n log.error(e, 
exc_info=True)\n return capa_dictionary\n", "path": "lib/cuckoo/common/integrations/capa.py"}]}
| 3,883 | 207 |
gh_patches_debug_21826
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1802
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check resource error in container network API
```
docker python client v2.4.2
python v2.7.12
docker v17.03.1-ce
Ubuntu 16.04
```
PR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'.
Reproduce using the following snippet:
```python
import docker
cli = docker.APIClient(base_url='unix:///var/run/docker.sock')
cli.pull(repository='ubuntu', tag='latest')
name = 'my_ubuntu'
container = cli.create_container(image='ubuntu:latest', name=name)
cli.connect_container_to_network(container=name, net_id='bridge')
```
This causes:
```
Traceback (most recent call last):
File "test.py", line 8, in <module>
cli.connect_container_to_network(container=name, net_id='bridge')
File "/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py", line 17, in wrapped
'Resource ID was not provided'
docker.errors.NullResource: Resource ID was not provided
```
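If the `check_resource` decorator from PR #1649 takes the name of the argument to validate, the fix is presumably to point it at `container` on the two network methods. The sketch below is only an illustration; the class name and signatures are assumptions based on docker-py's `api/network.py`, not verified here:

```python
# Hypothetical sketch of the presumed fix; the decorator module path comes from the
# traceback above, but the class and method signatures are assumptions.
from docker.utils.decorators import check_resource


class NetworkApiMixin:
    @check_resource('container')   # validate the 'container' argument instead of 'image'
    def connect_container_to_network(self, container, net_id, **kwargs):
        ...

    @check_resource('container')
    def disconnect_container_from_network(self, container, net_id, force=False):
        ...
```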
client.networks.create check_duplicates docs not reflective of behavior
Docs say it does, but it's actually set to `None`.
</issue>
<code>
[start of docker/version.py]
1 version = "2.6.0"
2 version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
3
[end of docker/version.py]
[start of docker/transport/unixconn.py]
1 import six
2 import requests.adapters
3 import socket
4
5 from .. import constants
6
7 if six.PY3:
8 import http.client as httplib
9 else:
10 import httplib
11
12 try:
13 import requests.packages.urllib3 as urllib3
14 except ImportError:
15 import urllib3
16
17
18 RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
19
20
21 class UnixHTTPResponse(httplib.HTTPResponse, object):
22 def __init__(self, sock, *args, **kwargs):
23 disable_buffering = kwargs.pop('disable_buffering', False)
24 super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
25 if disable_buffering is True:
26 # We must first create a new pointer then close the old one
27 # to avoid closing the underlying socket.
28 new_fp = sock.makefile('rb', 0)
29 self.fp.close()
30 self.fp = new_fp
31
32
33 class UnixHTTPConnection(httplib.HTTPConnection, object):
34
35 def __init__(self, base_url, unix_socket, timeout=60):
36 super(UnixHTTPConnection, self).__init__(
37 'localhost', timeout=timeout
38 )
39 self.base_url = base_url
40 self.unix_socket = unix_socket
41 self.timeout = timeout
42 self.disable_buffering = False
43
44 def connect(self):
45 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
46 sock.settimeout(self.timeout)
47 sock.connect(self.unix_socket)
48 self.sock = sock
49
50 def putheader(self, header, *values):
51 super(UnixHTTPConnection, self).putheader(header, *values)
52 if header == 'Connection' and 'Upgrade' in values:
53 self.disable_buffering = True
54
55 def response_class(self, sock, *args, **kwargs):
56 if self.disable_buffering:
57 kwargs['disable_buffering'] = True
58
59 return UnixHTTPResponse(sock, *args, **kwargs)
60
61
62 class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
63 def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
64 super(UnixHTTPConnectionPool, self).__init__(
65 'localhost', timeout=timeout, maxsize=maxsize
66 )
67 self.base_url = base_url
68 self.socket_path = socket_path
69 self.timeout = timeout
70
71 def _new_conn(self):
72 return UnixHTTPConnection(
73 self.base_url, self.socket_path, self.timeout
74 )
75
76
77 class UnixAdapter(requests.adapters.HTTPAdapter):
78
79 __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
80 'socket_path',
81 'timeout']
82
83 def __init__(self, socket_url, timeout=60,
84 pool_connections=constants.DEFAULT_NUM_POOLS):
85 socket_path = socket_url.replace('http+unix://', '')
86 if not socket_path.startswith('/'):
87 socket_path = '/' + socket_path
88 self.socket_path = socket_path
89 self.timeout = timeout
90 self.pools = RecentlyUsedContainer(
91 pool_connections, dispose_func=lambda p: p.close()
92 )
93 super(UnixAdapter, self).__init__()
94
95 def get_connection(self, url, proxies=None):
96 with self.pools.lock:
97 pool = self.pools.get(url)
98 if pool:
99 return pool
100
101 pool = UnixHTTPConnectionPool(
102 url, self.socket_path, self.timeout
103 )
104 self.pools[url] = pool
105
106 return pool
107
108 def request_url(self, request, proxies):
109 # The select_proxy utility in requests errors out when the provided URL
110 # doesn't have a hostname, like is the case when using a UNIX socket.
111 # Since proxies are an irrelevant notion in the case of UNIX sockets
112 # anyway, we simply return the path URL directly.
113 # See also: https://github.com/docker/docker-py/issues/811
114 return request.path_url
115
116 def close(self):
117 self.pools.clear()
118
[end of docker/transport/unixconn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -21,13 +21,12 @@
class UnixHTTPResponse(httplib.HTTPResponse, object):
def __init__(self, sock, *args, **kwargs):
disable_buffering = kwargs.pop('disable_buffering', False)
+ if six.PY2:
+ # FIXME: We may need to disable buffering on Py3 as well,
+ # but there's no clear way to do it at the moment. See:
+ # https://github.com/docker/docker-py/issues/1799
+ kwargs['buffering'] = not disable_buffering
super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
- if disable_buffering is True:
- # We must first create a new pointer then close the old one
- # to avoid closing the underlying socket.
- new_fp = sock.makefile('rb', 0)
- self.fp.close()
- self.fp = new_fp
class UnixHTTPConnection(httplib.HTTPConnection, object):
diff --git a/docker/version.py b/docker/version.py
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "2.6.0"
+version = "2.6.1"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
|
{"golden_diff": "diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py\n--- a/docker/transport/unixconn.py\n+++ b/docker/transport/unixconn.py\n@@ -21,13 +21,12 @@\n class UnixHTTPResponse(httplib.HTTPResponse, object):\n def __init__(self, sock, *args, **kwargs):\n disable_buffering = kwargs.pop('disable_buffering', False)\n+ if six.PY2:\n+ # FIXME: We may need to disable buffering on Py3 as well,\n+ # but there's no clear way to do it at the moment. See:\n+ # https://github.com/docker/docker-py/issues/1799\n+ kwargs['buffering'] = not disable_buffering\n super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)\n- if disable_buffering is True:\n- # We must first create a new pointer then close the old one\n- # to avoid closing the underlying socket.\n- new_fp = sock.makefile('rb', 0)\n- self.fp.close()\n- self.fp = new_fp\n \n \n class UnixHTTPConnection(httplib.HTTPConnection, object):\ndiff --git a/docker/version.py b/docker/version.py\n--- a/docker/version.py\n+++ b/docker/version.py\n@@ -1,2 +1,2 @@\n-version = \"2.6.0\"\n+version = \"2.6.1\"\n version_info = tuple([int(d) for d in version.split(\"-\")[0].split(\".\")])\n", "issue": "Check resource error in container network API\n```\r\ndocker python client v2.4.2\r\npython v2.7.12\r\ndocker v17.03.1-ce\r\nUbuntu 16.04\r\n```\r\n\r\nPR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'.\r\n\r\nReproduce using the following snippet:\r\n```python\r\nimport docker\r\n\r\ncli = docker.APIClient(base_url='unix:///var/run/docker.sock')\r\ncli.pull(repository='ubuntu', tag='latest')\r\n\r\nname = 'my_ubuntu'\r\ncontainer = cli.create_container(image='ubuntu:latest', name=name)\r\ncli.connect_container_to_network(container=name, net_id='bridge')\r\n```\r\n\r\nThis causes:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 8, in <module>\r\n cli.connect_container_to_network(container=name, net_id='bridge')\r\n File \"/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py\", line 17, in wrapped\r\n 'Resource ID was not provided'\r\ndocker.errors.NullResource: Resource ID was not provided\r\n```\nclient.networks.create check_duplicates docs not reflective of behavior\nDocs say it does, but it's actually set to `None`.\n", "before_files": [{"content": "version = \"2.6.0\"\nversion_info = tuple([int(d) for d in version.split(\"-\")[0].split(\".\")])\n", "path": "docker/version.py"}, {"content": "import six\nimport requests.adapters\nimport socket\n\nfrom .. 
import constants\n\nif six.PY3:\n import http.client as httplib\nelse:\n import httplib\n\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nRecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer\n\n\nclass UnixHTTPResponse(httplib.HTTPResponse, object):\n def __init__(self, sock, *args, **kwargs):\n disable_buffering = kwargs.pop('disable_buffering', False)\n super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)\n if disable_buffering is True:\n # We must first create a new pointer then close the old one\n # to avoid closing the underlying socket.\n new_fp = sock.makefile('rb', 0)\n self.fp.close()\n self.fp = new_fp\n\n\nclass UnixHTTPConnection(httplib.HTTPConnection, object):\n\n def __init__(self, base_url, unix_socket, timeout=60):\n super(UnixHTTPConnection, self).__init__(\n 'localhost', timeout=timeout\n )\n self.base_url = base_url\n self.unix_socket = unix_socket\n self.timeout = timeout\n self.disable_buffering = False\n\n def connect(self):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.settimeout(self.timeout)\n sock.connect(self.unix_socket)\n self.sock = sock\n\n def putheader(self, header, *values):\n super(UnixHTTPConnection, self).putheader(header, *values)\n if header == 'Connection' and 'Upgrade' in values:\n self.disable_buffering = True\n\n def response_class(self, sock, *args, **kwargs):\n if self.disable_buffering:\n kwargs['disable_buffering'] = True\n\n return UnixHTTPResponse(sock, *args, **kwargs)\n\n\nclass UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):\n def __init__(self, base_url, socket_path, timeout=60, maxsize=10):\n super(UnixHTTPConnectionPool, self).__init__(\n 'localhost', timeout=timeout, maxsize=maxsize\n )\n self.base_url = base_url\n self.socket_path = socket_path\n self.timeout = timeout\n\n def _new_conn(self):\n return UnixHTTPConnection(\n self.base_url, self.socket_path, self.timeout\n )\n\n\nclass UnixAdapter(requests.adapters.HTTPAdapter):\n\n __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',\n 'socket_path',\n 'timeout']\n\n def __init__(self, socket_url, timeout=60,\n pool_connections=constants.DEFAULT_NUM_POOLS):\n socket_path = socket_url.replace('http+unix://', '')\n if not socket_path.startswith('/'):\n socket_path = '/' + socket_path\n self.socket_path = socket_path\n self.timeout = timeout\n self.pools = RecentlyUsedContainer(\n pool_connections, dispose_func=lambda p: p.close()\n )\n super(UnixAdapter, self).__init__()\n\n def get_connection(self, url, proxies=None):\n with self.pools.lock:\n pool = self.pools.get(url)\n if pool:\n return pool\n\n pool = UnixHTTPConnectionPool(\n url, self.socket_path, self.timeout\n )\n self.pools[url] = pool\n\n return pool\n\n def request_url(self, request, proxies):\n # The select_proxy utility in requests errors out when the provided URL\n # doesn't have a hostname, like is the case when using a UNIX socket.\n # Since proxies are an irrelevant notion in the case of UNIX sockets\n # anyway, we simply return the path URL directly.\n # See also: https://github.com/docker/docker-py/issues/811\n return request.path_url\n\n def close(self):\n self.pools.clear()\n", "path": "docker/transport/unixconn.py"}]}
| 1,983 | 335 |
gh_patches_debug_17479
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1557
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Metadata column repeated when exported as csv
Hi, I have recently come across a bug when you export data as CSV.
Environment:
* Operating System: macOS 10.14
* Python Version Used: 3.9.5
* Doccano installed through `pip3 install doccano`
I have created a DocumentClassification project and have imported some json data.
The JSON data is in the following format:
```json
{"text":"The ravioli was excellent" , "hidden":"The FOOD was excellent"}
```
When these sentences are imported, the `"hidden": "The FOOD was excellent"` pair becomes part of the Metadata. I have quite a few of these sentences and have labelled them with my own labels.
The issue is that when I export the dataset as CSV, the Metadata column repeats. For example, if I have 10 labelled sentences, the Metadata column is repeated 10 times per row of data in Excel.
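For clarity, a small illustration of what appears to happen when the CSV header is built (hypothetical data, but it matches the `create_header` logic in the writer shown below):

```python
import itertools

# One metadata dict per labelled sentence; with 10 sentences the "hidden" key repeats 10 times.
records_metadata = [{"hidden": "The FOOD was excellent"}] * 10

header = ["id", "data", "label"]
header += list(itertools.chain(*[m.keys() for m in records_metadata]))
print(header)    # ['id', 'data', 'label', 'hidden', 'hidden', ...] with ten 'hidden' entries

# Building the header from a de-duplicated set of keys yields a single Metadata column.
deduped = ["id", "data", "label"] + sorted(set(itertools.chain(*[m.keys() for m in records_metadata])))
print(deduped)   # ['id', 'data', 'label', 'hidden']
```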
</issue>
<code>
[start of backend/api/views/download/data.py]
1 from typing import Any, Dict, List
2
3
4 class Record:
5
6 def __init__(self,
7 id: int,
8 data: str,
9 label: List[Any],
10 user: str,
11 metadata: Dict[Any, Any]):
12 self.id = id
13 self.data = data
14 self.label = label
15 self.user = user
16 self.metadata = metadata
17
18 def __str__(self):
19 return f'{self.data}\t{self.label}'
20
[end of backend/api/views/download/data.py]
[start of backend/api/views/download/writer.py]
1 import abc
2 import csv
3 import itertools
4 import json
5 import os
6 import uuid
7 import zipfile
8 from collections import defaultdict
9 from typing import Dict, Iterable, Iterator, List
10
11 from .data import Record
12
13
14 class BaseWriter:
15
16 def __init__(self, tmpdir: str):
17 self.tmpdir = tmpdir
18
19 @abc.abstractmethod
20 def write(self, records: Iterator[Record]) -> str:
21 raise NotImplementedError()
22
23 def write_zip(self, filenames: Iterable):
24 save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))
25 with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
26 for file in filenames:
27 zf.write(filename=file, arcname=os.path.basename(file))
28 return save_file
29
30
31 class LineWriter(BaseWriter):
32 extension = 'txt'
33
34 def write(self, records: Iterator[Record]) -> str:
35 files = {}
36 for record in records:
37 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
38 if filename not in files:
39 f = open(filename, mode='a')
40 files[filename] = f
41 f = files[filename]
42 line = self.create_line(record)
43 f.write(f'{line}\n')
44 for f in files.values():
45 f.close()
46 save_file = self.write_zip(files)
47 for file in files:
48 os.remove(file)
49 return save_file
50
51 @abc.abstractmethod
52 def create_line(self, record) -> str:
53 raise NotImplementedError()
54
55
56 class CsvWriter(BaseWriter):
57 extension = 'csv'
58
59 def write(self, records: Iterator[Record]) -> str:
60 writers = {}
61 file_handlers = set()
62 records = list(records)
63 header = self.create_header(records)
64 for record in records:
65 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
66 if filename not in writers:
67 f = open(filename, mode='a', encoding='utf-8')
68 writer = csv.DictWriter(f, header)
69 writer.writeheader()
70 writers[filename] = writer
71 file_handlers.add(f)
72 writer = writers[filename]
73 line = self.create_line(record)
74 writer.writerow(line)
75
76 for f in file_handlers:
77 f.close()
78 save_file = self.write_zip(writers)
79 for file in writers:
80 os.remove(file)
81 return save_file
82
83 def create_line(self, record) -> Dict:
84 return {
85 'id': record.id,
86 'data': record.data,
87 'label': '#'.join(record.label),
88 **record.metadata
89 }
90
91 def create_header(self, records: List[Record]) -> Iterable[str]:
92 header = ['id', 'data', 'label']
93 header += list(itertools.chain(*[r.metadata.keys() for r in records]))
94 return header
95
96
97 class JSONWriter(BaseWriter):
98 extension = 'json'
99
100 def write(self, records: Iterator[Record]) -> str:
101 writers = {}
102 contents = defaultdict(list)
103 for record in records:
104 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
105 if filename not in writers:
106 f = open(filename, mode='a', encoding='utf-8')
107 writers[filename] = f
108 line = self.create_line(record)
109 contents[filename].append(line)
110
111 for filename, f in writers.items():
112 content = contents[filename]
113 json.dump(content, f, ensure_ascii=False)
114 f.close()
115
116 save_file = self.write_zip(writers)
117 for file in writers:
118 os.remove(file)
119 return save_file
120
121 def create_line(self, record) -> Dict:
122 return {
123 'id': record.id,
124 'data': record.data,
125 'label': record.label,
126 **record.metadata
127 }
128
129
130 class JSONLWriter(LineWriter):
131 extension = 'jsonl'
132
133 def create_line(self, record):
134 return json.dumps({
135 'id': record.id,
136 'data': record.data,
137 'label': record.label,
138 **record.metadata
139 }, ensure_ascii=False)
140
141
142 class FastTextWriter(LineWriter):
143 extension = 'txt'
144
145 def create_line(self, record):
146 line = [f'__label__{label}' for label in record.label]
147 line.append(record.data)
148 line = ' '.join(line)
149 return line
150
[end of backend/api/views/download/writer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/backend/api/views/download/data.py b/backend/api/views/download/data.py
--- a/backend/api/views/download/data.py
+++ b/backend/api/views/download/data.py
@@ -1,3 +1,4 @@
+import json
from typing import Any, Dict, List
@@ -16,4 +17,10 @@
self.metadata = metadata
def __str__(self):
- return f'{self.data}\t{self.label}'
+ return json.dumps({
+ 'id': self.id,
+ 'data': self.data,
+ 'label': self.label,
+ 'user': self.user,
+ 'metadata': self.metadata
+ })
diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py
--- a/backend/api/views/download/writer.py
+++ b/backend/api/views/download/writer.py
@@ -90,7 +90,7 @@
def create_header(self, records: List[Record]) -> Iterable[str]:
header = ['id', 'data', 'label']
- header += list(itertools.chain(*[r.metadata.keys() for r in records]))
+ header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))
return header
|
{"golden_diff": "diff --git a/backend/api/views/download/data.py b/backend/api/views/download/data.py\n--- a/backend/api/views/download/data.py\n+++ b/backend/api/views/download/data.py\n@@ -1,3 +1,4 @@\n+import json\n from typing import Any, Dict, List\n \n \n@@ -16,4 +17,10 @@\n self.metadata = metadata\n \n def __str__(self):\n- return f'{self.data}\\t{self.label}'\n+ return json.dumps({\n+ 'id': self.id,\n+ 'data': self.data,\n+ 'label': self.label,\n+ 'user': self.user,\n+ 'metadata': self.metadata\n+ })\ndiff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py\n--- a/backend/api/views/download/writer.py\n+++ b/backend/api/views/download/writer.py\n@@ -90,7 +90,7 @@\n \n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n- header += list(itertools.chain(*[r.metadata.keys() for r in records]))\n+ header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))\n return header\n", "issue": "Metadata column repeated when exported as csv\nHi I have recently come across a bug when you export data as csv\r\n<environment.-->\r\n*\u00a0\u00a0 Operating System:MacOS 10.14\r\n*\u00a0\u00a0 Python Version Used: 3.9.5\r\n*\u00a0\u00a0 Doccano installed through pip3 install\u00a0\u00a0\u00a0\u00a0 doccano \r\n\r\nI have created a DocumentClassification project and have imported some json data. \r\n\r\nThe json data is in the format of \r\n\r\n```bash\r\n{\"text\":\"The ravioli was excellent\" , \"hidden\":\"The FOOD was excellent\"} \r\n```\r\n\r\nWhen these sentences are imported, the \"hidden\" : \"The FOOD was excellent\" becomes part of the Metadata. I have quite a few of these sentences and have labelled them with my own labels \r\n\r\nThe issue is when I export the dataset as csv, the Metadata column repeats. For example if I have 10 labelled sentences, the Metadata column is repeated 10 times per row of data in excel. 
\n", "before_files": [{"content": "from typing import Any, Dict, List\n\n\nclass Record:\n\n def __init__(self,\n id: int,\n data: str,\n label: List[Any],\n user: str,\n metadata: Dict[Any, Any]):\n self.id = id\n self.data = data\n self.label = label\n self.user = user\n self.metadata = metadata\n\n def __str__(self):\n return f'{self.data}\\t{self.label}'\n", "path": "backend/api/views/download/data.py"}, {"content": "import abc\nimport csv\nimport itertools\nimport json\nimport os\nimport uuid\nimport zipfile\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List\n\nfrom .data import Record\n\n\nclass BaseWriter:\n\n def __init__(self, tmpdir: str):\n self.tmpdir = tmpdir\n\n @abc.abstractmethod\n def write(self, records: Iterator[Record]) -> str:\n raise NotImplementedError()\n\n def write_zip(self, filenames: Iterable):\n save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))\n with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:\n for file in filenames:\n zf.write(filename=file, arcname=os.path.basename(file))\n return save_file\n\n\nclass LineWriter(BaseWriter):\n extension = 'txt'\n\n def write(self, records: Iterator[Record]) -> str:\n files = {}\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in files:\n f = open(filename, mode='a')\n files[filename] = f\n f = files[filename]\n line = self.create_line(record)\n f.write(f'{line}\\n')\n for f in files.values():\n f.close()\n save_file = self.write_zip(files)\n for file in files:\n os.remove(file)\n return save_file\n\n @abc.abstractmethod\n def create_line(self, record) -> str:\n raise NotImplementedError()\n\n\nclass CsvWriter(BaseWriter):\n extension = 'csv'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n file_handlers = set()\n records = list(records)\n header = self.create_header(records)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writer = csv.DictWriter(f, header)\n writer.writeheader()\n writers[filename] = writer\n file_handlers.add(f)\n writer = writers[filename]\n line = self.create_line(record)\n writer.writerow(line)\n\n for f in file_handlers:\n f.close()\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': '#'.join(record.label),\n **record.metadata\n }\n\n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n header += list(itertools.chain(*[r.metadata.keys() for r in records]))\n return header\n\n\nclass JSONWriter(BaseWriter):\n extension = 'json'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n contents = defaultdict(list)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writers[filename] = f\n line = self.create_line(record)\n contents[filename].append(line)\n\n for filename, f in writers.items():\n content = contents[filename]\n json.dump(content, f, ensure_ascii=False)\n f.close()\n\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': 
record.data,\n 'label': record.label,\n **record.metadata\n }\n\n\nclass JSONLWriter(LineWriter):\n extension = 'jsonl'\n\n def create_line(self, record):\n return json.dumps({\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }, ensure_ascii=False)\n\n\nclass FastTextWriter(LineWriter):\n extension = 'txt'\n\n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "path": "backend/api/views/download/writer.py"}]}
| 2,189 | 272 |
gh_patches_debug_25715
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-704
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Minor issues in feedback
- We have said that rating questions go up to 5, but they actually go to 6. Either this must be changed, or the committees must be informed so the questions can be altered.
- Norwegian dates in the mail sent to users (see the locale sketch after this list)
- The domain used for the links should be https://online.ntnu.no, not morgan.online.ntnu.no
- Online linjeforening should be Linjeforeningen Online
- Errors on required fields should stand out more; see Appendix 1 for the current appearance.
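On the date issue: the quoted mail below mixes Norwegian text with English month names ("23. January"), which is what the `strftime("%d. %B")` calls in `mommy.py` produce under an English or C locale. A minimal sketch, assuming a Norwegian locale such as nb_NO.UTF-8 is installed on the host:

```python
import datetime
import locale

d = datetime.date(2014, 1, 23)
print(d.strftime("%d. %B"))                      # "23. January" under a C/English locale

locale.setlocale(locale.LC_TIME, "nb_NO.UTF-8")  # assumes the Norwegian locale is installed
print(d.strftime("%d. %B"))                      # "23. januar"
```

The mail quoted below shows the current wording, dates and domain: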
"Hei, vi ønsker tilbakemelding på "Kurs med Steria" som du var med på den 23. January:
morgan.online.ntnu.no/feedback/events/event/23/2/
Fristen for å svare på skjema er 30. January innen kl 23:59.
Vær oppmerksom på at du får prikk dersom du ikke svarer på disse spørsmålene innen fristen.
Eventuelle spørsmål sendes til [email protected]
Mvh
Online linjeforening"
Appendix 1

</issue>
<code>
[start of apps/feedback/mommy.py]
1 # -*- coding: utf-8 -*-
2 import datetime
3 import socket
4
5 from django.utils import timezone
6 from django.contrib.contenttypes.models import ContentType
7 from django.conf import settings
8 from django.core.mail import EmailMessage
9
10 from apps.events.models import Event, AttendanceEvent, Attendee
11 from apps.feedback.models import FeedbackRelation
12 from apps.marks.models import Mark, UserEntry
13 from apps.mommy import Task, schedule
14
15 class FeedbackMail(Task):
16
17 @staticmethod
18 def run():
19 active_feedbacks = FeedbackRelation.objects.filter(active=True)
20
21 for feedback in active_feedbacks:
22 message = FeedbackMail.generate_message(feedback)
23
24 if message.send:
25 EmailMessage(message.subject, unicode(message), message.committee_mail, [], message.attended_mails).send()
26
27 if message.results_message:
28 EmailMessage("Feedback resultat", message.results_message,"[email protected]", [message.committee_mail]).send()
29
30 @staticmethod
31 def generate_message(feedback):
32 today = timezone.now().date()
33 yesterday = today + datetime.timedelta(days=-1)
34 not_responded = FeedbackMail.get_users(feedback)
35 message = Message()
36
37 #return if everyone has answered
38 if not not_responded:
39 return message
40
41 message.attended_mails = FeedbackMail.get_user_mails(not_responded)
42
43 message.committee_mail = FeedbackMail.get_committee_email(feedback)
44 deadline = feedback.deadline.strftime("%d. %B").encode("utf-8")
45 title = str(FeedbackMail.get_title(feedback)).encode("utf-8")
46 message.link = str(u"\n\n" + FeedbackMail.get_link(feedback)).encode("utf-8")
47 results_link = str(FeedbackMail.get_link(feedback) + "results").encode("utf-8")
48
49 start_date = feedback.get_start_date()
50 deadline_diff = (feedback.deadline - today).days
51
52 message.subject = u"Feedback: %s" % (title)
53 message.intro = u"Hei, vi ønsker tilbakemelding på \"%s\"" % (title)
54 message.mark = FeedbackMail.mark_message(feedback)
55 message.contact = u"\n\nEventuelle spørsmål sendes til %s " % (message.committee_mail)
56 message.start_date = FeedbackMail.start_date_message(start_date)
57
58 if deadline_diff < 0: #Deadline passed
59 feedback.active = False
60 feedback.save()
61
62 if feedback.gives_mark:
63 FeedbackMail.set_marks(title, not_responded)
64
65 message.intro = u"Fristen for å svare på \"%s\" har gått ut og du har fått en prikk." % (title)
66 message.mark = ""
67 message.start_date = ""
68 message.link = ""
69 message.send = True
70
71 elif deadline_diff < 1: #Last warning
72 message.deadline = u"\n\nI dag innen 23:59 er siste frist til å svare på skjemaet."
73
74 message.results_message = u"Hei, siste purremail på feedback skjema har blitt sendt til alle " \
75 u"gjenværende deltagere på \"%s\".\nDere kan se feedback-resultatene på:\n%s\n" % \
76 (title, results_link)
77 message.send = True
78 elif deadline_diff < 3 and feedback.gives_mark: # 3 days from the deadline
79 message.deadline = u"\n\nFristen for å svare på skjema er %s innen kl 23:59." % (deadline)
80 message.send = True
81 elif FeedbackMail.send_first_notification(feedback): #Day after the event or feedback creation
82 message.deadline = u"\n\nFristen for å svare på skjema er %s innen kl 23:59." % (deadline)
83
84 message.results_message = u"Hei, nå har feedbackmail blitt sendt til alle " \
85 u"deltagere på \"%s\".\nDere kan se feedback-resultatene på:\n%s\n" % \
86 (title, results_link)
87 message.send = True
88
89 return message
90
91 @staticmethod
92 def send_first_notification(feedback):
93 start_date = FeedbackMail.start_date(feedback)
94
95 #The object that requires feedback doesnt have a start date
96 if not start_date:
97 yesterday = timezone.now().date() - datetime.timedelta(days=1)
98 if feedback.created_date == yesterday.date():
99 #Send the first notification the day after the feedback relation was created
100 return True
101 else:
102 day_after_event = start_date + datetime.timedelta(1)
103 if day_after_event == datetime.datetime.date(timezone.now()):
104 #Send the first notification the day after the event
105 return True
106 return False
107
108 @staticmethod
109 def start_date(feedback):
110 start_date = feedback.get_start_date()
111
112 if start_date:
113 return start_date.date()
114 else:
115 return False
116
117 @staticmethod
118 def start_date_message(start_date):
119 #If the object(event) doesnt have start date it will send
120 #the first notification the day after the feedbackrelation is made
121 if start_date:
122 start_date_string = start_date.strftime("%d. %B").encode("utf-8")
123 message_start_date = u"som du var med på den %s:" % (start_date_string)
124 else:
125 message_start_date = ""
126
127 return message_start_date
128
129 @staticmethod
130 def get_users(feedback):
131 return feedback.get_slackers()
132
133 @staticmethod
134 def get_user_mails(not_responded):
135 return [user.email for user in not_responded]
136
137 @staticmethod
138 def get_link(feedback):
139 hostname = socket.getfqdn()
140 return str(hostname + feedback.get_absolute_url())
141
142 @staticmethod
143 def get_title(feedback):
144 return feedback.get_title()
145
146 @staticmethod
147 def get_committee_email(feedback):
148 return feedback.get_email()
149
150 @staticmethod
151 def mark_message(feedback):
152 if feedback.gives_mark:
153 return u"\nVær oppmerksom på at du får prikk dersom du ikke svarer " \
154 u"på disse spørsmålene innen fristen."
155 else:
156 return ""
157
158 @staticmethod
159 def set_marks(title, not_responded):
160 mark = Mark()
161 mark.title = u"Manglende tilbakemelding på %s" % (title)
162 mark.category = 4 #Missed feedback
163 mark.description = u"Du har fått en prikk fordi du ikke har levert tilbakemelding."
164 mark.save()
165
166 for user in not_responded:
167 user_entry = UserEntry()
168 user_entry.user = user
169 user_entry.mark = mark
170 user_entry.save()
171
172 class Message():
173 subject = ""
174 intro = ""
175 start_date = ""
176 deadline = ""
177 mark = ""
178 contact = ""
179 link = ""
180 send = False
181 end = u"\n\nMvh\nOnline linjeforening"
182 results_message = False
183
184 committee_mail = ""
185 attended_mails = False
186
187
188 def __unicode__(self):
189 message = "%s %s %s %s %s %s %s" % (
190 self.intro,
191 self.start_date,
192 self.link,
193 self.deadline,
194 self.mark,
195 self.contact,
196 self.end)
197 return message
198
199 schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)
200
[end of apps/feedback/mommy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/feedback/mommy.py b/apps/feedback/mommy.py
--- a/apps/feedback/mommy.py
+++ b/apps/feedback/mommy.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import datetime
import socket
+import locale
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
@@ -16,9 +17,10 @@
@staticmethod
def run():
- active_feedbacks = FeedbackRelation.objects.filter(active=True)
+ locale.setlocale(locale.LC_ALL, "nb_NO.UTF-8")
+ active_feedbacks = FeedbackRelation.objects.filter(active=True)
- for feedback in active_feedbacks:
+ for feedback in active_feedbacks:
message = FeedbackMail.generate_message(feedback)
if message.send:
@@ -178,7 +180,7 @@
contact = ""
link = ""
send = False
- end = u"\n\nMvh\nOnline linjeforening"
+ end = u"\n\nMvh\nLinjeforeningen Online"
results_message = False
committee_mail = ""
@@ -196,4 +198,4 @@
self.end)
return message
-schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)
+schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=00)
|
{"golden_diff": "diff --git a/apps/feedback/mommy.py b/apps/feedback/mommy.py\n--- a/apps/feedback/mommy.py\n+++ b/apps/feedback/mommy.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n import datetime\n import socket\n+import locale\n \n from django.utils import timezone\n from django.contrib.contenttypes.models import ContentType\n@@ -16,9 +17,10 @@\n \n @staticmethod\n def run():\n- active_feedbacks = FeedbackRelation.objects.filter(active=True)\n+ locale.setlocale(locale.LC_ALL, \"nb_NO.UTF-8\")\n+ active_feedbacks = FeedbackRelation.objects.filter(active=True)\n \n- for feedback in active_feedbacks:\n+ for feedback in active_feedbacks:\n message = FeedbackMail.generate_message(feedback)\n \n if message.send:\n@@ -178,7 +180,7 @@\n contact = \"\"\n link = \"\"\n send = False\n- end = u\"\\n\\nMvh\\nOnline linjeforening\"\n+ end = u\"\\n\\nMvh\\nLinjeforeningen Online\"\n results_message = False\n \n committee_mail = \"\"\n@@ -196,4 +198,4 @@\n self.end)\n return message\n \n-schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)\n+schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=00)\n", "issue": "Minor issues in feedback\n- We have said that rating questions go up to 5, they actually go to 6. This must be either changed, or the committees must be informed so questions can be altered.\n- Norwegian dates in the mail sent to users\n- The domain used for the links should be https://online.ntnu.no, not morgan.online.ntnu.no\n- Online linjeforening should be Linjeforeningen Online\n- Errors on required fields should stand out more, see appendix 1 for current.\n\n\"Hei, vi \u00f8nsker tilbakemelding p\u00e5 \"Kurs med Steria\" som du var med p\u00e5 den 23. January:\n\nmorgan.online.ntnu.no/feedback/events/event/23/2/\n\nFristen for \u00e5 svare p\u00e5 skjema er 30. January innen kl 23:59.\nV\u00e6r oppmerksom p\u00e5 at du f\u00e5r prikk dersom du ikke svarer p\u00e5 disse sp\u00f8rsm\u00e5lene innen fristen.\n\nEventuelle sp\u00f8rsm\u00e5l sendes til [email protected]\n\nMvh\nOnline linjeforening\"\n\nAppendix 1\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport socket\n\nfrom django.utils import timezone\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\n\nfrom apps.events.models import Event, AttendanceEvent, Attendee\nfrom apps.feedback.models import FeedbackRelation\nfrom apps.marks.models import Mark, UserEntry\nfrom apps.mommy import Task, schedule\n\nclass FeedbackMail(Task):\n\n @staticmethod\n def run():\n active_feedbacks = FeedbackRelation.objects.filter(active=True)\n \n for feedback in active_feedbacks:\n message = FeedbackMail.generate_message(feedback)\n\n if message.send:\n EmailMessage(message.subject, unicode(message), message.committee_mail, [], message.attended_mails).send()\n\n if message.results_message:\n EmailMessage(\"Feedback resultat\", message.results_message,\"[email protected]\", [message.committee_mail]).send() \n\n @staticmethod\n def generate_message(feedback):\n today = timezone.now().date()\n yesterday = today + datetime.timedelta(days=-1)\n not_responded = FeedbackMail.get_users(feedback)\n message = Message()\n\n #return if everyone has answered\n if not not_responded:\n return message\n \n message.attended_mails = FeedbackMail.get_user_mails(not_responded)\n\n message.committee_mail = FeedbackMail.get_committee_email(feedback)\n deadline = feedback.deadline.strftime(\"%d. 
%B\").encode(\"utf-8\")\n title = str(FeedbackMail.get_title(feedback)).encode(\"utf-8\")\n message.link = str(u\"\\n\\n\" + FeedbackMail.get_link(feedback)).encode(\"utf-8\")\n results_link = str(FeedbackMail.get_link(feedback) + \"results\").encode(\"utf-8\")\n \n start_date = feedback.get_start_date()\n deadline_diff = (feedback.deadline - today).days\n\n message.subject = u\"Feedback: %s\" % (title)\n message.intro = u\"Hei, vi \u00f8nsker tilbakemelding p\u00e5 \\\"%s\\\"\" % (title)\n message.mark = FeedbackMail.mark_message(feedback)\n message.contact = u\"\\n\\nEventuelle sp\u00f8rsm\u00e5l sendes til %s \" % (message.committee_mail)\n message.start_date = FeedbackMail.start_date_message(start_date)\n\n if deadline_diff < 0: #Deadline passed\n feedback.active = False\n feedback.save()\n\n if feedback.gives_mark:\n FeedbackMail.set_marks(title, not_responded) \n \n message.intro = u\"Fristen for \u00e5 svare p\u00e5 \\\"%s\\\" har g\u00e5tt ut og du har f\u00e5tt en prikk.\" % (title)\n message.mark = \"\"\n message.start_date = \"\"\n message.link = \"\"\n message.send = True\n\n elif deadline_diff < 1: #Last warning\n message.deadline = u\"\\n\\nI dag innen 23:59 er siste frist til \u00e5 svare p\u00e5 skjemaet.\"\n \n message.results_message = u\"Hei, siste purremail p\u00e5 feedback skjema har blitt sendt til alle \" \\\n u\"gjenv\u00e6rende deltagere p\u00e5 \\\"%s\\\".\\nDere kan se feedback-resultatene p\u00e5:\\n%s\\n\" % \\\n (title, results_link)\n message.send = True\n elif deadline_diff < 3 and feedback.gives_mark: # 3 days from the deadline\n message.deadline = u\"\\n\\nFristen for \u00e5 svare p\u00e5 skjema er %s innen kl 23:59.\" % (deadline)\n message.send = True\n elif FeedbackMail.send_first_notification(feedback): #Day after the event or feedback creation \n message.deadline = u\"\\n\\nFristen for \u00e5 svare p\u00e5 skjema er %s innen kl 23:59.\" % (deadline)\n \n message.results_message = u\"Hei, n\u00e5 har feedbackmail blitt sendt til alle \" \\\n u\"deltagere p\u00e5 \\\"%s\\\".\\nDere kan se feedback-resultatene p\u00e5:\\n%s\\n\" % \\\n (title, results_link)\n message.send = True\n\n return message\n \n @staticmethod\n def send_first_notification(feedback):\n start_date = FeedbackMail.start_date(feedback)\n\n #The object that requires feedback doesnt have a start date\n if not start_date:\n yesterday = timezone.now().date() - datetime.timedelta(days=1)\n if feedback.created_date == yesterday.date():\n #Send the first notification the day after the feedback relation was created\n return True\n else:\n day_after_event = start_date + datetime.timedelta(1)\n if day_after_event == datetime.datetime.date(timezone.now()):\n #Send the first notification the day after the event\n return True\n return False\n\n @staticmethod\n def start_date(feedback):\n start_date = feedback.get_start_date()\n \n if start_date:\n return start_date.date()\n else:\n return False\n\n @staticmethod\n def start_date_message(start_date):\n #If the object(event) doesnt have start date it will send \n #the first notification the day after the feedbackrelation is made\n if start_date:\n start_date_string = start_date.strftime(\"%d. 
%B\").encode(\"utf-8\")\n message_start_date = u\"som du var med p\u00e5 den %s:\" % (start_date_string)\n else:\n message_start_date = \"\"\n \n return message_start_date \n\n @staticmethod\n def get_users(feedback):\n return feedback.get_slackers()\n\n @staticmethod\n def get_user_mails(not_responded):\n return [user.email for user in not_responded]\n\n @staticmethod\n def get_link(feedback):\n hostname = socket.getfqdn()\n return str(hostname + feedback.get_absolute_url())\n\n @staticmethod\n def get_title(feedback):\n return feedback.get_title()\n\n @staticmethod\n def get_committee_email(feedback):\n return feedback.get_email()\n\n @staticmethod\n def mark_message(feedback):\n if feedback.gives_mark:\n return u\"\\nV\u00e6r oppmerksom p\u00e5 at du f\u00e5r prikk dersom du ikke svarer \" \\\n u\"p\u00e5 disse sp\u00f8rsm\u00e5lene innen fristen.\"\n else:\n return \"\"\n\n @staticmethod\n def set_marks(title, not_responded):\n mark = Mark()\n mark.title = u\"Manglende tilbakemelding p\u00e5 %s\" % (title)\n mark.category = 4 #Missed feedback\n mark.description = u\"Du har f\u00e5tt en prikk fordi du ikke har levert tilbakemelding.\"\n mark.save()\n \n for user in not_responded:\n user_entry = UserEntry()\n user_entry.user = user\n user_entry.mark = mark\n user_entry.save()\n \nclass Message():\n subject = \"\"\n intro = \"\"\n start_date = \"\"\n deadline = \"\"\n mark = \"\"\n contact = \"\"\n link = \"\"\n send = False\n end = u\"\\n\\nMvh\\nOnline linjeforening\"\n results_message = False\n\n committee_mail = \"\"\n attended_mails = False\n\n\n def __unicode__(self):\n message = \"%s %s %s %s %s %s %s\" % (\n self.intro, \n self.start_date, \n self.link, \n self.deadline, \n self.mark, \n self.contact, \n self.end)\n return message\n\nschedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)\n", "path": "apps/feedback/mommy.py"}]}
| 2,995 | 324 |
| gh_patches_debug_16388 | rasdani/github-patches | git_diff | wagtail__wagtail-8385 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use Full URL for ImageRenditionField.
### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
I'm a big fan of the new `full_url` field that images have and would like them to be easily used in the API.
Assuming one's frontend app is living on a different domain to the Wagtail API then the default relative URLs aren't as useful.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Add `full_url` to the output of `ImageRenditionField`.
I propose it just replace the `url` field altogether, but both could be returned.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
I've been extending the `ImageRenditionField` for use in my own projects
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
(Write your answer here.)
</issue>
<code>
[start of wagtail/images/api/fields.py]
1 from collections import OrderedDict
2
3 from rest_framework.fields import Field
4
5 from ..models import SourceImageIOError
6
7
8 class ImageRenditionField(Field):
9 """
10 A field that generates a rendition with the specified filter spec, and serialises
11 details of that rendition.
12
13 Example:
14 "thumbnail": {
15 "url": "/media/images/myimage.max-165x165.jpg",
16 "width": 165,
17 "height": 100,
18 "alt": "Image alt text"
19 }
20
21 If there is an error with the source image. The dict will only contain a single
22 key, "error", indicating this error:
23
24 "thumbnail": {
25 "error": "SourceImageIOError"
26 }
27 """
28
29 def __init__(self, filter_spec, *args, **kwargs):
30 self.filter_spec = filter_spec
31 super().__init__(*args, **kwargs)
32
33 def to_representation(self, image):
34 try:
35 thumbnail = image.get_rendition(self.filter_spec)
36
37 return OrderedDict(
38 [
39 ("url", thumbnail.url),
40 ("width", thumbnail.width),
41 ("height", thumbnail.height),
42 ("alt", thumbnail.alt),
43 ]
44 )
45 except SourceImageIOError:
46 return OrderedDict(
47 [
48 ("error", "SourceImageIOError"),
49 ]
50 )
51
[end of wagtail/images/api/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/images/api/fields.py b/wagtail/images/api/fields.py
--- a/wagtail/images/api/fields.py
+++ b/wagtail/images/api/fields.py
@@ -13,6 +13,7 @@
Example:
"thumbnail": {
"url": "/media/images/myimage.max-165x165.jpg",
+ "full_url": "https://media.example.com/media/images/myimage.max-165x165.jpg",
"width": 165,
"height": 100,
"alt": "Image alt text"
@@ -37,6 +38,7 @@
return OrderedDict(
[
("url", thumbnail.url),
+ ("full_url", thumbnail.full_url),
("width", thumbnail.width),
("height", thumbnail.height),
("alt", thumbnail.alt),
|
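A small, self-contained sketch of the serialised shape after this patch. It stands in for Wagtail's rendition object with a plain namespace, since exercising the real `ImageRenditionField` needs a configured Django/Wagtail project; the field names and the example `media.example.com` URL mirror the diff above.

```python
# Sketch only: mirrors what ImageRenditionField.to_representation() returns
# after the patch, using a stand-in object instead of a real Wagtail rendition.
from collections import OrderedDict
from types import SimpleNamespace

thumbnail = SimpleNamespace(
    url="/media/images/myimage.max-165x165.jpg",
    full_url="https://media.example.com/media/images/myimage.max-165x165.jpg",
    width=165,
    height=100,
    alt="Image alt text",
)

representation = OrderedDict(
    [
        ("url", thumbnail.url),
        ("full_url", thumbnail.full_url),  # the new field added by the patch
        ("width", thumbnail.width),
        ("height", thumbnail.height),
        ("alt", thumbnail.alt),
    ]
)
print(dict(representation))
```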
{"golden_diff": "diff --git a/wagtail/images/api/fields.py b/wagtail/images/api/fields.py\n--- a/wagtail/images/api/fields.py\n+++ b/wagtail/images/api/fields.py\n@@ -13,6 +13,7 @@\n Example:\n \"thumbnail\": {\n \"url\": \"/media/images/myimage.max-165x165.jpg\",\n+ \"full_url\": \"https://media.example.com/media/images/myimage.max-165x165.jpg\",\n \"width\": 165,\n \"height\": 100,\n \"alt\": \"Image alt text\"\n@@ -37,6 +38,7 @@\n return OrderedDict(\n [\n (\"url\", thumbnail.url),\n+ (\"full_url\", thumbnail.full_url),\n (\"width\", thumbnail.width),\n (\"height\", thumbnail.height),\n (\"alt\", thumbnail.alt),\n", "issue": "Use Full URL for ImageRenditionField.\n### Is your proposal related to a problem?\r\n\r\n<!--\r\n Provide a clear and concise description of what the problem is.\r\n For example, \"I'm always frustrated when...\"\r\n-->\r\n\r\nI'm a big fan of the new `full_url` field that images have and would like them to be easily used in the API.\r\n\r\nAssuming one's frontend app is living on a different domain to the Wagtail API then the default relative URLs aren't as useful.\r\n\r\n### Describe the solution you'd like\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\nAdd `full_url` to the output of `ImageRenditionField`.\r\n\r\nI propose it just replace the `url` field altogether, but both could be returned.\r\n\r\n### Describe alternatives you've considered\r\n\r\n<!--\r\n Let us know about other solutions you've tried or researched.\r\n-->\r\n\r\nI've been extending the `ImageRenditionField` for use in my own projects\r\n\r\n### Additional context\r\n\r\n<!--\r\n Is there anything else you can add about the proposal?\r\n You might want to link to related issues here, if you haven't already.\r\n-->\r\n\r\n(Write your answer here.)\r\n\nUse Full URL for ImageRenditionField.\n### Is your proposal related to a problem?\r\n\r\n<!--\r\n Provide a clear and concise description of what the problem is.\r\n For example, \"I'm always frustrated when...\"\r\n-->\r\n\r\nI'm a big fan of the new `full_url` field that images have and would like them to be easily used in the API.\r\n\r\nAssuming one's frontend app is living on a different domain to the Wagtail API then the default relative URLs aren't as useful.\r\n\r\n### Describe the solution you'd like\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\nAdd `full_url` to the output of `ImageRenditionField`.\r\n\r\nI propose it just replace the `url` field altogether, but both could be returned.\r\n\r\n### Describe alternatives you've considered\r\n\r\n<!--\r\n Let us know about other solutions you've tried or researched.\r\n-->\r\n\r\nI've been extending the `ImageRenditionField` for use in my own projects\r\n\r\n### Additional context\r\n\r\n<!--\r\n Is there anything else you can add about the proposal?\r\n You might want to link to related issues here, if you haven't already.\r\n-->\r\n\r\n(Write your answer here.)\r\n\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.fields import Field\n\nfrom ..models import SourceImageIOError\n\n\nclass ImageRenditionField(Field):\n \"\"\"\n A field that generates a rendition with the specified filter spec, and serialises\n details of that rendition.\n\n Example:\n \"thumbnail\": {\n \"url\": \"/media/images/myimage.max-165x165.jpg\",\n \"width\": 165,\n \"height\": 100,\n \"alt\": \"Image alt text\"\n }\n\n If there is an error with the source image. 
The dict will only contain a single\n key, \"error\", indicating this error:\n\n \"thumbnail\": {\n \"error\": \"SourceImageIOError\"\n }\n \"\"\"\n\n def __init__(self, filter_spec, *args, **kwargs):\n self.filter_spec = filter_spec\n super().__init__(*args, **kwargs)\n\n def to_representation(self, image):\n try:\n thumbnail = image.get_rendition(self.filter_spec)\n\n return OrderedDict(\n [\n (\"url\", thumbnail.url),\n (\"width\", thumbnail.width),\n (\"height\", thumbnail.height),\n (\"alt\", thumbnail.alt),\n ]\n )\n except SourceImageIOError:\n return OrderedDict(\n [\n (\"error\", \"SourceImageIOError\"),\n ]\n )\n", "path": "wagtail/images/api/fields.py"}]}
| 1,428 | 194 |
| gh_patches_debug_7296 | rasdani/github-patches | git_diff | LMFDB__lmfdb-5669 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
L-function of Dirichlet character does not link to Dirichlet character page
</issue>
<code>
[start of lmfdb/utils/names_and_urls.py]
1 # -*- coding: utf-8 -*-
2 from lmfdb.utils.utilities import key_for_numerically_sort
3 from flask import url_for
4 #######################################################################
5 # Functions for interacting with web structure
6 #######################################################################
7
8 # TODO This needs to be able to handle any sort of object
9 # There should probably be a more relevant field
10 # in the database, instead of trying to extract this from a URL
11 def name_and_object_from_url(url, check_existence=False):
12 # the import is here to avoid circular imports
13 from lmfdb import db
14 url_split = url.rstrip('/').lstrip('/').split("/")
15 name = '??'
16 obj_exists = False
17
18 if url_split[0] == "EllipticCurve":
19 # every EC instance was added from EC
20 obj_exists = True
21 if url_split[1] == 'Q':
22 if len(url_split) == 4: # isogeny class
23 # EllipticCurve/Q/341641/a
24 label_isogeny_class = ".".join(url_split[-2:])
25 if check_existence:
26 obj_exists = db.ec_curvedata.exists({"lmfdb_iso": label_isogeny_class})
27 elif len(url_split) == 5: # curve
28 # EllipticCurve/Q/48/a/6
29 label_curve = ".".join(url_split[-3:-1]) + url_split[-1]
30 if check_existence:
31 obj_exists = db.ec_curvedata.exists({"lmfdb_label": label_curve})
32 else:
33 raise NotImplementedError
34 else:
35 if len(url_split) == 4: # isogeny class
36 # EllipticCurve/2.2.140.1/14.1/a
37 field, cond, isog = url_split[-3:]
38 label_isogeny_class = "-".join([field, cond, isog])
39 if check_existence:
40 obj_exists = db.ec_nfcurves.exists({"class_label": label_isogeny_class})
41 elif len(url_split) == 5: # curve
42 # EllipticCurve/2.0.4.1/1250.3/a/3
43 field, cond, isog, ind = url_split[-4:]
44 label_curve = "-".join([field, cond, isog]) + ind
45 if check_existence:
46 obj_exists = db.ec_nfcurves.exists({"label": label_curve})
47 if len(url_split) == 4: # isogeny class
48 #name = 'Isogeny class ' + label_isogeny_class
49 name = 'Elliptic curve ' + label_isogeny_class
50 elif len(url_split) == 5: # curve
51 #name = 'Curve ' + label_curve
52 name = 'Elliptic curve ' + label_curve
53
54 elif url_split[0] == "Genus2Curve":
55 obj_exists = True
56 assert url_split[1] == 'Q'
57 if len(url_split) == 4: # isog class
58 # Genus2Curve/Q/310329/a
59 label_isogeny_class = ".".join(url_split[-2:])
60 if check_existence:
61 obj_exists = db.g2c_curves.exists({"class": label_isogeny_class})
62 #name = 'Isogeny class ' + label_isogeny_class
63 name = 'Genus 2 curve ' + label_isogeny_class
64 if len(url_split) == 6: # curve
65 # Genus2Curve/Q/1728/b/442368/1
66 label_curve = ".".join(url_split[-4:])
67 if check_existence:
68 obj_exists = db.g2c_curves.exists({"label": label_curve})
69 #name = 'Curve ' + label_curve
70 name = 'Genus 2 curve ' + label_curve
71
72 elif url_split[0] == "ModularForm":
73 if url_split[1] == 'GL2':
74 if url_split[2] == 'Q' and url_split[3] == 'holomorphic':
75 if len(url_split) == 10:
76 # ModularForm/GL2/Q/holomorphic/24/2/f/a/11/2
77 newform_label = ".".join(url_split[-6:-2])
78 conrey_newform_label = ".".join(url_split[-6:])
79 name = 'Modular form ' + conrey_newform_label
80 obj_exists = True
81 if check_existence:
82 obj_exists = db.mf_newforms.label_exists(newform_label)
83 elif len(url_split) == 8:
84 # ModularForm/GL2/Q/holomorphic/24/2/f/a
85 newform_label = ".".join(url_split[-4:])
86 name = 'Modular form ' + newform_label
87 obj_exists = True
88 if check_existence:
89 obj_exists = db.mf_newforms.label_exists(newform_label)
90
91 elif url_split[2] == 'TotallyReal':
92 # ModularForm/GL2/TotallyReal/2.2.140.1/holomorphic/2.2.140.1-14.1-a
93 label = url_split[-1]
94 name = 'Hilbert modular form ' + label
95 obj_exists = True
96 if check_existence:
97 obj_exists = db.hmf_forms.label_exists(label)
98
99 elif url_split[2] == 'ImaginaryQuadratic':
100 # ModularForm/GL2/ImaginaryQuadratic/2.0.4.1/98.1/a
101 label = '-'.join(url_split[-3:])
102 name = 'Bianchi modular form ' + label
103 obj_exists = 'CM' not in label
104 if check_existence:
105 obj_exists = db.bmf_forms.label_exists(label)
106 elif url_split[0] == "ArtinRepresentation":
107 label = url_split[1]
108 name = 'Artin representation ' + label
109 obj_exists = True
110 if check_existence:
111 obj_exists = db.artin_reps.label_exists(label.split('c')[0])
112 elif url_split[0] == "NumberField":
113 from lmfdb.number_fields.web_number_field import field_pretty
114 label = url_split[1]
115 name = 'Number field ' + field_pretty(label)
116 obj_exists = True
117 if check_existence:
118 obj_exists = db.number_fields.label_exists(label)
119 elif url_split[0] == "SatoTateGroup":
120 from lmfdb.sato_tate_groups.main import st_name
121 name, label = st_name(url_split[1])
122 if name is None:
123 name = label
124 obj_exists = False
125 else:
126 name = 'Sato Tate group $%s$' % name
127 obj_exists = True
128 else:
129 # FIXME
130 #print("unknown url", url)
131 pass
132
133 return name, obj_exists
134
135
136 def names_and_urls(instances, exclude={}):
137 res = []
138 names = set()
139 urls = set()
140 exclude = set(exclude)
141 root = url_for('index')
142
143 # remove duplicate urls
144 for instance in instances:
145 if not isinstance(instance, str):
146 instance = instance['url']
147 if instance not in exclude and '|' not in instance:
148 urls.add(instance)
149
150 for url in urls:
151 name, obj_exists = name_and_object_from_url(url)
152 if not name:
153 name = ''
154 if obj_exists:
155 url = root + url
156 else:
157 # do not display unknown objects
158 continue
159 name = '(%s)' % (name)
160 url = ""
161 # avoid duplicates that might have arise from different instances
162 if name not in names:
163 res.append((name, url))
164 names.add(name)
165 # sort based on name + label
166 res.sort(key=lambda x: key_for_numerically_sort(x[0]))
167 return res
168
[end of lmfdb/utils/names_and_urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lmfdb/utils/names_and_urls.py b/lmfdb/utils/names_and_urls.py
--- a/lmfdb/utils/names_and_urls.py
+++ b/lmfdb/utils/names_and_urls.py
@@ -125,10 +125,14 @@
else:
name = 'Sato Tate group $%s$' % name
obj_exists = True
+ elif url_split[:2] == ["Character", "Dirichlet"]:
+ modulus = int(url_split[2])
+ conrey = int(url_split[3])
+ name = "Character $\chi_{%d}(%d, \cdot)$" % (modulus, conrey)
+ obj_exists = True
else:
# FIXME
- #print("unknown url", url)
- pass
+ assert False, url
return name, obj_exists
|
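A dependency-free sketch of the branch this patch adds, showing the name the patched `name_and_object_from_url` would produce for a Dirichlet character URL. The URL `Character/Dirichlet/13/2` is a made-up example, and the database lookups the real function performs are omitted.

```python
# Sketch only: reproduces just the Dirichlet-character branch added by the patch.
def dirichlet_name_from_url(url: str):
    url_split = url.rstrip('/').lstrip('/').split('/')
    if url_split[:2] == ["Character", "Dirichlet"]:
        modulus = int(url_split[2])
        conrey = int(url_split[3])
        name = r"Character $\chi_{%d}(%d, \cdot)$" % (modulus, conrey)
        return name, True
    return '??', False


print(dirichlet_name_from_url("Character/Dirichlet/13/2"))
# -> ('Character $\\chi_{13}(2, \\cdot)$', True)
```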
{"golden_diff": "diff --git a/lmfdb/utils/names_and_urls.py b/lmfdb/utils/names_and_urls.py\n--- a/lmfdb/utils/names_and_urls.py\n+++ b/lmfdb/utils/names_and_urls.py\n@@ -125,10 +125,14 @@\n else:\n name = 'Sato Tate group $%s$' % name\n obj_exists = True\n+ elif url_split[:2] == [\"Character\", \"Dirichlet\"]:\n+ modulus = int(url_split[2])\n+ conrey = int(url_split[3])\n+ name = \"Character $\\chi_{%d}(%d, \\cdot)$\" % (modulus, conrey)\n+ obj_exists = True\n else:\n # FIXME\n- #print(\"unknown url\", url)\n- pass\n+ assert False, url\n \n return name, obj_exists\n", "issue": "L-function of Dirichlet character does not link to Dirichlet character page\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.utils.utilities import key_for_numerically_sort\nfrom flask import url_for\n#######################################################################\n# Functions for interacting with web structure\n#######################################################################\n\n# TODO This needs to be able to handle any sort of object\n# There should probably be a more relevant field\n# in the database, instead of trying to extract this from a URL\ndef name_and_object_from_url(url, check_existence=False):\n # the import is here to avoid circular imports\n from lmfdb import db\n url_split = url.rstrip('/').lstrip('/').split(\"/\")\n name = '??'\n obj_exists = False\n\n if url_split[0] == \"EllipticCurve\":\n # every EC instance was added from EC\n obj_exists = True\n if url_split[1] == 'Q':\n if len(url_split) == 4: # isogeny class\n # EllipticCurve/Q/341641/a\n label_isogeny_class = \".\".join(url_split[-2:])\n if check_existence:\n obj_exists = db.ec_curvedata.exists({\"lmfdb_iso\": label_isogeny_class})\n elif len(url_split) == 5: # curve\n # EllipticCurve/Q/48/a/6\n label_curve = \".\".join(url_split[-3:-1]) + url_split[-1]\n if check_existence:\n obj_exists = db.ec_curvedata.exists({\"lmfdb_label\": label_curve})\n else:\n raise NotImplementedError\n else:\n if len(url_split) == 4: # isogeny class\n # EllipticCurve/2.2.140.1/14.1/a\n field, cond, isog = url_split[-3:]\n label_isogeny_class = \"-\".join([field, cond, isog])\n if check_existence:\n obj_exists = db.ec_nfcurves.exists({\"class_label\": label_isogeny_class})\n elif len(url_split) == 5: # curve\n # EllipticCurve/2.0.4.1/1250.3/a/3\n field, cond, isog, ind = url_split[-4:]\n label_curve = \"-\".join([field, cond, isog]) + ind\n if check_existence:\n obj_exists = db.ec_nfcurves.exists({\"label\": label_curve})\n if len(url_split) == 4: # isogeny class\n #name = 'Isogeny class ' + label_isogeny_class\n name = 'Elliptic curve ' + label_isogeny_class\n elif len(url_split) == 5: # curve\n #name = 'Curve ' + label_curve\n name = 'Elliptic curve ' + label_curve\n\n elif url_split[0] == \"Genus2Curve\":\n obj_exists = True\n assert url_split[1] == 'Q'\n if len(url_split) == 4: # isog class\n # Genus2Curve/Q/310329/a\n label_isogeny_class = \".\".join(url_split[-2:])\n if check_existence:\n obj_exists = db.g2c_curves.exists({\"class\": label_isogeny_class})\n #name = 'Isogeny class ' + label_isogeny_class\n name = 'Genus 2 curve ' + label_isogeny_class\n if len(url_split) == 6: # curve\n # Genus2Curve/Q/1728/b/442368/1\n label_curve = \".\".join(url_split[-4:])\n if check_existence:\n obj_exists = db.g2c_curves.exists({\"label\": label_curve})\n #name = 'Curve ' + label_curve\n name = 'Genus 2 curve ' + label_curve\n\n elif url_split[0] == \"ModularForm\":\n if url_split[1] == 'GL2':\n if url_split[2] == 'Q' and 
url_split[3] == 'holomorphic':\n if len(url_split) == 10:\n # ModularForm/GL2/Q/holomorphic/24/2/f/a/11/2\n newform_label = \".\".join(url_split[-6:-2])\n conrey_newform_label = \".\".join(url_split[-6:])\n name = 'Modular form ' + conrey_newform_label\n obj_exists = True\n if check_existence:\n obj_exists = db.mf_newforms.label_exists(newform_label)\n elif len(url_split) == 8:\n # ModularForm/GL2/Q/holomorphic/24/2/f/a\n newform_label = \".\".join(url_split[-4:])\n name = 'Modular form ' + newform_label\n obj_exists = True\n if check_existence:\n obj_exists = db.mf_newforms.label_exists(newform_label)\n\n elif url_split[2] == 'TotallyReal':\n # ModularForm/GL2/TotallyReal/2.2.140.1/holomorphic/2.2.140.1-14.1-a\n label = url_split[-1]\n name = 'Hilbert modular form ' + label\n obj_exists = True\n if check_existence:\n obj_exists = db.hmf_forms.label_exists(label)\n\n elif url_split[2] == 'ImaginaryQuadratic':\n # ModularForm/GL2/ImaginaryQuadratic/2.0.4.1/98.1/a\n label = '-'.join(url_split[-3:])\n name = 'Bianchi modular form ' + label\n obj_exists = 'CM' not in label\n if check_existence:\n obj_exists = db.bmf_forms.label_exists(label)\n elif url_split[0] == \"ArtinRepresentation\":\n label = url_split[1]\n name = 'Artin representation ' + label\n obj_exists = True\n if check_existence:\n obj_exists = db.artin_reps.label_exists(label.split('c')[0])\n elif url_split[0] == \"NumberField\":\n from lmfdb.number_fields.web_number_field import field_pretty\n label = url_split[1]\n name = 'Number field ' + field_pretty(label)\n obj_exists = True\n if check_existence:\n obj_exists = db.number_fields.label_exists(label)\n elif url_split[0] == \"SatoTateGroup\":\n from lmfdb.sato_tate_groups.main import st_name\n name, label = st_name(url_split[1])\n if name is None:\n name = label\n obj_exists = False\n else:\n name = 'Sato Tate group $%s$' % name\n obj_exists = True\n else:\n # FIXME\n #print(\"unknown url\", url)\n pass\n\n return name, obj_exists\n\n\ndef names_and_urls(instances, exclude={}):\n res = []\n names = set()\n urls = set()\n exclude = set(exclude)\n root = url_for('index')\n\n # remove duplicate urls\n for instance in instances:\n if not isinstance(instance, str):\n instance = instance['url']\n if instance not in exclude and '|' not in instance:\n urls.add(instance)\n\n for url in urls:\n name, obj_exists = name_and_object_from_url(url)\n if not name:\n name = ''\n if obj_exists:\n url = root + url\n else:\n # do not display unknown objects\n continue\n name = '(%s)' % (name)\n url = \"\"\n # avoid duplicates that might have arise from different instances\n if name not in names:\n res.append((name, url))\n names.add(name)\n # sort based on name + label\n res.sort(key=lambda x: key_for_numerically_sort(x[0]))\n return res\n", "path": "lmfdb/utils/names_and_urls.py"}]}
| 2,691 | 196 |
| gh_patches_debug_36634 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4289 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check: CKV_GCP_19: "Ensure GKE basic auth is disabled"
**Describe the issue**
The default for this is disabled yet the alert keeps firing.
**Examples**
Please share an example code sample (in the IaC of your choice) + the expected outcomes.
**Version (please complete the following information):**
- 2.2.255
**Additional context**
Add any other context about the problem here.
</issue>
<code>
[start of checkov/terraform/checks/resource/gcp/GKEBasicAuth.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 from typing import List
4
5
6 class GKEBasicAuth(BaseResourceCheck):
7 def __init__(self):
8 name = "Ensure GKE basic auth is disabled"
9 id = "CKV_GCP_19"
10 supported_resources = ['google_container_cluster']
11 categories = [CheckCategories.KUBERNETES]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 """
16 Looks for password configuration at azure_instance:
17 https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html
18 :param conf: google_compute_ssl_policy configuration
19 :return: <CheckResult>
20 """
21 if 'master_auth' in conf.keys():
22 username = conf['master_auth'][0].get('username')
23 password = conf['master_auth'][0].get('password')
24 if username or password:
25 # only if both are set to the empty string it is fine
26 # https://www.terraform.io/docs/providers/google/r/container_cluster.html
27 if username and password:
28 if username[0] == '' and password[0] == '':
29 return CheckResult.PASSED
30 return CheckResult.FAILED
31 return CheckResult.PASSED
32 return CheckResult.FAILED
33
34 def get_evaluated_keys(self) -> List[str]:
35 return ['master_auth/[0]/username', 'master_auth/[0]/password']
36
37
38 check = GKEBasicAuth()
39
[end of checkov/terraform/checks/resource/gcp/GKEBasicAuth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py
--- a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py
+++ b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py
@@ -1,37 +1,36 @@
+from __future__ import annotations
+
+from typing import Any
+
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
-from typing import List
class GKEBasicAuth(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure GKE basic auth is disabled"
id = "CKV_GCP_19"
- supported_resources = ['google_container_cluster']
- categories = [CheckCategories.KUBERNETES]
+ supported_resources = ('google_container_cluster',)
+ categories = (CheckCategories.KUBERNETES,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
- """
- Looks for password configuration at azure_instance:
- https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html
- :param conf: google_compute_ssl_policy configuration
- :return: <CheckResult>
- """
- if 'master_auth' in conf.keys():
- username = conf['master_auth'][0].get('username')
- password = conf['master_auth'][0].get('password')
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ # since GKE 1.19 the usage of basic auth is deprecated and in the provider version 4+ removed
+ master_auth = conf.get("master_auth")
+ if master_auth and isinstance(master_auth, list):
+ username = master_auth[0].get('username')
+ password = master_auth[0].get('password')
if username or password:
# only if both are set to the empty string it is fine
- # https://www.terraform.io/docs/providers/google/r/container_cluster.html
+ # https://registry.terraform.io/providers/hashicorp/google/3.90.1/docs/resources/container_cluster.html
if username and password:
if username[0] == '' and password[0] == '':
return CheckResult.PASSED
return CheckResult.FAILED
- return CheckResult.PASSED
- return CheckResult.FAILED
- def get_evaluated_keys(self) -> List[str]:
+ return CheckResult.PASSED
+
+ def get_evaluated_keys(self) -> list[str]:
return ['master_auth/[0]/username', 'master_auth/[0]/password']
|
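A standalone sketch of the decision logic after this patch, without importing checkov. The `conf` dictionaries follow checkov's convention of wrapping parsed HCL values in lists; the key change is that a cluster with no `master_auth` block now passes, since basic auth cannot be enabled there.

```python
# Sketch only: mirrors the patched scan_resource_conf() logic for
# google_container_cluster without pulling in checkov itself.
def gke_basic_auth_result(conf: dict) -> str:
    master_auth = conf.get("master_auth")
    if master_auth and isinstance(master_auth, list):
        username = master_auth[0].get("username")
        password = master_auth[0].get("password")
        if username or password:
            if username and password and username[0] == "" and password[0] == "":
                return "PASSED"
            return "FAILED"
    # No master_auth block at all: basic auth cannot be enabled, so the check passes.
    return "PASSED"


print(gke_basic_auth_result({}))  # PASSED (the false positive reported in the issue)
print(gke_basic_auth_result({"master_auth": [{"username": [""], "password": [""]}]}))  # PASSED
print(gke_basic_auth_result({"master_auth": [{"username": ["admin"], "password": ["hunter2"]}]}))  # FAILED
```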
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n--- a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n+++ b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n@@ -1,37 +1,36 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n-from typing import List\n \n \n class GKEBasicAuth(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure GKE basic auth is disabled\"\n id = \"CKV_GCP_19\"\n- supported_resources = ['google_container_cluster']\n- categories = [CheckCategories.KUBERNETES]\n+ supported_resources = ('google_container_cluster',)\n+ categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- \"\"\"\n- Looks for password configuration at azure_instance:\n- https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html\n- :param conf: google_compute_ssl_policy configuration\n- :return: <CheckResult>\n- \"\"\"\n- if 'master_auth' in conf.keys():\n- username = conf['master_auth'][0].get('username')\n- password = conf['master_auth'][0].get('password')\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ # since GKE 1.19 the usage of basic auth is deprecated and in the provider version 4+ removed\n+ master_auth = conf.get(\"master_auth\")\n+ if master_auth and isinstance(master_auth, list):\n+ username = master_auth[0].get('username')\n+ password = master_auth[0].get('password')\n if username or password:\n # only if both are set to the empty string it is fine\n- # https://www.terraform.io/docs/providers/google/r/container_cluster.html\n+ # https://registry.terraform.io/providers/hashicorp/google/3.90.1/docs/resources/container_cluster.html\n if username and password:\n if username[0] == '' and password[0] == '':\n return CheckResult.PASSED\n return CheckResult.FAILED\n- return CheckResult.PASSED\n- return CheckResult.FAILED\n \n- def get_evaluated_keys(self) -> List[str]:\n+ return CheckResult.PASSED\n+\n+ def get_evaluated_keys(self) -> list[str]:\n return ['master_auth/[0]/username', 'master_auth/[0]/password']\n", "issue": "Check: CKV_GCP_19: \"Ensure GKE basic auth is disabled\"\n**Describe the issue**\r\nThe default for this is disabled yet the alert keeps firing. 
\r\n\r\n**Examples**\r\nPlease share an example code sample (in the IaC of your choice) + the expected outcomes.\r\n\r\n**Version (please complete the following information):**\r\n- 2.2.255\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom typing import List\n\n\nclass GKEBasicAuth(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure GKE basic auth is disabled\"\n id = \"CKV_GCP_19\"\n supported_resources = ['google_container_cluster']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for password configuration at azure_instance:\n https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html\n :param conf: google_compute_ssl_policy configuration\n :return: <CheckResult>\n \"\"\"\n if 'master_auth' in conf.keys():\n username = conf['master_auth'][0].get('username')\n password = conf['master_auth'][0].get('password')\n if username or password:\n # only if both are set to the empty string it is fine\n # https://www.terraform.io/docs/providers/google/r/container_cluster.html\n if username and password:\n if username[0] == '' and password[0] == '':\n return CheckResult.PASSED\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return ['master_auth/[0]/username', 'master_auth/[0]/password']\n\n\ncheck = GKEBasicAuth()\n", "path": "checkov/terraform/checks/resource/gcp/GKEBasicAuth.py"}]}
| 1,063 | 628 |
| gh_patches_debug_21720 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-676 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make ROOT_PATH, MODELS_PATH and DOWNLOADS_PATH environment variables
All config files I've seen so far have the following variables:
```
"ROOT_PATH": "~/.deeppavlov",
"DOWNLOADS_PATH": "{ROOT_PATH}/downloads",
"MODELS_PATH": "{ROOT_PATH}/models"
```
Should we make them environment variables?
This would be better for the following reasons:
1. No need to define the same variables across all configs
1. No need to redefine variables for your system. For example, I do not store source code and downloads/models in the same place (because of their huge size), so I have to change the configs for myself and change them _back_ to make a PR (which is kind of ridiculous). If these variables were in the environment, I'd have to change them only once - after deeppavlov installation.
1. Make configs working directory independent (no '~/'-paths)
</issue>
<code>
[start of deeppavlov/core/commands/utils.py]
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from pathlib import Path
15 from typing import Union, Dict, TypeVar
16
17 from deeppavlov.core.common.file import read_json, find_config
18
19 # noinspection PyShadowingBuiltins
20 _T = TypeVar('_T', str, float, bool, list, dict)
21
22
23 def _parse_config_property(item: _T, variables: Dict[str, Union[str, Path, float, bool, None]]) -> _T:
24 """Recursively apply config's variables values to its property"""
25 if isinstance(item, str):
26 return item.format(**variables)
27 elif isinstance(item, list):
28 return [_parse_config_property(item, variables) for item in item]
29 elif isinstance(item, dict):
30 return {k: _parse_config_property(v, variables) for k, v in item.items()}
31 else:
32 return item
33
34
35 def parse_config(config: Union[str, Path, dict]) -> dict:
36 """Read config's variables and apply their values to all its properties"""
37 if isinstance(config, (str, Path)):
38 config = read_json(find_config(config))
39
40 variables = {
41 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent
42 }
43 for name, value in config.get('metadata', {}).get('variables', {}).items():
44 variables[name] = value.format(**variables)
45
46 return _parse_config_property(config, variables)
47
48
49 def expand_path(path: Union[str, Path]) -> Path:
50 """Convert relative paths to absolute with resolving user directory."""
51 return Path(path).expanduser().resolve()
52
53
54 def import_packages(packages: list) -> None:
55 """Import packages from list to execute their code."""
56 for package in packages:
57 __import__(package)
58
[end of deeppavlov/core/commands/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deeppavlov/core/commands/utils.py b/deeppavlov/core/commands/utils.py
--- a/deeppavlov/core/commands/utils.py
+++ b/deeppavlov/core/commands/utils.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
from pathlib import Path
from typing import Union, Dict, TypeVar
@@ -38,9 +39,12 @@
config = read_json(find_config(config))
variables = {
- 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent
+ 'DEEPPAVLOV_PATH': os.getenv(f'DP_DEEPPAVLOV_PATH', Path(__file__).parent.parent.parent)
}
for name, value in config.get('metadata', {}).get('variables', {}).items():
+ env_name = f'DP_{name}'
+ if env_name in os.environ:
+ value = os.getenv(env_name)
variables[name] = value.format(**variables)
return _parse_config_property(config, variables)
|
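A self-contained sketch of the variable-resolution loop after this patch: any config variable `NAME` can now be overridden by exporting `DP_NAME`. The config dictionary below is a made-up stand-in for a real DeepPavlov config.

```python
# Sketch only: mirrors the patched loop in parse_config() that lets
# DP_-prefixed environment variables override config variables.
import os

config = {
    "metadata": {
        "variables": {
            "ROOT_PATH": "~/.deeppavlov",
            "DOWNLOADS_PATH": "{ROOT_PATH}/downloads",
            "MODELS_PATH": "{ROOT_PATH}/models",
        }
    }
}

os.environ["DP_ROOT_PATH"] = "/data/deeppavlov"  # the new environment override

variables = {}
for name, value in config["metadata"]["variables"].items():
    env_name = f"DP_{name}"
    if env_name in os.environ:
        value = os.environ[env_name]
    variables[name] = value.format(**variables)

print(variables)
# {'ROOT_PATH': '/data/deeppavlov',
#  'DOWNLOADS_PATH': '/data/deeppavlov/downloads',
#  'MODELS_PATH': '/data/deeppavlov/models'}
```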
{"golden_diff": "diff --git a/deeppavlov/core/commands/utils.py b/deeppavlov/core/commands/utils.py\n--- a/deeppavlov/core/commands/utils.py\n+++ b/deeppavlov/core/commands/utils.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import os\n from pathlib import Path\n from typing import Union, Dict, TypeVar\n \n@@ -38,9 +39,12 @@\n config = read_json(find_config(config))\n \n variables = {\n- 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent\n+ 'DEEPPAVLOV_PATH': os.getenv(f'DP_DEEPPAVLOV_PATH', Path(__file__).parent.parent.parent)\n }\n for name, value in config.get('metadata', {}).get('variables', {}).items():\n+ env_name = f'DP_{name}'\n+ if env_name in os.environ:\n+ value = os.getenv(env_name)\n variables[name] = value.format(**variables)\n \n return _parse_config_property(config, variables)\n", "issue": "Make ROOT_PATH, MODELS_PATH and DOWNLOADS_PATH environment variables\nAll config files I've seen so far have the following variables:\r\n```\r\n\"ROOT_PATH\": \"~/.deeppavlov\",\r\n\"DOWNLOADS_PATH\": \"{ROOT_PATH}/downloads\",\r\n\"MODELS_PATH\": \"{ROOT_PATH}/models\"\r\n```\r\nShould we make them environment variables?\r\nThis would be better for the following reasons:\r\n1. No need to define the same variables across all configs\r\n1. No need to redefine variables for your system. For example, I do not store source code and downloads/models at same place (cause of their huge size), so I need to change configs for me and change them _back_ to make PR (that is kind of ridiculous). If these variables were in the environment, I'd have to change them only one time - after deeppavlov installation.\r\n1. 
Make configs working directory independent (no '~/'-paths)\n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom pathlib import Path\nfrom typing import Union, Dict, TypeVar\n\nfrom deeppavlov.core.common.file import read_json, find_config\n\n# noinspection PyShadowingBuiltins\n_T = TypeVar('_T', str, float, bool, list, dict)\n\n\ndef _parse_config_property(item: _T, variables: Dict[str, Union[str, Path, float, bool, None]]) -> _T:\n \"\"\"Recursively apply config's variables values to its property\"\"\"\n if isinstance(item, str):\n return item.format(**variables)\n elif isinstance(item, list):\n return [_parse_config_property(item, variables) for item in item]\n elif isinstance(item, dict):\n return {k: _parse_config_property(v, variables) for k, v in item.items()}\n else:\n return item\n\n\ndef parse_config(config: Union[str, Path, dict]) -> dict:\n \"\"\"Read config's variables and apply their values to all its properties\"\"\"\n if isinstance(config, (str, Path)):\n config = read_json(find_config(config))\n\n variables = {\n 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent\n }\n for name, value in config.get('metadata', {}).get('variables', {}).items():\n variables[name] = value.format(**variables)\n\n return _parse_config_property(config, variables)\n\n\ndef expand_path(path: Union[str, Path]) -> Path:\n \"\"\"Convert relative paths to absolute with resolving user directory.\"\"\"\n return Path(path).expanduser().resolve()\n\n\ndef import_packages(packages: list) -> None:\n \"\"\"Import packages from list to execute their code.\"\"\"\n for package in packages:\n __import__(package)\n", "path": "deeppavlov/core/commands/utils.py"}]}
| 1,340 | 264 |
| gh_patches_debug_37231 | rasdani/github-patches | git_diff | opendatacube__datacube-core-982 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extra files in source distribution packaging
`python setup.py sdist` builds a source distribution with docs, tests and a bunch of random files in it that should not be included. Strangely, `bdist_wheel` does not include those and generates a proper release file.
https://github.com/conda-forge/datacube-feedstock/issues/25
</issue>
<code>
[start of datacube/utils/xarray_geoextensions.py]
1 """
2 Add geometric extensions to :class:`xarray.Dataset` and :class:`xarray.DataArray` for use
3 with Data Cube by Monkey Patching those classes.
4
5 This extension is reliant on an `xarray` object having a `.crs` property of type
6 :class:`datacube.utils.geometry.CRS`. This is used to inspect the spatial dimensions of the
7 :class:`Dataset` or :class:`DataArray`, and provide new attributes for accessing a
8 :class:`datacube.utils.geometry.GeoBox`, affine transform and extent for the dataset as
9 `.geobox`, `.affine` and `.extent` respectively.
10
11 """
12
13 import xarray
14
15 from datacube.utils import geometry, spatial_dims
16 from datacube.utils.math import affine_from_axis
17
18
19 def _norm_crs(crs):
20 if crs is None or isinstance(crs, geometry.CRS):
21 return crs
22 elif isinstance(crs, str):
23 return geometry.CRS(crs)
24 else:
25 raise ValueError('Can not interpret {} as CRS'.format(type(crs)))
26
27
28 def _get_crs_from_attrs(obj, sdims):
29 """ Looks for attribute named `crs` containing CRS string
30 1. Checks spatials coords attrs
31 2. Checks data variable attrs
32 3. Checks dataset attrs
33
34 Returns
35 =======
36 Content for `.attrs[crs]` usually it's a string
37 None if not present in any of the places listed above
38 """
39 if isinstance(obj, xarray.Dataset):
40 if len(obj.data_vars) > 0:
41 data_array = next(iter(obj.data_vars.values()))
42 else:
43 # fall back option
44 return obj.attrs.get('crs', None)
45 else:
46 data_array = obj
47
48 crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)
49 crs = None
50 if len(crs_set) > 1:
51 raise ValueError('Spatial dimensions have different crs.')
52 elif len(crs_set) == 1:
53 crs = crs_set.pop()
54
55 if crs is None:
56 # fall back option
57 crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)
58 return crs
59
60
61 def _get_crs_from_coord(obj, mode='strict'):
62 """ Looks for dimensionless coordinate with `spatial_ref` attribute.
63
64 obj: Dataset | DataArray
65 mode: strict|any|all
66 strict -- raise Error if multiple candidates
67 any -- return first one
68 all -- return a list of all found CRSs
69
70 Returns
71 =======
72 None - if none found
73 crs:str - if found one
74 crs:str - if found several but mode is any
75
76 (crs: str, crs: str) - if found several and mode=all
77 """
78 grid_mapping = obj.attrs.get('grid_mapping', None)
79
80 # First check CF convention "pointer"
81 if grid_mapping is not None and grid_mapping in obj.coords:
82 coord = obj.coords[grid_mapping]
83 spatial_ref = coord.attrs.get('spatial_ref', None)
84 if spatial_ref is not None:
85 return spatial_ref
86 else:
87 raise ValueError(f"Coordinate '{grid_mapping}' has no `spatial_ref` attribute")
88
89 # No explicit `grid_mapping` find some "CRS" coordinate
90 candidates = tuple(coord.attrs['spatial_ref'] for coord in obj.coords.values()
91 if coord.ndim == 0 and 'spatial_ref' in coord.attrs)
92
93 if len(candidates) == 0:
94 return None
95 if len(candidates) == 1:
96 return candidates[0]
97
98 if mode == 'strict':
99 raise ValueError("Too many candidates when looking for CRS")
100 elif mode == 'all':
101 return candidates
102 elif mode == 'any':
103 return candidates[0]
104 else:
105 raise ValueError(f"Mode needs to be: strict|any|all got {mode}")
106
107
108 def _xarray_affine_impl(obj):
109 sdims = spatial_dims(obj, relaxed=True)
110 if sdims is None:
111 return None, None
112
113 yy, xx = (obj[dim] for dim in sdims)
114 fallback_res = (coord.attrs.get('resolution', None) for coord in (xx, yy))
115
116 return affine_from_axis(xx.values, yy.values, fallback_res), sdims
117
118
119 def _xarray_affine(obj):
120 transform, _ = _xarray_affine_impl(obj)
121 return transform
122
123
124 def _xarray_extent(obj):
125 geobox = obj.geobox
126 return None if geobox is None else geobox.extent
127
128
129 def _xarray_geobox(obj):
130 transform, sdims = _xarray_affine_impl(obj)
131 if sdims is None:
132 return None
133
134 crs = None
135 try:
136 crs = _get_crs_from_coord(obj)
137 except ValueError:
138 pass
139
140 if crs is None:
141 try:
142 crs = _get_crs_from_attrs(obj, sdims)
143 except ValueError:
144 pass
145
146 if crs is None:
147 return None
148
149 try:
150 crs = _norm_crs(crs)
151 except ValueError:
152 return None
153
154 h, w = (obj.coords[dim].size for dim in sdims)
155
156 return geometry.GeoBox(w, h, transform, crs)
157
158
159 xarray.Dataset.geobox = property(_xarray_geobox) # type: ignore
160 xarray.Dataset.affine = property(_xarray_affine) # type: ignore
161 xarray.Dataset.extent = property(_xarray_extent) # type: ignore
162 xarray.DataArray.geobox = property(_xarray_geobox) # type: ignore
163 xarray.DataArray.affine = property(_xarray_affine) # type: ignore
164 xarray.DataArray.extent = property(_xarray_extent) # type: ignore
165
[end of datacube/utils/xarray_geoextensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/datacube/utils/xarray_geoextensions.py b/datacube/utils/xarray_geoextensions.py
--- a/datacube/utils/xarray_geoextensions.py
+++ b/datacube/utils/xarray_geoextensions.py
@@ -9,9 +9,8 @@
`.geobox`, `.affine` and `.extent` respectively.
"""
-
+import warnings
import xarray
-
from datacube.utils import geometry, spatial_dims
from datacube.utils.math import affine_from_axis
@@ -27,34 +26,48 @@
def _get_crs_from_attrs(obj, sdims):
""" Looks for attribute named `crs` containing CRS string
- 1. Checks spatials coords attrs
- 2. Checks data variable attrs
- 3. Checks dataset attrs
+ - Checks spatials coords attrs
+ - Checks data variable attrs
+ - Checks dataset attrs
Returns
=======
Content for `.attrs[crs]` usually it's a string
None if not present in any of the places listed above
"""
- if isinstance(obj, xarray.Dataset):
- if len(obj.data_vars) > 0:
- data_array = next(iter(obj.data_vars.values()))
+ crs_set = set()
+
+ def _add_candidate(crs):
+ if crs is None:
+ return
+ if isinstance(crs, str):
+ crs_set.add(crs)
else:
- # fall back option
- return obj.attrs.get('crs', None)
+ warnings.warn(f"Ignoring crs attribute of type: {type(crs)}")
+
+ def process_attrs(attrs):
+ _add_candidate(attrs.get('crs', None))
+ _add_candidate(attrs.get('crs_wkt', None))
+
+ def process_datavar(x):
+ process_attrs(x.attrs)
+ for dim in sdims:
+ if dim in x.coords:
+ process_attrs(x.coords[dim].attrs)
+
+ if isinstance(obj, xarray.Dataset):
+ process_attrs(obj.attrs)
+ for dv in obj.data_vars.values():
+ process_datavar(dv)
else:
- data_array = obj
+ process_datavar(obj)
- crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)
crs = None
if len(crs_set) > 1:
raise ValueError('Spatial dimensions have different crs.')
elif len(crs_set) == 1:
crs = crs_set.pop()
- if crs is None:
- # fall back option
- crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)
return crs
@@ -148,7 +161,8 @@
try:
crs = _norm_crs(crs)
- except ValueError:
+ except (ValueError, geometry.CRSError):
+ warnings.warn(f"Encountered malformed CRS: {crs}")
return None
h, w = (obj.coords[dim].size for dim in sdims)
|
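The diff above reworks how `_get_crs_from_attrs` gathers CRS candidates (dataset attrs, data-variable attrs and spatial-coordinate attrs, accepting either `crs` or `crs_wkt`). Below is a dependency-free sketch of that collection idea over plain dictionaries; the real code walks `xarray` objects instead.

```python
# Sketch only: the candidate-gathering idea behind the patched
# _get_crs_from_attrs, expressed over plain dicts so it runs without xarray.
import warnings


def collect_crs(dataset_attrs, datavar_attrs, coord_attrs_by_dim, sdims=("y", "x")):
    crs_set = set()

    def _add_candidate(crs):
        if crs is None:
            return
        if isinstance(crs, str):
            crs_set.add(crs)
        else:
            warnings.warn(f"Ignoring crs attribute of type: {type(crs)}")

    for attrs in (dataset_attrs, datavar_attrs, *(coord_attrs_by_dim.get(d, {}) for d in sdims)):
        _add_candidate(attrs.get("crs"))
        _add_candidate(attrs.get("crs_wkt"))

    if len(crs_set) > 1:
        raise ValueError("Spatial dimensions have different crs.")
    return crs_set.pop() if crs_set else None


print(collect_crs({}, {"crs": "EPSG:4326"}, {"y": {}, "x": {"crs": "EPSG:4326"}}))
# -> EPSG:4326
```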
{"golden_diff": "diff --git a/datacube/utils/xarray_geoextensions.py b/datacube/utils/xarray_geoextensions.py\n--- a/datacube/utils/xarray_geoextensions.py\n+++ b/datacube/utils/xarray_geoextensions.py\n@@ -9,9 +9,8 @@\n `.geobox`, `.affine` and `.extent` respectively.\n \n \"\"\"\n-\n+import warnings\n import xarray\n-\n from datacube.utils import geometry, spatial_dims\n from datacube.utils.math import affine_from_axis\n \n@@ -27,34 +26,48 @@\n \n def _get_crs_from_attrs(obj, sdims):\n \"\"\" Looks for attribute named `crs` containing CRS string\n- 1. Checks spatials coords attrs\n- 2. Checks data variable attrs\n- 3. Checks dataset attrs\n+ - Checks spatials coords attrs\n+ - Checks data variable attrs\n+ - Checks dataset attrs\n \n Returns\n =======\n Content for `.attrs[crs]` usually it's a string\n None if not present in any of the places listed above\n \"\"\"\n- if isinstance(obj, xarray.Dataset):\n- if len(obj.data_vars) > 0:\n- data_array = next(iter(obj.data_vars.values()))\n+ crs_set = set()\n+\n+ def _add_candidate(crs):\n+ if crs is None:\n+ return\n+ if isinstance(crs, str):\n+ crs_set.add(crs)\n else:\n- # fall back option\n- return obj.attrs.get('crs', None)\n+ warnings.warn(f\"Ignoring crs attribute of type: {type(crs)}\")\n+\n+ def process_attrs(attrs):\n+ _add_candidate(attrs.get('crs', None))\n+ _add_candidate(attrs.get('crs_wkt', None))\n+\n+ def process_datavar(x):\n+ process_attrs(x.attrs)\n+ for dim in sdims:\n+ if dim in x.coords:\n+ process_attrs(x.coords[dim].attrs)\n+\n+ if isinstance(obj, xarray.Dataset):\n+ process_attrs(obj.attrs)\n+ for dv in obj.data_vars.values():\n+ process_datavar(dv)\n else:\n- data_array = obj\n+ process_datavar(obj)\n \n- crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)\n crs = None\n if len(crs_set) > 1:\n raise ValueError('Spatial dimensions have different crs.')\n elif len(crs_set) == 1:\n crs = crs_set.pop()\n \n- if crs is None:\n- # fall back option\n- crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)\n return crs\n \n \n@@ -148,7 +161,8 @@\n \n try:\n crs = _norm_crs(crs)\n- except ValueError:\n+ except (ValueError, geometry.CRSError):\n+ warnings.warn(f\"Encountered malformed CRS: {crs}\")\n return None\n \n h, w = (obj.coords[dim].size for dim in sdims)\n", "issue": "Extra files in source distribution packaging\n`python setup.py sdist` builds source distribution with docs and tests and bunch of random files in it, that should not be included. Strangely `bdist_wheel` does not include those and generates proper release file.\r\n\r\nhttps://github.com/conda-forge/datacube-feedstock/issues/25\n", "before_files": [{"content": "\"\"\"\nAdd geometric extensions to :class:`xarray.Dataset` and :class:`xarray.DataArray` for use\nwith Data Cube by Monkey Patching those classes.\n\nThis extension is reliant on an `xarray` object having a `.crs` property of type\n:class:`datacube.utils.geometry.CRS`. 
This is used to inspect the spatial dimensions of the\n:class:`Dataset` or :class:`DataArray`, and provide new attributes for accessing a\n:class:`datacube.utils.geometry.GeoBox`, affine transform and extent for the dataset as\n`.geobox`, `.affine` and `.extent` respectively.\n\n\"\"\"\n\nimport xarray\n\nfrom datacube.utils import geometry, spatial_dims\nfrom datacube.utils.math import affine_from_axis\n\n\ndef _norm_crs(crs):\n if crs is None or isinstance(crs, geometry.CRS):\n return crs\n elif isinstance(crs, str):\n return geometry.CRS(crs)\n else:\n raise ValueError('Can not interpret {} as CRS'.format(type(crs)))\n\n\ndef _get_crs_from_attrs(obj, sdims):\n \"\"\" Looks for attribute named `crs` containing CRS string\n 1. Checks spatials coords attrs\n 2. Checks data variable attrs\n 3. Checks dataset attrs\n\n Returns\n =======\n Content for `.attrs[crs]` usually it's a string\n None if not present in any of the places listed above\n \"\"\"\n if isinstance(obj, xarray.Dataset):\n if len(obj.data_vars) > 0:\n data_array = next(iter(obj.data_vars.values()))\n else:\n # fall back option\n return obj.attrs.get('crs', None)\n else:\n data_array = obj\n\n crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)\n crs = None\n if len(crs_set) > 1:\n raise ValueError('Spatial dimensions have different crs.')\n elif len(crs_set) == 1:\n crs = crs_set.pop()\n\n if crs is None:\n # fall back option\n crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)\n return crs\n\n\ndef _get_crs_from_coord(obj, mode='strict'):\n \"\"\" Looks for dimensionless coordinate with `spatial_ref` attribute.\n\n obj: Dataset | DataArray\n mode: strict|any|all\n strict -- raise Error if multiple candidates\n any -- return first one\n all -- return a list of all found CRSs\n\n Returns\n =======\n None - if none found\n crs:str - if found one\n crs:str - if found several but mode is any\n\n (crs: str, crs: str) - if found several and mode=all\n \"\"\"\n grid_mapping = obj.attrs.get('grid_mapping', None)\n\n # First check CF convention \"pointer\"\n if grid_mapping is not None and grid_mapping in obj.coords:\n coord = obj.coords[grid_mapping]\n spatial_ref = coord.attrs.get('spatial_ref', None)\n if spatial_ref is not None:\n return spatial_ref\n else:\n raise ValueError(f\"Coordinate '{grid_mapping}' has no `spatial_ref` attribute\")\n\n # No explicit `grid_mapping` find some \"CRS\" coordinate\n candidates = tuple(coord.attrs['spatial_ref'] for coord in obj.coords.values()\n if coord.ndim == 0 and 'spatial_ref' in coord.attrs)\n\n if len(candidates) == 0:\n return None\n if len(candidates) == 1:\n return candidates[0]\n\n if mode == 'strict':\n raise ValueError(\"Too many candidates when looking for CRS\")\n elif mode == 'all':\n return candidates\n elif mode == 'any':\n return candidates[0]\n else:\n raise ValueError(f\"Mode needs to be: strict|any|all got {mode}\")\n\n\ndef _xarray_affine_impl(obj):\n sdims = spatial_dims(obj, relaxed=True)\n if sdims is None:\n return None, None\n\n yy, xx = (obj[dim] for dim in sdims)\n fallback_res = (coord.attrs.get('resolution', None) for coord in (xx, yy))\n\n return affine_from_axis(xx.values, yy.values, fallback_res), sdims\n\n\ndef _xarray_affine(obj):\n transform, _ = _xarray_affine_impl(obj)\n return transform\n\n\ndef _xarray_extent(obj):\n geobox = obj.geobox\n return None if geobox is None else geobox.extent\n\n\ndef _xarray_geobox(obj):\n transform, sdims = _xarray_affine_impl(obj)\n if sdims is None:\n return None\n\n crs = None\n try:\n 
crs = _get_crs_from_coord(obj)\n except ValueError:\n pass\n\n if crs is None:\n try:\n crs = _get_crs_from_attrs(obj, sdims)\n except ValueError:\n pass\n\n if crs is None:\n return None\n\n try:\n crs = _norm_crs(crs)\n except ValueError:\n return None\n\n h, w = (obj.coords[dim].size for dim in sdims)\n\n return geometry.GeoBox(w, h, transform, crs)\n\n\nxarray.Dataset.geobox = property(_xarray_geobox) # type: ignore\nxarray.Dataset.affine = property(_xarray_affine) # type: ignore\nxarray.Dataset.extent = property(_xarray_extent) # type: ignore\nxarray.DataArray.geobox = property(_xarray_geobox) # type: ignore\nxarray.DataArray.affine = property(_xarray_affine) # type: ignore\nxarray.DataArray.extent = property(_xarray_extent) # type: ignore\n", "path": "datacube/utils/xarray_geoextensions.py"}]}
| 2,305 | 690 |
gh_patches_debug_13961
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-2083
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[TODO][CI][pip upstream changes] Fix failing nightlies running against `pip`'s `main` branch
Failure example: https://github.com/jazzband/pip-tools/actions/runs/8794562108/job/24134206791
</issue>
<code>
[start of piptools/exceptions.py]
1 from __future__ import annotations
2
3 from typing import Iterable
4
5 from pip._internal.index.package_finder import PackageFinder
6 from pip._internal.models.candidate import InstallationCandidate
7 from pip._internal.req import InstallRequirement
8 from pip._internal.utils.misc import redact_auth_from_url
9
10
11 class PipToolsError(Exception):
12 pass
13
14
15 class NoCandidateFound(PipToolsError):
16 def __init__(
17 self,
18 ireq: InstallRequirement,
19 candidates_tried: Iterable[InstallationCandidate],
20 finder: PackageFinder,
21 ) -> None:
22 self.ireq = ireq
23 self.candidates_tried = candidates_tried
24 self.finder = finder
25
26 def __str__(self) -> str:
27 versions = []
28 pre_versions = []
29
30 for candidate in sorted(self.candidates_tried):
31 version = str(candidate.version)
32 if candidate.version.is_prerelease:
33 pre_versions.append(version)
34 else:
35 versions.append(version)
36
37 lines = [f"Could not find a version that matches {self.ireq}"]
38
39 if versions:
40 lines.append(f"Tried: {', '.join(versions)}")
41
42 if pre_versions:
43 if self.finder.allow_all_prereleases:
44 line = "Tried"
45 else:
46 line = "Skipped"
47
48 line += f" pre-versions: {', '.join(pre_versions)}"
49 lines.append(line)
50
51 if versions or pre_versions:
52 lines.append(
53 "There are incompatible versions in the resolved dependencies:"
54 )
55 source_ireqs = getattr(self.ireq, "_source_ireqs", [])
56 lines.extend(f" {ireq}" for ireq in source_ireqs)
57 else:
58 redacted_urls = tuple(
59 redact_auth_from_url(url) for url in self.finder.index_urls
60 )
61 lines.append("No versions found")
62 lines.append(
63 "{} {} reachable?".format(
64 "Were" if len(redacted_urls) > 1 else "Was",
65 " or ".join(redacted_urls),
66 )
67 )
68 return "\n".join(lines)
69
70
71 class IncompatibleRequirements(PipToolsError):
72 def __init__(self, ireq_a: InstallRequirement, ireq_b: InstallRequirement) -> None:
73 self.ireq_a = ireq_a
74 self.ireq_b = ireq_b
75
76 def __str__(self) -> str:
77 message = "Incompatible requirements found: {} and {}"
78 return message.format(self.ireq_a, self.ireq_b)
79
[end of piptools/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/piptools/exceptions.py b/piptools/exceptions.py
--- a/piptools/exceptions.py
+++ b/piptools/exceptions.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import operator
from typing import Iterable
from pip._internal.index.package_finder import PackageFinder
@@ -27,7 +28,9 @@
versions = []
pre_versions = []
- for candidate in sorted(self.candidates_tried):
+ for candidate in sorted(
+ self.candidates_tried, key=operator.attrgetter("version")
+ ):
version = str(candidate.version)
if candidate.version.is_prerelease:
pre_versions.append(version)
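The functional change is a single line: order candidates by their `version` attribute instead of ordering the candidate objects themselves. A self-contained sketch of that idiom, using a made-up stand-in class rather than pip's real `InstallationCandidate`:

```
import operator
from dataclasses import dataclass

from packaging.version import Version


@dataclass
class CandidateStub:
    # Stand-in for illustration only; deliberately defines no __lt__.
    name: str
    version: Version


candidates = [
    CandidateStub("demo", Version("2.0")),
    CandidateStub("demo", Version("1.0.post1")),
    CandidateStub("demo", Version("1.0")),
]

# Keying on the attribute only requires the Version objects to be comparable.
ordered = sorted(candidates, key=operator.attrgetter("version"))
assert [str(c.version) for c in ordered] == ["1.0", "1.0.post1", "2.0"]
```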
|
{"golden_diff": "diff --git a/piptools/exceptions.py b/piptools/exceptions.py\n--- a/piptools/exceptions.py\n+++ b/piptools/exceptions.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import operator\n from typing import Iterable\n \n from pip._internal.index.package_finder import PackageFinder\n@@ -27,7 +28,9 @@\n versions = []\n pre_versions = []\n \n- for candidate in sorted(self.candidates_tried):\n+ for candidate in sorted(\n+ self.candidates_tried, key=operator.attrgetter(\"version\")\n+ ):\n version = str(candidate.version)\n if candidate.version.is_prerelease:\n pre_versions.append(version)\n", "issue": "[TODO][CI][pip upstream changes] Fix failing nightlies running against `pip`'s `main` branch\nFailure example: https://github.com/jazzband/pip-tools/actions/runs/8794562108/job/24134206791\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Iterable\n\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.models.candidate import InstallationCandidate\nfrom pip._internal.req import InstallRequirement\nfrom pip._internal.utils.misc import redact_auth_from_url\n\n\nclass PipToolsError(Exception):\n pass\n\n\nclass NoCandidateFound(PipToolsError):\n def __init__(\n self,\n ireq: InstallRequirement,\n candidates_tried: Iterable[InstallationCandidate],\n finder: PackageFinder,\n ) -> None:\n self.ireq = ireq\n self.candidates_tried = candidates_tried\n self.finder = finder\n\n def __str__(self) -> str:\n versions = []\n pre_versions = []\n\n for candidate in sorted(self.candidates_tried):\n version = str(candidate.version)\n if candidate.version.is_prerelease:\n pre_versions.append(version)\n else:\n versions.append(version)\n\n lines = [f\"Could not find a version that matches {self.ireq}\"]\n\n if versions:\n lines.append(f\"Tried: {', '.join(versions)}\")\n\n if pre_versions:\n if self.finder.allow_all_prereleases:\n line = \"Tried\"\n else:\n line = \"Skipped\"\n\n line += f\" pre-versions: {', '.join(pre_versions)}\"\n lines.append(line)\n\n if versions or pre_versions:\n lines.append(\n \"There are incompatible versions in the resolved dependencies:\"\n )\n source_ireqs = getattr(self.ireq, \"_source_ireqs\", [])\n lines.extend(f\" {ireq}\" for ireq in source_ireqs)\n else:\n redacted_urls = tuple(\n redact_auth_from_url(url) for url in self.finder.index_urls\n )\n lines.append(\"No versions found\")\n lines.append(\n \"{} {} reachable?\".format(\n \"Were\" if len(redacted_urls) > 1 else \"Was\",\n \" or \".join(redacted_urls),\n )\n )\n return \"\\n\".join(lines)\n\n\nclass IncompatibleRequirements(PipToolsError):\n def __init__(self, ireq_a: InstallRequirement, ireq_b: InstallRequirement) -> None:\n self.ireq_a = ireq_a\n self.ireq_b = ireq_b\n\n def __str__(self) -> str:\n message = \"Incompatible requirements found: {} and {}\"\n return message.format(self.ireq_a, self.ireq_b)\n", "path": "piptools/exceptions.py"}]}
| 1,301 | 155 |
gh_patches_debug_32237
|
rasdani/github-patches
|
git_diff
|
dmlc__dgl-5059
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Sparse] Create a mock implementation in mock_sparse for BSDDMM.
## 🔨Work Item
**IMPORTANT:**
* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.
* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.
Project tracker: https://github.com/orgs/dmlc/projects/2
## Description
<!-- short description of the work item -->
## Depending work items or issues
<!-- what must be done before this -->
</issue>
<code>
[start of python/dgl/mock_sparse/sddmm.py]
1 """Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module."""
2 import torch
3
4 from .sp_matrix import SparseMatrix
5
6 __all__ = ["sddmm"]
7
8
9 def sddmm(
10 A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor
11 ) -> SparseMatrix:
12 r"""Sampled-Dense-Dense Matrix Multiplication (SDDMM).
13
14 ``sddmm`` multiplies two dense matrices :attr:``mat1`` and :attr:``mat2``
15 at the nonzero locations of sparse matrix :attr:``A``. Values of :attr:``A``
16 is added to the resulting matrix.
17
18 Mathematically ``sddmm`` is formulated as:
19
20 .. math::
21 out = (mat1 @ mat2) * spy(A) + A
22
23 Parameters
24 ----------
25 A : SparseMatrix
26 Sparse matrix of shape `(M, N)`.
27 mat1 : Tensor
28 Dense matrix of shape `(M, K)`
29 mat2 : Tensor
30 Dense matrix of shape `(K, N)`
31
32 Returns
33 -------
34 SparseMatrix
35 Sparse matrix of shape `(M, N)`.
36
37 Examples
38 --------
39
40 >>> row = torch.Tensor([1, 1, 2])
41 >>> col = torch.Tensor([2, 3, 3])
42 >>> val = torch.arange(1, 4).float()
43 >>> A = SparseMatrix(row, col, val, (3, 4))
44 >>> mat1 = torch.randn(3, 5)
45 >>> mat2 = torch.randn(5, 4)
46 >>> dgl.mock_sparse.sddmm(A, mat1, mat2)
47 SparseMatrix(indices=tensor([[1, 1, 2],
48 [2, 3, 3]]),
49 values=tensor([1.8035, 2.3375, 3.1255]),
50 shape=(3, 4), nnz=3)
51 """
52 assert A.val.dim() == 1, (
53 f"Nonzero elements have values of shape ({A.val.shape[1]}). Expects "
54 "scalar values. "
55 )
56 # PyTorch's sddmm operator only supports CSR format.
57 res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)
58 return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)
59
[end of python/dgl/mock_sparse/sddmm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/dgl/mock_sparse/sddmm.py b/python/dgl/mock_sparse/sddmm.py
--- a/python/dgl/mock_sparse/sddmm.py
+++ b/python/dgl/mock_sparse/sddmm.py
@@ -1,9 +1,9 @@
"""Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module."""
import torch
-from .sp_matrix import SparseMatrix
+from .sp_matrix import create_from_coo, SparseMatrix
-__all__ = ["sddmm"]
+__all__ = ["sddmm", "mock_bsddmm"]
def sddmm(
@@ -56,3 +56,56 @@
# PyTorch's sddmm operator only supports CSR format.
res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)
return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)
+
+
+def mock_bsddmm(
+ A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor
+) -> SparseMatrix:
+ r"""Batched Sampled-Dense-Dense Matrix Multiplication (SDDMM).
+
+ ``bsddmm`` conducts `sddmm` for each batch of the two dense matrices
+ independently.
+
+ In particular, :attr:``mat1`` and :attr:``mat2`` can be 2-D, which will be
+ reshape as `(B, M, 1)` and `(B, 1, K)` in the computation.
+
+ Parameters
+ ----------
+ A : SparseMatrix
+ Sparse matrix of shape `(M, N)`.
+ mat1 : Tensor
+ Dense matrix of shape `(B, M, K)` or `(B, M,)`
+ mat2 : Tensor
+ Dense matrix of shape `(B, K, N)` or `(B, K,)`
+
+ Returns
+ -------
+ SparseMatrix
+ Sparse matrix of shape `(M, N)` with non-zero values of `B` dimension.
+
+ Examples
+ --------
+
+ >>> row = torch.tensor([1, 1, 2])
+ >>> col = torch.tensor([2, 3, 3])
+ >>> val = torch.arange(1, 4).float()
+ >>> A = create_from_coo(row, col, val, (3, 4))
+ >>> mat1 = torch.randn(2, 3, 5)
+ >>> mat2 = torch.randn(2, 5, 4)
+ >>> dgl.mock_sparse.mock_bsddmm(A, mat1, mat2)
+ SparseMatrix(indices=tensor([[1, 1, 2],
+ [2, 3, 3]]),
+ values=tensor([[-0.6765, -0.4017],
+ [ 3.3290, 6.9016],
+ [ 4.8184, 5.8882]]),
+ shape=(3, 4), nnz=3)
+ """
+ batch_mat1 = [mat1[i, ...] for i in range(mat1.shape[0])]
+ batch_mat2 = [mat2[i, ...] for i in range(mat2.shape[0])]
+ batch_ret = [sddmm(A, lhs, rhs) for lhs, rhs in zip(batch_mat1, batch_mat2)]
+ return create_from_coo(
+ row=A.row,
+ col=A.col,
+ val=torch.stack([sp_mat.val for sp_mat in batch_ret], dim=-1),
+ shape=A.shape,
+ )
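The batching strategy in the added function is simply: slice along the leading batch dimension, run the unbatched operation per slice, and stack the resulting non-zero values along a trailing dimension. A dense-tensor analogue of that loop, written against plain torch (the function below is illustrative only and not part of the dgl API):

```
import torch


def batched_rowcol_dot(row, col, mat1, mat2):
    # mat1: (B, M, K), mat2: (B, K, N) -> values of shape (nnz, B),
    # one dot product per (row, col) location per batch.
    per_batch = [
        (mat1[b, row, :] * mat2[b, :, col].T).sum(dim=-1)
        for b in range(mat1.shape[0])
    ]
    return torch.stack(per_batch, dim=-1)


row = torch.tensor([1, 1, 2])
col = torch.tensor([2, 3, 3])
vals = batched_rowcol_dot(row, col, torch.randn(2, 3, 5), torch.randn(2, 5, 4))
assert vals.shape == (3, 2)
```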
|
{"golden_diff": "diff --git a/python/dgl/mock_sparse/sddmm.py b/python/dgl/mock_sparse/sddmm.py\n--- a/python/dgl/mock_sparse/sddmm.py\n+++ b/python/dgl/mock_sparse/sddmm.py\n@@ -1,9 +1,9 @@\n \"\"\"Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module.\"\"\"\n import torch\n \n-from .sp_matrix import SparseMatrix\n+from .sp_matrix import create_from_coo, SparseMatrix\n \n-__all__ = [\"sddmm\"]\n+__all__ = [\"sddmm\", \"mock_bsddmm\"]\n \n \n def sddmm(\n@@ -56,3 +56,56 @@\n # PyTorch's sddmm operator only supports CSR format.\n res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)\n return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)\n+\n+\n+def mock_bsddmm(\n+ A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n+) -> SparseMatrix:\n+ r\"\"\"Batched Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n+\n+ ``bsddmm`` conducts `sddmm` for each batch of the two dense matrices\n+ independently.\n+\n+ In particular, :attr:``mat1`` and :attr:``mat2`` can be 2-D, which will be\n+ reshape as `(B, M, 1)` and `(B, 1, K)` in the computation.\n+\n+ Parameters\n+ ----------\n+ A : SparseMatrix\n+ Sparse matrix of shape `(M, N)`.\n+ mat1 : Tensor\n+ Dense matrix of shape `(B, M, K)` or `(B, M,)`\n+ mat2 : Tensor\n+ Dense matrix of shape `(B, K, N)` or `(B, K,)`\n+\n+ Returns\n+ -------\n+ SparseMatrix\n+ Sparse matrix of shape `(M, N)` with non-zero values of `B` dimension.\n+\n+ Examples\n+ --------\n+\n+ >>> row = torch.tensor([1, 1, 2])\n+ >>> col = torch.tensor([2, 3, 3])\n+ >>> val = torch.arange(1, 4).float()\n+ >>> A = create_from_coo(row, col, val, (3, 4))\n+ >>> mat1 = torch.randn(2, 3, 5)\n+ >>> mat2 = torch.randn(2, 5, 4)\n+ >>> dgl.mock_sparse.mock_bsddmm(A, mat1, mat2)\n+ SparseMatrix(indices=tensor([[1, 1, 2],\n+ [2, 3, 3]]),\n+ values=tensor([[-0.6765, -0.4017],\n+ [ 3.3290, 6.9016],\n+ [ 4.8184, 5.8882]]),\n+ shape=(3, 4), nnz=3)\n+ \"\"\"\n+ batch_mat1 = [mat1[i, ...] for i in range(mat1.shape[0])]\n+ batch_mat2 = [mat2[i, ...] for i in range(mat2.shape[0])]\n+ batch_ret = [sddmm(A, lhs, rhs) for lhs, rhs in zip(batch_mat1, batch_mat2)]\n+ return create_from_coo(\n+ row=A.row,\n+ col=A.col,\n+ val=torch.stack([sp_mat.val for sp_mat in batch_ret], dim=-1),\n+ shape=A.shape,\n+ )\n", "issue": "[Sparse] Create a mock implementation in mock_sparse for BSDDMM.\n## \ud83d\udd28Work Item\r\n\r\n**IMPORTANT:**\r\n* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.\r\n* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.\r\n\r\nProject tracker: https://github.com/orgs/dmlc/projects/2\r\n\r\n## Description\r\n\r\n<!-- short description of the work item -->\r\n\r\n## Depending work items or issues\r\n\r\n<!-- what must be done before this -->\r\n\n", "before_files": [{"content": "\"\"\"Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module.\"\"\"\nimport torch\n\nfrom .sp_matrix import SparseMatrix\n\n__all__ = [\"sddmm\"]\n\n\ndef sddmm(\n A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n) -> SparseMatrix:\n r\"\"\"Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n\n ``sddmm`` multiplies two dense matrices :attr:``mat1`` and :attr:``mat2``\n at the nonzero locations of sparse matrix :attr:``A``. Values of :attr:``A``\n is added to the resulting matrix.\n\n Mathematically ``sddmm`` is formulated as:\n\n .. 
math::\n out = (mat1 @ mat2) * spy(A) + A\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix of shape `(M, N)`.\n mat1 : Tensor\n Dense matrix of shape `(M, K)`\n mat2 : Tensor\n Dense matrix of shape `(K, N)`\n\n Returns\n -------\n SparseMatrix\n Sparse matrix of shape `(M, N)`.\n\n Examples\n --------\n\n >>> row = torch.Tensor([1, 1, 2])\n >>> col = torch.Tensor([2, 3, 3])\n >>> val = torch.arange(1, 4).float()\n >>> A = SparseMatrix(row, col, val, (3, 4))\n >>> mat1 = torch.randn(3, 5)\n >>> mat2 = torch.randn(5, 4)\n >>> dgl.mock_sparse.sddmm(A, mat1, mat2)\n SparseMatrix(indices=tensor([[1, 1, 2],\n [2, 3, 3]]),\n values=tensor([1.8035, 2.3375, 3.1255]),\n shape=(3, 4), nnz=3)\n \"\"\"\n assert A.val.dim() == 1, (\n f\"Nonzero elements have values of shape ({A.val.shape[1]}). Expects \"\n \"scalar values. \"\n )\n # PyTorch's sddmm operator only supports CSR format.\n res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)\n return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)\n", "path": "python/dgl/mock_sparse/sddmm.py"}]}
| 1,311 | 812 |
gh_patches_debug_26211
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-111151
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sharded checkpointing fails on load for certain tensor sizes
### 🐛 Describe the bug
Sharded checkpointing (particularly with FSDP for the optimizer state) uses ChunkShardingSpec to save/load tensors. ChunkShardingSpec's behavior is similar to torch.chunk and will result in some chunks of size 0.
This can be reproduced by trying to save a tensor of size 6 with 4 gpus. This tensor is sharded across the first 3 gpus. The resulting size of the chunks will look like [2, 2, 2, 0]. On save, it seems like ChunkShardingSpec is aware of which gpus contain shards, so it saves the tensor with shard metadata showing the size of chunks to be [2, 2, 2].
The problem occurs when attempting to load the sharded checkpoint. ChunkShardingSpec attempts to rebuild the metadata, this time being unaware of how many gpus originally contained shards. It knows that there is a tensor of size 6 and 4 gpus though, so it generates shard metadata with chunk sizes [2, 2, 2], [skipping the last gpu since it has size 0](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L94). Then when attempting to shard the tensor, the 4th gpu has no shard metadata, so a [local_tensor is never created](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L165), resulting in an [assertion error on the 4th rank](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L172) and a [type error on all other ranks](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/distributed_c10d.py#L808) because None is contained in the scatter_list.
There are a couple possible solutions to this.
1. Add shardmetadata for all gpus that allow for a tensor to be size 0
2. Change ChunkShardingSpec to distribute a tensor evenly across gpus (e.g. [2, 2, 1, 1] instead of [2, 2, 2, 0])
I've implemented and tested both solutions and both are backwards compatible with previously saved sharded checkpoints on versions 2.0.1, 2.1.0-rc3, and 8/27 nightly (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.0.1 with the new ChunkShardingSpec). Both solutions are also cross-version compatible for 2.0.1->2.1.0-rc3, 2.0.1->8/27 nightly, and 8/27 nightly->2.0.1 (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.1.0 with the new ChunkShardingSpec). The solutions might be version/cross-version compatible for other combinations, but I haven't tested those.
### Versions
This happens with pytorch 2.0.1, 2.1.0-rc3, and 8/27 nightly.
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu
</issue>
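The size arithmetic described in the report can be reproduced without GPUs or a process group. The snippet below only mirrors the `torch.chunk`-style split that `ChunkShardingSpec` follows; it is not the library code path itself.

```
import torch

t = torch.arange(6)
chunks = torch.chunk(t, 4)  # split size becomes ceil(6 / 4) == 2
sizes = [c.numel() for c in chunks]
print(len(chunks), sizes)   # 3 [2, 2, 2] -- a fourth rank would be left with nothing
assert sizes == [2, 2, 2]
```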
<code>
[start of torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py]
1 from dataclasses import dataclass
2 import torch
3 import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
4 from torch.distributed._shard.metadata import ShardMetadata
5 from torch.distributed._shard.sharded_tensor.shard import Shard
6 from torch.distributed._shard.sharded_tensor.utils import (
7 _parse_and_validate_remote_device
8 )
9 from torch.distributed._shard._utils import narrow_tensor
10 import torch.distributed as dist
11 import torch.distributed.distributed_c10d as distributed_c10d
12 from typing import List, Union, TYPE_CHECKING
13 from ._internals import (
14 get_chunked_dim_size,
15 get_split_size,
16 )
17
18 from .api import ShardingSpec
19
20 if TYPE_CHECKING:
21 # Only include ShardedTensor when do type checking, exclude it
22 # from run-time to resolve circular dependency.
23 from torch.distributed._shard.sharded_tensor import ShardedTensor
24
25 @dataclass
26 class ChunkShardingSpec(ShardingSpec):
27 """
28 This is a type of PlacementSpec that defines the placement as being sharded
29 across multiple devices. In particular, it represents sharding a Tensor
30 along a single dimension into equal chunks (similar to :meth:`torch.chunk`).
31
32 The semantics of how a tensor is partitioned is inline with
33 :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the
34 specified ``dim`` and ``chunks`` in torch.chunk is the number of elements
35 in the placement specified.
36
37 Args:
38 dim (int or str):
39 The dimension to shard on, could be an integer representing the
40 dimension or a string in case of named tensors where dimensions are
41 named. Note that named tensor support is not added yet.
42 placement(List[Union[_remote_device, str]]):
43 Specifies the placement of each shard of the Tensor. The size of
44 the list represents the number of shards to be created. This could
45 be a list of
46 :class:`torch.distributed._remote_device`'s. This list
47 could also contain a string which represents remote
48 device as accepted by
49 :class:`torch.distributed._remote_device`
50 """
51
52 ShardingDim = Union[int, str]
53
54 dim: ShardingDim
55 placements: List[Union[torch.distributed._remote_device, str]]
56
57 def __post_init__(self):
58 self._verify_dim(self.dim)
59 for i, remote_device in enumerate(self.placements):
60 if not isinstance(remote_device, torch.distributed._remote_device):
61 self.placements[i] = torch.distributed._remote_device(remote_device)
62
63 @staticmethod
64 def _verify_dim(dim):
65 # Validate the sharding spec.
66 # TODO: support named dimension
67 if isinstance(dim, str):
68 raise NotImplementedError(
69 "ChunkShardingSpec does not support named dimension yet!"
70 )
71
72 if not isinstance(dim, int):
73 raise ValueError(
74 f"Sharding dim needs to be an integer, found: {dim}"
75 )
76
77 def build_metadata(self,
78 tensor_sizes: torch.Size,
79 tensor_properties: sharded_tensor_meta.TensorProperties,
80 ) -> sharded_tensor_meta.ShardedTensorMetadata:
81 tensor_num_dim = len(tensor_sizes)
82
83 self._verify_dim(self.dim)
84 if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim: # type: ignore[operator]
85 raise ValueError(f"Invalid sharding dim: {self.dim}")
86
87 shards_metadata = []
88 sharding_dim_size = tensor_sizes[self.dim] # type: ignore[index]
89 chunks = len(self.placements)
90 split_size = get_split_size(sharding_dim_size, chunks)
91 for idx, placement in enumerate(self.placements):
92 # generate ShardMetadata for each placement device
93 chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
94 if chunked_dim_size > 0:
95 shard_size = list(tensor_sizes)
96 current_offsets = [0] * tensor_num_dim
97 current_offsets[self.dim] = split_size * idx # type: ignore[index]
98 shard_size[self.dim] = chunked_dim_size # type: ignore[index]
99
100 shard_metadata = ShardMetadata(
101 shard_offsets=current_offsets,
102 shard_sizes=shard_size,
103 placement=placement,
104 )
105 shards_metadata.append(shard_metadata)
106
107 # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]
108
109 return sharded_tensor_meta.ShardedTensorMetadata(
110 shards_metadata,
111 tensor_sizes,
112 tensor_properties
113 )
114
115
116 def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
117 """
118 Args:
119 src_rank: group rank relative to ``process_group``
120
121 N.B. If ``process_group`` is None, ``src_rank`` is a global rank.
122 """
123 # relative imports to avoid circular dependency
124 from torch.distributed._shard.sharded_tensor import (
125 ShardedTensor
126 )
127 tensor_properties = sharded_tensor_meta.TensorProperties(
128 dtype=tensor.dtype,
129 layout=tensor.layout,
130 requires_grad=tensor.requires_grad,
131 memory_format=torch.contiguous_format,
132 pin_memory=tensor.is_pinned()
133 )
134 current_rank = dist.get_rank(process_group)
135 tensor_meta = self.build_metadata(tensor.size(), tensor_properties)
136 local_shards = []
137 local_tensor = None
138 local_metadata = None
139 tensors_to_scatter = [None] * dist.get_world_size(process_group)
140
141 sharding_dim_size = tensor.size()[self.dim] # type: ignore[index]
142 chunks = len(self.placements)
143 split_size = get_split_size(sharding_dim_size, chunks)
144 scatter_shape = list(tensor.size())
145 scatter_shape[self.dim] = split_size # type: ignore[index]
146
147 for shard_meta in tensor_meta.shards_metadata:
148 rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)
149 if current_rank == src_rank:
150 # Reshape to get shard for this rank and we don't want autograd
151 # recording here for the narrow op and 'local_shard' should be a
152 # leaf variable in the autograd graph.
153 narrowed_tensor = narrow_tensor(tensor, shard_meta)
154 if shard_meta.shard_sizes[self.dim] < split_size: # type: ignore[index]
155 # for the last shard that might be smaller to other shards
156 # resize the narrowed tensor to the same size and use it for
157 # the scatter collective as dist.scatter requires same size
158 # inputs on every rank
159 tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)
160 else:
161 tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()
162
163 tensors_to_scatter[rank] = tensor_to_scatter
164
165 if current_rank == rank:
166 local_tensor = torch.empty(
167 scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)
168 local_metadata = shard_meta
169
170 # each rank should have local_tensor and local_metadata initialized if we build
171 # the metadata list in a correct way.
172 assert local_tensor is not None
173 assert local_metadata is not None
174
175 # Scatter the shards to all ranks in the pg
176 # scatter takes the global rank as ``src``
177 src_for_scatter = src_rank
178 if process_group is not None and process_group is not distributed_c10d._get_default_group():
179 src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter)
180
181 dist.scatter(
182 local_tensor,
183 scatter_list=tensors_to_scatter if current_rank == src_rank else None,
184 src=src_for_scatter,
185 group=process_group
186 )
187
188 if list(local_tensor.size()) != local_metadata.shard_sizes:
189 # detach again after receiving to ensure local shards remain a leaf node
190 local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()
191
192 # Sync requires_grad to local_shard.
193 local_tensor.requires_grad = tensor.requires_grad
194
195 local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))
196
197 st = ShardedTensor._init_from_local_shards_and_global_metadata(
198 local_shards,
199 tensor_meta,
200 process_group=process_group)
201
202 # Manually set sharding_spec
203 st._sharding_spec = self
204
205 return st
206
[end of torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
@@ -91,20 +91,17 @@
for idx, placement in enumerate(self.placements):
# generate ShardMetadata for each placement device
chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
- if chunked_dim_size > 0:
- shard_size = list(tensor_sizes)
- current_offsets = [0] * tensor_num_dim
- current_offsets[self.dim] = split_size * idx # type: ignore[index]
- shard_size[self.dim] = chunked_dim_size # type: ignore[index]
-
- shard_metadata = ShardMetadata(
- shard_offsets=current_offsets,
- shard_sizes=shard_size,
- placement=placement,
- )
- shards_metadata.append(shard_metadata)
-
- # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]
+ shard_size = list(tensor_sizes)
+ current_offsets = [0] * tensor_num_dim
+ current_offsets[self.dim] = split_size * idx # type: ignore[index]
+ shard_size[self.dim] = chunked_dim_size # type: ignore[index]
+
+ shard_metadata = ShardMetadata(
+ shard_offsets=current_offsets,
+ shard_sizes=shard_size,
+ placement=placement,
+ )
+ shards_metadata.append(shard_metadata)
return sharded_tensor_meta.ShardedTensorMetadata(
shards_metadata,
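With the `chunked_dim_size > 0` guard removed, every placement receives a `ShardMetadata` entry, including trailing zero-sized ones, so every rank later builds a `local_tensor` and participates in the scatter. The helper below is a hypothetical illustration of the resulting per-rank layout; it mirrors `get_split_size`/`get_chunked_dim_size` rather than calling them.

```
def chunk_layout(dim_size, ranks):
    split = -(-dim_size // ranks)  # ceil division, as the split size is computed
    layout = []
    for idx in range(ranks):
        size = max(min(dim_size, split * (idx + 1)) - split * idx, 0)
        layout.append((split * idx, size))  # (offset, length) for each rank
    return layout


# The failing case from the issue: four entries, the last one zero-sized.
assert chunk_layout(6, 4) == [(0, 2), (2, 2), (4, 2), (6, 0)]
```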
|
{"golden_diff": "diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py\n--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py\n+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py\n@@ -91,20 +91,17 @@\n for idx, placement in enumerate(self.placements):\n # generate ShardMetadata for each placement device\n chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)\n- if chunked_dim_size > 0:\n- shard_size = list(tensor_sizes)\n- current_offsets = [0] * tensor_num_dim\n- current_offsets[self.dim] = split_size * idx # type: ignore[index]\n- shard_size[self.dim] = chunked_dim_size # type: ignore[index]\n-\n- shard_metadata = ShardMetadata(\n- shard_offsets=current_offsets,\n- shard_sizes=shard_size,\n- placement=placement,\n- )\n- shards_metadata.append(shard_metadata)\n-\n- # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]\n+ shard_size = list(tensor_sizes)\n+ current_offsets = [0] * tensor_num_dim\n+ current_offsets[self.dim] = split_size * idx # type: ignore[index]\n+ shard_size[self.dim] = chunked_dim_size # type: ignore[index]\n+\n+ shard_metadata = ShardMetadata(\n+ shard_offsets=current_offsets,\n+ shard_sizes=shard_size,\n+ placement=placement,\n+ )\n+ shards_metadata.append(shard_metadata)\n \n return sharded_tensor_meta.ShardedTensorMetadata(\n shards_metadata,\n", "issue": "Sharded checkpointing fails on load for certain tensor sizes\n### \ud83d\udc1b Describe the bug\n\nSharded checkpointing (particularly with FSDP for the optimizer state) uses ChunkShardingSpec to save/load tensors. ChunkShardingSpec's behavior is similar to torch.chunk and will result in some chunks of size 0.\r\n\r\nThis can be reproduced by trying to save a tensor of size 6 with 4 gpus. This tensor is sharded across the first 3 gpus. The resulting size of the chunks will look like [2, 2, 2, 0]. On save, it seems like ChunkShardingSpec is aware of which gpus contain shards, so it saves the tensor with shard metadata showing the size of chunks to be [2, 2, 2].\r\n\r\nThe problem occurs when attempting to load the sharded checkpoint. ChunkShardingSpec attempts to rebuild the metadata, this time being unaware of how many gpus originally contained shards. It knows that there is a tensor of size 6 and 4 gpus though, so it generates shard metadata with chunk sizes [2, 2, 2], [skipping the last gpu since it has size 0](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L94). Then when attempting to shard the tensor, the 4th gpu has no shard metadata, so a [local_tensor is never created](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L165), resulting in an [assertion error on the 4th rank](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L172) and a [type error on all other ranks](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/distributed_c10d.py#L808) because None is contained in the scatter_list.\r\n\r\nThere are a couple possible solutions to this.\r\n1. Add shardmetadata for all gpus that allow for a tensor to be size 0\r\n2. Change ChunkShardingSpec to distribute a tensor evenly across gpus (e.g. 
[2, 2, 1, 1] instead of [2, 2, 2, 0])\r\n\r\nI've implemented and tested both solutions and both are backwards compatible with previously saved sharded checkpoints on versions 2.0.1, 2.1.0-rc3, and 8/27 nightly (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.0.1 with the new ChunkShardingSpec). Both solutions are also cross-version compatible for 2.0.1->2.1.0-rc3, 2.0.1->8/27 nightly, and 8/27 nightly->2.0.1 (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.1.0 with the new ChunkShardingSpec). The solutions might be version/cross-version compatible for other combinations, but I haven't tested those.\n\n### Versions\n\nThis happens with pytorch 2.0.1, 2.1.0-rc3, and 8/27 nightly.\n\ncc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu\n", "before_files": [{"content": "from dataclasses import dataclass\nimport torch\nimport torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta\nfrom torch.distributed._shard.metadata import ShardMetadata\nfrom torch.distributed._shard.sharded_tensor.shard import Shard\nfrom torch.distributed._shard.sharded_tensor.utils import (\n _parse_and_validate_remote_device\n)\nfrom torch.distributed._shard._utils import narrow_tensor\nimport torch.distributed as dist\nimport torch.distributed.distributed_c10d as distributed_c10d\nfrom typing import List, Union, TYPE_CHECKING\nfrom ._internals import (\n get_chunked_dim_size,\n get_split_size,\n)\n\nfrom .api import ShardingSpec\n\nif TYPE_CHECKING:\n # Only include ShardedTensor when do type checking, exclude it\n # from run-time to resolve circular dependency.\n from torch.distributed._shard.sharded_tensor import ShardedTensor\n\n@dataclass\nclass ChunkShardingSpec(ShardingSpec):\n \"\"\"\n This is a type of PlacementSpec that defines the placement as being sharded\n across multiple devices. In particular, it represents sharding a Tensor\n along a single dimension into equal chunks (similar to :meth:`torch.chunk`).\n\n The semantics of how a tensor is partitioned is inline with\n :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the\n specified ``dim`` and ``chunks`` in torch.chunk is the number of elements\n in the placement specified.\n\n Args:\n dim (int or str):\n The dimension to shard on, could be an integer representing the\n dimension or a string in case of named tensors where dimensions are\n named. Note that named tensor support is not added yet.\n placement(List[Union[_remote_device, str]]):\n Specifies the placement of each shard of the Tensor. The size of\n the list represents the number of shards to be created. This could\n be a list of\n :class:`torch.distributed._remote_device`'s. 
This list\n could also contain a string which represents remote\n device as accepted by\n :class:`torch.distributed._remote_device`\n \"\"\"\n\n ShardingDim = Union[int, str]\n\n dim: ShardingDim\n placements: List[Union[torch.distributed._remote_device, str]]\n\n def __post_init__(self):\n self._verify_dim(self.dim)\n for i, remote_device in enumerate(self.placements):\n if not isinstance(remote_device, torch.distributed._remote_device):\n self.placements[i] = torch.distributed._remote_device(remote_device)\n\n @staticmethod\n def _verify_dim(dim):\n # Validate the sharding spec.\n # TODO: support named dimension\n if isinstance(dim, str):\n raise NotImplementedError(\n \"ChunkShardingSpec does not support named dimension yet!\"\n )\n\n if not isinstance(dim, int):\n raise ValueError(\n f\"Sharding dim needs to be an integer, found: {dim}\"\n )\n\n def build_metadata(self,\n tensor_sizes: torch.Size,\n tensor_properties: sharded_tensor_meta.TensorProperties,\n ) -> sharded_tensor_meta.ShardedTensorMetadata:\n tensor_num_dim = len(tensor_sizes)\n\n self._verify_dim(self.dim)\n if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim: # type: ignore[operator]\n raise ValueError(f\"Invalid sharding dim: {self.dim}\")\n\n shards_metadata = []\n sharding_dim_size = tensor_sizes[self.dim] # type: ignore[index]\n chunks = len(self.placements)\n split_size = get_split_size(sharding_dim_size, chunks)\n for idx, placement in enumerate(self.placements):\n # generate ShardMetadata for each placement device\n chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)\n if chunked_dim_size > 0:\n shard_size = list(tensor_sizes)\n current_offsets = [0] * tensor_num_dim\n current_offsets[self.dim] = split_size * idx # type: ignore[index]\n shard_size[self.dim] = chunked_dim_size # type: ignore[index]\n\n shard_metadata = ShardMetadata(\n shard_offsets=current_offsets,\n shard_sizes=shard_size,\n placement=placement,\n )\n shards_metadata.append(shard_metadata)\n\n # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]\n\n return sharded_tensor_meta.ShardedTensorMetadata(\n shards_metadata,\n tensor_sizes,\n tensor_properties\n )\n\n\n def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> \"ShardedTensor\":\n \"\"\"\n Args:\n src_rank: group rank relative to ``process_group``\n\n N.B. 
If ``process_group`` is None, ``src_rank`` is a global rank.\n \"\"\"\n # relative imports to avoid circular dependency\n from torch.distributed._shard.sharded_tensor import (\n ShardedTensor\n )\n tensor_properties = sharded_tensor_meta.TensorProperties(\n dtype=tensor.dtype,\n layout=tensor.layout,\n requires_grad=tensor.requires_grad,\n memory_format=torch.contiguous_format,\n pin_memory=tensor.is_pinned()\n )\n current_rank = dist.get_rank(process_group)\n tensor_meta = self.build_metadata(tensor.size(), tensor_properties)\n local_shards = []\n local_tensor = None\n local_metadata = None\n tensors_to_scatter = [None] * dist.get_world_size(process_group)\n\n sharding_dim_size = tensor.size()[self.dim] # type: ignore[index]\n chunks = len(self.placements)\n split_size = get_split_size(sharding_dim_size, chunks)\n scatter_shape = list(tensor.size())\n scatter_shape[self.dim] = split_size # type: ignore[index]\n\n for shard_meta in tensor_meta.shards_metadata:\n rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)\n if current_rank == src_rank:\n # Reshape to get shard for this rank and we don't want autograd\n # recording here for the narrow op and 'local_shard' should be a\n # leaf variable in the autograd graph.\n narrowed_tensor = narrow_tensor(tensor, shard_meta)\n if shard_meta.shard_sizes[self.dim] < split_size: # type: ignore[index]\n # for the last shard that might be smaller to other shards\n # resize the narrowed tensor to the same size and use it for\n # the scatter collective as dist.scatter requires same size\n # inputs on every rank\n tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)\n else:\n tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()\n\n tensors_to_scatter[rank] = tensor_to_scatter\n\n if current_rank == rank:\n local_tensor = torch.empty(\n scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)\n local_metadata = shard_meta\n\n # each rank should have local_tensor and local_metadata initialized if we build\n # the metadata list in a correct way.\n assert local_tensor is not None\n assert local_metadata is not None\n\n # Scatter the shards to all ranks in the pg\n # scatter takes the global rank as ``src``\n src_for_scatter = src_rank\n if process_group is not None and process_group is not distributed_c10d._get_default_group():\n src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter)\n\n dist.scatter(\n local_tensor,\n scatter_list=tensors_to_scatter if current_rank == src_rank else None,\n src=src_for_scatter,\n group=process_group\n )\n\n if list(local_tensor.size()) != local_metadata.shard_sizes:\n # detach again after receiving to ensure local shards remain a leaf node\n local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()\n\n # Sync requires_grad to local_shard.\n local_tensor.requires_grad = tensor.requires_grad\n\n local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))\n\n st = ShardedTensor._init_from_local_shards_and_global_metadata(\n local_shards,\n tensor_meta,\n process_group=process_group)\n\n # Manually set sharding_spec\n st._sharding_spec = self\n\n return st\n", "path": "torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py"}]}
| 3,805 | 386 |
gh_patches_debug_64879
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-5396
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
protobuf error on clean streamlit install of 1.12.3
### Summary
I installed streamlit, and couldn't use it due to a reported error from protobuf. Protobuf version 3.20.2 was installed automatically along with streamlit.
### Steps to reproduce
Code snippet:
```
pip install streamlit
streamlit run something.py
```
Error
```
streamlit run app.py
Traceback (most recent call last):
File "/private/tmp/tttt/.direnv/python-3.10.3/bin/streamlit", line 5, in <module>
from streamlit.web.cli import main
File "/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/__init__.py", line 48, in <module>
from streamlit.proto.RootContainer_pb2 import RootContainer
File "/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/proto/RootContainer_pb2.py", line 6, in <module>
from google.protobuf import descriptor as _descriptor
File "/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/descriptor.py", line 47, in <module>
from google.protobuf.pyext import _message
ImportError: dlopen(/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/pyext/_message.cpython-310-darwin.so, 0x0002): symbol not found in flat namespace (__ZN6google8protobuf15FieldDescriptor12TypeOnceInitEPKS1_)
```
This was solved by downgrading protobuf
```
pip install protobuf==3.19.4
```
**Expected behavior:**
App opens
**Actual behavior:**
Error message
### Is this a regression?
Yes
### Debug info
- Streamlit version: (get it with `$ streamlit version`)
```
In [4]: version._get_installed_streamlit_version()
Out[4]: <Version('1.12.3.dev20220919')>
```
- Python version: (get it with `$ python --version`) 3.10.3
- Using Conda? PipEnv? PyEnv? Pex? pyenv
- OS version: OSX
- Browser version:
### Additional information
Came from https://discuss.streamlit.io/t/streamlit-hello-returns-importerror/30929/3?u=blackary
</issue>
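The report narrows the breakage to protobuf 3.20.2 specifically, with 3.19.4 known to work. A requirement string that keeps the existing range while excluding just that release can be sanity-checked with the `packaging` library, which the setup file below already lists as a dependency; the exact specifier here is only an example.

```
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=3.12,<4,!=3.20.2")
assert Version("3.19.4") in spec      # the downgrade that worked in the report
assert Version("3.20.2") not in spec  # the broken release is excluded
assert Version("4.21.0") not in spec  # still capped below protobuf 4
```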
<code>
[start of lib/setup.py]
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import setuptools
17 import sys
18
19 from setuptools.command.install import install
20
21 VERSION = "1.12.2" # PEP-440
22
23 NAME = "streamlit"
24
25 DESCRIPTION = "The fastest way to build data apps in Python"
26
27 LONG_DESCRIPTION = (
28 "Streamlit's open-source app framework is the easiest way "
29 "for data scientists and machine learning engineers to "
30 "create beautiful, performant apps in only a few hours! "
31 "All in pure Python. All for free."
32 )
33
34 # IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.
35 # And if you do add one, make the required version as general as possible.
36 # But include relevant lower bounds for any features we use from our dependencies.
37 INSTALL_REQUIRES = [
38 "altair>=3.2.0",
39 "blinker>=1.0.0",
40 "cachetools>=4.0",
41 "click>=7.0",
42 # 1.4 introduced the functionality found in python 3.8's importlib.metadata module
43 "importlib-metadata>=1.4",
44 "numpy",
45 "packaging>=14.1",
46 "pandas>=0.21.0",
47 "pillow>=6.2.0",
48 "protobuf<4,>=3.12",
49 "pyarrow>=4.0",
50 "pydeck>=0.1.dev5",
51 "pympler>=0.9",
52 "python-dateutil",
53 "requests>=2.4",
54 "rich>=10.11.0",
55 "semver",
56 "toml",
57 # 5.0 has a fix for etag header: https://github.com/tornadoweb/tornado/issues/2262
58 "tornado>=5.0",
59 "typing-extensions>=3.10.0.0",
60 "tzlocal>=1.1",
61 "validators>=0.2",
62 # Don't require watchdog on MacOS, since it'll fail without xcode tools.
63 # Without watchdog, we fallback to a polling file watcher to check for app changes.
64 "watchdog; platform_system != 'Darwin'",
65 ]
66
67 # We want to exclude some dependencies in our internal conda distribution of
68 # Streamlit.
69 CONDA_OPTIONAL_DEPENDENCIES = [
70 "gitpython!=3.1.19",
71 ]
72
73 # NOTE: ST_CONDA_BUILD is used here (even though CONDA_BUILD is set
74 # automatically when using the `conda build` command) because the
75 # `load_setup_py_data()` conda build helper function does not have the
76 # CONDA_BUILD environment variable set when it runs to generate our build
77 # recipe from meta.yaml.
78 if not os.getenv("ST_CONDA_BUILD"):
79 INSTALL_REQUIRES.extend(CONDA_OPTIONAL_DEPENDENCIES)
80
81
82 class VerifyVersionCommand(install):
83 """Custom command to verify that the git tag matches our version"""
84
85 description = "verify that the git tag matches our version"
86
87 def run(self):
88 tag = os.getenv("CIRCLE_TAG")
89
90 if tag != VERSION:
91 info = "Git tag: {0} does not match the version of this app: {1}".format(
92 tag, VERSION
93 )
94 sys.exit(info)
95
96
97 setuptools.setup(
98 name=NAME,
99 version=VERSION,
100 description=DESCRIPTION,
101 long_description=LONG_DESCRIPTION,
102 url="https://streamlit.io",
103 project_urls={
104 "Source": "https://github.com/streamlit/streamlit",
105 },
106 author="Streamlit Inc",
107 author_email="[email protected]",
108 # We exclude Python 3.9.7 from our compatible versions due to a bug in that version
109 # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and
110 # https://bugs.python.org/issue45121
111 python_requires=">=3.7, !=3.9.7",
112 license="Apache 2",
113 # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html
114 package_data={"streamlit": ["py.typed", "hello/**/*.py"]},
115 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
116 # Requirements
117 install_requires=INSTALL_REQUIRES,
118 zip_safe=False, # install source files not egg
119 include_package_data=True, # copy html and friends
120 entry_points={"console_scripts": ["streamlit = streamlit.web.cli:main"]},
121 # For Windows so that streamlit * commands work ie.
122 # - streamlit version
123 # - streamlit hello
124 scripts=["bin/streamlit.cmd"],
125 cmdclass={
126 "verify": VerifyVersionCommand,
127 },
128 )
129
[end of lib/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/setup.py b/lib/setup.py
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -45,7 +45,8 @@
"packaging>=14.1",
"pandas>=0.21.0",
"pillow>=6.2.0",
- "protobuf<4,>=3.12",
+ # protobuf 3.20.2 is broken: https://github.com/protocolbuffers/protobuf/issues/10571
+ "protobuf<4,>=3.12,!=3.20.2",
"pyarrow>=4.0",
"pydeck>=0.1.dev5",
"pympler>=0.9",
|
{"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -45,7 +45,8 @@\n \"packaging>=14.1\",\n \"pandas>=0.21.0\",\n \"pillow>=6.2.0\",\n- \"protobuf<4,>=3.12\",\n+ # protobuf 3.20.2 is broken: https://github.com/protocolbuffers/protobuf/issues/10571\n+ \"protobuf<4,>=3.12,!=3.20.2\",\n \"pyarrow>=4.0\",\n \"pydeck>=0.1.dev5\",\n \"pympler>=0.9\",\n", "issue": "protobuf error on clean streamlit install of 1.12.3\n### Summary\r\n\r\nI installed streamlit, and couldn't use it due to a reported error from protobuf. Protobuf version 3.20.2 was installed automatically along with streamlit.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\npip install streamlit\r\nstreamlit run something.py\r\n```\r\n\r\nError\r\n```\r\nstreamlit run app.py\r\n\r\nTraceback (most recent call last):\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/bin/streamlit\", line 5, in <module>\r\n from streamlit.web.cli import main\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/__init__.py\", line 48, in <module>\r\n from streamlit.proto.RootContainer_pb2 import RootContainer\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/proto/RootContainer_pb2.py\", line 6, in <module>\r\n from google.protobuf import descriptor as _descriptor\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/descriptor.py\", line 47, in <module>\r\n from google.protobuf.pyext import _message\r\nImportError: dlopen(/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/pyext/_message.cpython-310-darwin.so, 0x0002): symbol not found in flat namespace (__ZN6google8protobuf15FieldDescriptor12TypeOnceInitEPKS1_)\r\n```\r\n\r\nThis was solved by downgrading protobuf \r\n```\r\npip install protobuf==3.19.4\r\n```\r\n(Please provide a code snippet! This will help expedite us finding and solving the problem.)\r\n\r\nIf applicable, please provide the steps we should take to reproduce the bug:\r\n\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n\r\n**Expected behavior:**\r\n\r\nApp opens\r\n\r\n**Actual behavior:**\r\n\r\nError message\r\n\r\n### Is this a regression?\r\n\r\nYes\r\n\r\n### Debug info\r\n\r\n- Streamlit version: (get it with `$ streamlit version`) \r\n```\r\nIn [4]: version._get_installed_streamlit_version()\r\nOut[4]: <Version('1.12.3.dev20220919')>\r\n```\r\n- Python version: (get it with `$ python --version`) 3.10.3\r\n- Using Conda? PipEnv? PyEnv? Pex? 
pyenv\r\n- OS version: OSX\r\n- Browser version: \r\n\r\n### Additional information\r\n\r\nCame from https://discuss.streamlit.io/t/streamlit-hello-returns-importerror/30929/3?u=blackary\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\nVERSION = \"1.12.2\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\n# IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.\n# And if you do add one, make the required version as general as possible.\n# But include relevant lower bounds for any features we use from our dependencies.\nINSTALL_REQUIRES = [\n \"altair>=3.2.0\",\n \"blinker>=1.0.0\",\n \"cachetools>=4.0\",\n \"click>=7.0\",\n # 1.4 introduced the functionality found in python 3.8's importlib.metadata module\n \"importlib-metadata>=1.4\",\n \"numpy\",\n \"packaging>=14.1\",\n \"pandas>=0.21.0\",\n \"pillow>=6.2.0\",\n \"protobuf<4,>=3.12\",\n \"pyarrow>=4.0\",\n \"pydeck>=0.1.dev5\",\n \"pympler>=0.9\",\n \"python-dateutil\",\n \"requests>=2.4\",\n \"rich>=10.11.0\",\n \"semver\",\n \"toml\",\n # 5.0 has a fix for etag header: https://github.com/tornadoweb/tornado/issues/2262\n \"tornado>=5.0\",\n \"typing-extensions>=3.10.0.0\",\n \"tzlocal>=1.1\",\n \"validators>=0.2\",\n # Don't require watchdog on MacOS, since it'll fail without xcode tools.\n # Without watchdog, we fallback to a polling file watcher to check for app changes.\n \"watchdog; platform_system != 'Darwin'\",\n]\n\n# We want to exclude some dependencies in our internal conda distribution of\n# Streamlit.\nCONDA_OPTIONAL_DEPENDENCIES = [\n \"gitpython!=3.1.19\",\n]\n\n# NOTE: ST_CONDA_BUILD is used here (even though CONDA_BUILD is set\n# automatically when using the `conda build` command) because the\n# `load_setup_py_data()` conda build helper function does not have the\n# CONDA_BUILD environment variable set when it runs to generate our build\n# recipe from meta.yaml.\nif not os.getenv(\"ST_CONDA_BUILD\"):\n INSTALL_REQUIRES.extend(CONDA_OPTIONAL_DEPENDENCIES)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n project_urls={\n \"Source\": 
\"https://github.com/streamlit/streamlit\",\n },\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n # We exclude Python 3.9.7 from our compatible versions due to a bug in that version\n # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and\n # https://bugs.python.org/issue45121\n python_requires=\">=3.7, !=3.9.7\",\n license=\"Apache 2\",\n # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html\n package_data={\"streamlit\": [\"py.typed\", \"hello/**/*.py\"]},\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=INSTALL_REQUIRES,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.web.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]}
| 2,614 | 164 |
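An aside on the requirement change in the record above: the golden diff tightens the protobuf specifier to `protobuf<4,>=3.12,!=3.20.2`. The sketch below is only an illustrative check of that specifier and assumes the third-party `packaging` library is installed; it is not part of the record.

```py
# Illustrative check only (assumes `packaging` is installed): the tightened
# specifier keeps the previous range but excludes the broken 3.20.2 release.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("<4,>=3.12,!=3.20.2")

assert "3.20.1" in spec      # still allowed
assert "3.20.2" not in spec  # the broken release is now excluded
assert "4.0.0" not in spec   # upper bound unchanged
```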
gh_patches_debug_38783
|
rasdani/github-patches
|
git_diff
|
aws__aws-sam-cli-1030
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`sam build` for Gradle using gradlew does not work with --use-container
<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed).
If you do find an existing Issue, re-open or add a comment to that Issue instead of creating a new one. -->
### Description
Briefly describe the bug you are facing.
When a Gradle project uses a `gradlew` script, that file does not get picked up when building inside a container.
### Steps to reproduce
Provide steps to replicate.
This integration test sets everything up to repro this issue - https://github.com/awslabs/aws-sam-cli/blob/develop/tests/integration/buildcmd/test_build_cmd.py#L256
### Observed result
Please provide command output with `--debug` flag set.
`gradle` installation within the container is used instead of the `gradlew` script
### Expected result
Describe what you expected.
`gradlew` script is used to build the project
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS:
2. `sam --version`:
`Add --debug flag to command you are running`
</issue>
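As an illustrative aside before the source listing: the golden diff further down this record resolves the issue by translating host paths (such as the directory that contains `gradlew`) into their mounted locations inside the build container. The sketch below is a simplified, standalone version of that idea; the function name and example paths are invented for illustration and are not part of the repository.

```py
# Standalone sketch of the host-to-container path mapping idea (hypothetical names).
# Mounted paths get rewritten to their container location; anything else passes
# through unchanged.
import pathlib

def convert_to_container_dirs(host_paths, host_to_container):
    if not host_paths:
        return host_paths
    mapping = {str(pathlib.Path(p).resolve()): c for p, c in host_to_container.items()}
    return [mapping.get(str(pathlib.Path(p).resolve()), p) for p in host_paths]

# Example on a POSIX system: the project dir (where gradlew lives) is mounted
# at /tmp/samcli/source, while /usr/local/bin is not mounted at all.
print(convert_to_container_dirs(
    ["/home/user/project", "/usr/local/bin"],
    {"/home/user/project": "/tmp/samcli/source"},
))  # -> ['/tmp/samcli/source', '/usr/local/bin']
```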
<code>
[start of samcli/local/docker/lambda_build_container.py]
1 """
2 Represents Lambda Build Containers.
3 """
4
5 import json
6 import logging
7
8 try:
9 import pathlib
10 except ImportError:
11 import pathlib2 as pathlib
12
13 from .container import Container
14
15 LOG = logging.getLogger(__name__)
16
17
18 class LambdaBuildContainer(Container):
19 """
20 Class to manage Build containers that are capable of building AWS Lambda functions.
21 This container mounts necessary folders, issues a command to the Lambda Builder CLI,
22 and if the build was successful, copies back artifacts to the host filesystem
23 """
24
25 _IMAGE_REPO_NAME = "lambci/lambda"
26 _BUILDERS_EXECUTABLE = "lambda-builders"
27
28 def __init__(self, # pylint: disable=too-many-locals
29 protocol_version,
30 language,
31 dependency_manager,
32 application_framework,
33 source_dir,
34 manifest_path,
35 runtime,
36 optimizations=None,
37 options=None,
38 executable_search_paths=None,
39 log_level=None):
40
41 abs_manifest_path = pathlib.Path(manifest_path).resolve()
42 manifest_file_name = abs_manifest_path.name
43 manifest_dir = str(abs_manifest_path.parent)
44
45 source_dir = str(pathlib.Path(source_dir).resolve())
46
47 container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)
48
49 request_json = self._make_request(protocol_version,
50 language,
51 dependency_manager,
52 application_framework,
53 container_dirs,
54 manifest_file_name,
55 runtime,
56 optimizations,
57 options,
58 executable_search_paths)
59
60 image = LambdaBuildContainer._get_image(runtime)
61 entry = LambdaBuildContainer._get_entrypoint(request_json)
62 cmd = []
63
64 additional_volumes = {
65 # Manifest is mounted separately in order to support the case where manifest
66 # is outside of source directory
67 manifest_dir: {
68 "bind": container_dirs["manifest_dir"],
69 "mode": "ro"
70 }
71 }
72
73 env_vars = None
74 if log_level:
75 env_vars = {
76 "LAMBDA_BUILDERS_LOG_LEVEL": log_level
77 }
78
79 super(LambdaBuildContainer, self).__init__(
80 image,
81 cmd,
82 container_dirs["source_dir"],
83 source_dir,
84 additional_volumes=additional_volumes,
85 entrypoint=entry,
86 env_vars=env_vars)
87
88 @property
89 def executable_name(self):
90 return LambdaBuildContainer._BUILDERS_EXECUTABLE
91
92 @staticmethod
93 def _make_request(protocol_version,
94 language,
95 dependency_manager,
96 application_framework,
97 container_dirs,
98 manifest_file_name,
99 runtime,
100 optimizations,
101 options,
102 executable_search_paths):
103
104 return json.dumps({
105 "jsonschema": "2.0",
106 "id": 1,
107 "method": "LambdaBuilder.build",
108 "params": {
109 "__protocol_version": protocol_version,
110 "capability": {
111 "language": language,
112 "dependency_manager": dependency_manager,
113 "application_framework": application_framework
114 },
115 "source_dir": container_dirs["source_dir"],
116 "artifacts_dir": container_dirs["artifacts_dir"],
117 "scratch_dir": container_dirs["scratch_dir"],
118
119 # Path is always inside a Linux container. So '/' is valid
120 "manifest_path": "{}/{}".format(container_dirs["manifest_dir"], manifest_file_name),
121
122 "runtime": runtime,
123 "optimizations": optimizations,
124 "options": options,
125 "executable_search_paths": executable_search_paths
126 }
127 })
128
129 @staticmethod
130 def _get_entrypoint(request_json):
131 return [LambdaBuildContainer._BUILDERS_EXECUTABLE, request_json]
132
133 @staticmethod
134 def _get_container_dirs(source_dir, manifest_dir):
135 """
136 Provides paths to directories within the container that is required by the builder
137
138 Parameters
139 ----------
140 source_dir : str
141 Path to the function source code
142
143 manifest_dir : str
144 Path to the directory containing manifest
145
146 Returns
147 -------
148 dict
149 Contains paths to source, artifacts, scratch & manifest directories
150 """
151 base = "/tmp/samcli"
152 result = {
153 "source_dir": "{}/source".format(base),
154 "artifacts_dir": "{}/artifacts".format(base),
155 "scratch_dir": "{}/scratch".format(base),
156 "manifest_dir": "{}/manifest".format(base)
157 }
158
159 if pathlib.PurePath(source_dir) == pathlib.PurePath(manifest_dir):
160 # It is possible that the manifest resides within the source. In that case, we won't mount the manifest
161 # directory separately.
162 result["manifest_dir"] = result["source_dir"]
163
164 return result
165
166 @staticmethod
167 def _get_image(runtime):
168 return "{}:build-{}".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)
169
[end of samcli/local/docker/lambda_build_container.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/samcli/local/docker/lambda_build_container.py b/samcli/local/docker/lambda_build_container.py
--- a/samcli/local/docker/lambda_build_container.py
+++ b/samcli/local/docker/lambda_build_container.py
@@ -46,6 +46,18 @@
container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)
+ # `executable_search_paths` are provided as a list of paths on the host file system that needs to passed to
+ # the builder. But these paths don't exist within the container. We use the following method to convert the
+ # host paths to container paths. But if a host path is NOT mounted within the container, we will simply ignore
+ # it. In essence, only when the path is already in the mounted path, can the path resolver within the
+ # container even find the executable.
+ executable_search_paths = LambdaBuildContainer._convert_to_container_dirs(
+ host_paths_to_convert=executable_search_paths,
+ host_to_container_path_mapping={
+ source_dir: container_dirs["source_dir"],
+ manifest_dir: container_dirs["manifest_dir"]
+ })
+
request_json = self._make_request(protocol_version,
language,
dependency_manager,
@@ -163,6 +175,54 @@
return result
+ @staticmethod
+ def _convert_to_container_dirs(host_paths_to_convert, host_to_container_path_mapping):
+ """
+ Use this method to convert a list of host paths to a list of equivalent paths within the container
+ where the given host path is mounted. This is necessary when SAM CLI needs to pass path information to
+ the Lambda Builder running within the container.
+
+ If a host path is not mounted within the container, then this method simply passes the path to the result
+ without any changes.
+
+ Ex:
+ [ "/home/foo", "/home/bar", "/home/not/mounted"] => ["/tmp/source", "/tmp/manifest", "/home/not/mounted"]
+
+ Parameters
+ ----------
+ host_paths_to_convert : list
+ List of paths in host that needs to be converted
+
+ host_to_container_path_mapping : dict
+ Mapping of paths in host to the equivalent paths within the container
+
+ Returns
+ -------
+ list
+ Equivalent paths within the container
+ """
+
+ if not host_paths_to_convert:
+ # Nothing to do
+ return host_paths_to_convert
+
+ # Make sure the key is absolute host path. Relative paths are tricky to work with because two different
+ # relative paths can point to the same directory ("../foo", "../../foo")
+ mapping = {str(pathlib.Path(p).resolve()): v for p, v in host_to_container_path_mapping.items()}
+
+ result = []
+ for original_path in host_paths_to_convert:
+ abspath = str(pathlib.Path(original_path).resolve())
+
+ if abspath in mapping:
+ result.append(mapping[abspath])
+ else:
+ result.append(original_path)
+ LOG.debug("Cannot convert host path '%s' to its equivalent path within the container. "
+ "Host path is not mounted within the container", abspath)
+
+ return result
+
@staticmethod
def _get_image(runtime):
return "{}:build-{}".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)
|
{"golden_diff": "diff --git a/samcli/local/docker/lambda_build_container.py b/samcli/local/docker/lambda_build_container.py\n--- a/samcli/local/docker/lambda_build_container.py\n+++ b/samcli/local/docker/lambda_build_container.py\n@@ -46,6 +46,18 @@\n \n container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)\n \n+ # `executable_search_paths` are provided as a list of paths on the host file system that needs to passed to\n+ # the builder. But these paths don't exist within the container. We use the following method to convert the\n+ # host paths to container paths. But if a host path is NOT mounted within the container, we will simply ignore\n+ # it. In essence, only when the path is already in the mounted path, can the path resolver within the\n+ # container even find the executable.\n+ executable_search_paths = LambdaBuildContainer._convert_to_container_dirs(\n+ host_paths_to_convert=executable_search_paths,\n+ host_to_container_path_mapping={\n+ source_dir: container_dirs[\"source_dir\"],\n+ manifest_dir: container_dirs[\"manifest_dir\"]\n+ })\n+\n request_json = self._make_request(protocol_version,\n language,\n dependency_manager,\n@@ -163,6 +175,54 @@\n \n return result\n \n+ @staticmethod\n+ def _convert_to_container_dirs(host_paths_to_convert, host_to_container_path_mapping):\n+ \"\"\"\n+ Use this method to convert a list of host paths to a list of equivalent paths within the container\n+ where the given host path is mounted. This is necessary when SAM CLI needs to pass path information to\n+ the Lambda Builder running within the container.\n+\n+ If a host path is not mounted within the container, then this method simply passes the path to the result\n+ without any changes.\n+\n+ Ex:\n+ [ \"/home/foo\", \"/home/bar\", \"/home/not/mounted\"] => [\"/tmp/source\", \"/tmp/manifest\", \"/home/not/mounted\"]\n+\n+ Parameters\n+ ----------\n+ host_paths_to_convert : list\n+ List of paths in host that needs to be converted\n+\n+ host_to_container_path_mapping : dict\n+ Mapping of paths in host to the equivalent paths within the container\n+\n+ Returns\n+ -------\n+ list\n+ Equivalent paths within the container\n+ \"\"\"\n+\n+ if not host_paths_to_convert:\n+ # Nothing to do\n+ return host_paths_to_convert\n+\n+ # Make sure the key is absolute host path. Relative paths are tricky to work with because two different\n+ # relative paths can point to the same directory (\"../foo\", \"../../foo\")\n+ mapping = {str(pathlib.Path(p).resolve()): v for p, v in host_to_container_path_mapping.items()}\n+\n+ result = []\n+ for original_path in host_paths_to_convert:\n+ abspath = str(pathlib.Path(original_path).resolve())\n+\n+ if abspath in mapping:\n+ result.append(mapping[abspath])\n+ else:\n+ result.append(original_path)\n+ LOG.debug(\"Cannot convert host path '%s' to its equivalent path within the container. \"\n+ \"Host path is not mounted within the container\", abspath)\n+\n+ return result\n+\n @staticmethod\n def _get_image(runtime):\n return \"{}:build-{}\".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)\n", "issue": "`sam build` for Gradle using gradlew does not work with --use-container\n<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed). \r\nIf you do find an existing Issue, re-open or add a comment to that Issue instead of creating a new one. 
-->\r\n\r\n### Description\r\n\r\nBriefly describe the bug you are facing.\r\nWhen a Gradle project uses `gradlew` scripts, this file does not get picked up with building inside a container.\r\n\r\n### Steps to reproduce\r\n\r\nProvide steps to replicate.\r\nThis integration test sets everything up to repro this issue - https://github.com/awslabs/aws-sam-cli/blob/develop/tests/integration/buildcmd/test_build_cmd.py#L256\r\n\r\n\r\n### Observed result\r\n\r\nPlease provide command output with `--debug` flag set.\r\n`gradle` installation within the container is used instead of the `gradlew` script\r\n\r\n### Expected result\r\n\r\nDescribe what you expected.\r\n`gradlew` script is used to build the project \r\n\r\n### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)\r\n\r\n1. OS:\r\n2. `sam --version`:\r\n\r\n`Add --debug flag to command you are running`\n", "before_files": [{"content": "\"\"\"\nRepresents Lambda Build Containers.\n\"\"\"\n\nimport json\nimport logging\n\ntry:\n import pathlib\nexcept ImportError:\n import pathlib2 as pathlib\n\nfrom .container import Container\n\nLOG = logging.getLogger(__name__)\n\n\nclass LambdaBuildContainer(Container):\n \"\"\"\n Class to manage Build containers that are capable of building AWS Lambda functions.\n This container mounts necessary folders, issues a command to the Lambda Builder CLI,\n and if the build was successful, copies back artifacts to the host filesystem\n \"\"\"\n\n _IMAGE_REPO_NAME = \"lambci/lambda\"\n _BUILDERS_EXECUTABLE = \"lambda-builders\"\n\n def __init__(self, # pylint: disable=too-many-locals\n protocol_version,\n language,\n dependency_manager,\n application_framework,\n source_dir,\n manifest_path,\n runtime,\n optimizations=None,\n options=None,\n executable_search_paths=None,\n log_level=None):\n\n abs_manifest_path = pathlib.Path(manifest_path).resolve()\n manifest_file_name = abs_manifest_path.name\n manifest_dir = str(abs_manifest_path.parent)\n\n source_dir = str(pathlib.Path(source_dir).resolve())\n\n container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)\n\n request_json = self._make_request(protocol_version,\n language,\n dependency_manager,\n application_framework,\n container_dirs,\n manifest_file_name,\n runtime,\n optimizations,\n options,\n executable_search_paths)\n\n image = LambdaBuildContainer._get_image(runtime)\n entry = LambdaBuildContainer._get_entrypoint(request_json)\n cmd = []\n\n additional_volumes = {\n # Manifest is mounted separately in order to support the case where manifest\n # is outside of source directory\n manifest_dir: {\n \"bind\": container_dirs[\"manifest_dir\"],\n \"mode\": \"ro\"\n }\n }\n\n env_vars = None\n if log_level:\n env_vars = {\n \"LAMBDA_BUILDERS_LOG_LEVEL\": log_level\n }\n\n super(LambdaBuildContainer, self).__init__(\n image,\n cmd,\n container_dirs[\"source_dir\"],\n source_dir,\n additional_volumes=additional_volumes,\n entrypoint=entry,\n env_vars=env_vars)\n\n @property\n def executable_name(self):\n return LambdaBuildContainer._BUILDERS_EXECUTABLE\n\n @staticmethod\n def _make_request(protocol_version,\n language,\n dependency_manager,\n application_framework,\n container_dirs,\n manifest_file_name,\n runtime,\n optimizations,\n options,\n executable_search_paths):\n\n return json.dumps({\n \"jsonschema\": \"2.0\",\n \"id\": 1,\n \"method\": \"LambdaBuilder.build\",\n \"params\": {\n \"__protocol_version\": protocol_version,\n \"capability\": {\n \"language\": language,\n \"dependency_manager\": 
dependency_manager,\n \"application_framework\": application_framework\n },\n \"source_dir\": container_dirs[\"source_dir\"],\n \"artifacts_dir\": container_dirs[\"artifacts_dir\"],\n \"scratch_dir\": container_dirs[\"scratch_dir\"],\n\n # Path is always inside a Linux container. So '/' is valid\n \"manifest_path\": \"{}/{}\".format(container_dirs[\"manifest_dir\"], manifest_file_name),\n\n \"runtime\": runtime,\n \"optimizations\": optimizations,\n \"options\": options,\n \"executable_search_paths\": executable_search_paths\n }\n })\n\n @staticmethod\n def _get_entrypoint(request_json):\n return [LambdaBuildContainer._BUILDERS_EXECUTABLE, request_json]\n\n @staticmethod\n def _get_container_dirs(source_dir, manifest_dir):\n \"\"\"\n Provides paths to directories within the container that is required by the builder\n\n Parameters\n ----------\n source_dir : str\n Path to the function source code\n\n manifest_dir : str\n Path to the directory containing manifest\n\n Returns\n -------\n dict\n Contains paths to source, artifacts, scratch & manifest directories\n \"\"\"\n base = \"/tmp/samcli\"\n result = {\n \"source_dir\": \"{}/source\".format(base),\n \"artifacts_dir\": \"{}/artifacts\".format(base),\n \"scratch_dir\": \"{}/scratch\".format(base),\n \"manifest_dir\": \"{}/manifest\".format(base)\n }\n\n if pathlib.PurePath(source_dir) == pathlib.PurePath(manifest_dir):\n # It is possible that the manifest resides within the source. In that case, we won't mount the manifest\n # directory separately.\n result[\"manifest_dir\"] = result[\"source_dir\"]\n\n return result\n\n @staticmethod\n def _get_image(runtime):\n return \"{}:build-{}\".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)\n", "path": "samcli/local/docker/lambda_build_container.py"}]}
| 2,200 | 754 |
gh_patches_debug_8390
|
rasdani/github-patches
|
git_diff
|
pydantic__pydantic-298
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Date annotation is parsed inconsistently depending on input data.
# Bug
I've tried to compare two date fields of two pydantic model instances.
But I got: `TypeError: can't compare datetime.datetime to datetime.date`
I figured out that it's a bug in how a field annotated as `date` is parsed when a `datetime` value is provided.
Since one of my instances is created from data stored in persistence (which is typed as "datetime") and the other instance is created from a CSV, the first is treated as a datetime and the other is treated as date.
For bugs/questions:
* OS: **Linux 64-bit **
* Python version `import sys; print(sys.version)`: **3.7.1 (default, Oct 22 2018, 10:41:28) [GCC 8.2.1 20180831]**
* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**
```py
from datetime import date, datetime
from pydantic import BaseModel
class DateRange(BaseModel):
    dt_start: date
    dt_finish: date
data = DateRange(dt_start=datetime(2017,11, 7, 12, 14), dt_finish=datetime(2017,11, 26, 12, 14))
data_ = DateRange(dt_start='2017-01-01', dt_finish='2018-01-01')
print(type(data.dt_finish), type(data.dt_start))
print(type(data_.dt_finish), type(data_.dt_start))
...
```
</issue>
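A small illustrative sketch of the normalisation that the golden diff later in this record applies: when a `date`-annotated field receives a `datetime`, it should be collapsed to a plain `date` so instances compare consistently. The helper name below is invented for illustration and is not pydantic API.

```py
# Sketch only: `coerce_to_date` is a made-up helper, not part of pydantic.
from datetime import date, datetime

def coerce_to_date(value):
    # datetime is a subclass of date, so it must be checked first
    if isinstance(value, datetime):
        return value.date()
    if isinstance(value, date):
        return value
    raise TypeError(f"cannot coerce {value!r} to a date")

assert coerce_to_date(datetime(2017, 11, 7, 12, 14)) == date(2017, 11, 7)
assert coerce_to_date(date(2018, 1, 1)) == date(2018, 1, 1)
```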
<code>
[start of pydantic/datetime_parse.py]
1 """
2 Functions to parse datetime objects.
3
4 We're using regular expressions rather than time.strptime because:
5 - They provide both validation and parsing.
6 - They're more flexible for datetimes.
7 - The date/datetime/time constructors produce friendlier error messages.
8
9 Stolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at
10 9718fa2e8abe430c3526a9278dd976443d4ae3c6
11
12 Changed to:
13 * use standard python datetime types not django.utils.timezone
14 * raise ValueError when regex doesn't match rather than returning None
15 * support parsing unix timestamps for dates and datetimes
16 """
17 import re
18 from datetime import date, datetime, time, timedelta, timezone
19 from typing import Union
20
21 from . import errors
22 from .utils import change_exception
23
24 date_re = re.compile(r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$')
25
26 time_re = re.compile(
27 r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})' r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
28 )
29
30 datetime_re = re.compile(
31 r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
32 r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
33 r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
34 r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
35 )
36
37 standard_duration_re = re.compile(
38 r'^'
39 r'(?:(?P<days>-?\d+) (days?, )?)?'
40 r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
41 r'(?:(?P<minutes>-?\d+):)?'
42 r'(?P<seconds>-?\d+)'
43 r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
44 r'$'
45 )
46
47 # Support the sections of ISO 8601 date representation that are accepted by timedelta
48 iso8601_duration_re = re.compile(
49 r'^(?P<sign>[-+]?)'
50 r'P'
51 r'(?:(?P<days>\d+(.\d+)?)D)?'
52 r'(?:T'
53 r'(?:(?P<hours>\d+(.\d+)?)H)?'
54 r'(?:(?P<minutes>\d+(.\d+)?)M)?'
55 r'(?:(?P<seconds>\d+(.\d+)?)S)?'
56 r')?'
57 r'$'
58 )
59
60 EPOCH = datetime(1970, 1, 1)
61 MS_WATERSHED = int(1e11) # if greater than this, the number is in ms (in seconds this is 3rd March 5138)
62 StrIntFloat = Union[str, int, float]
63
64
65 def get_numeric(value: StrIntFloat):
66 if isinstance(value, (int, float)):
67 return value
68 try:
69 return int(value)
70 except ValueError:
71 pass
72 try:
73 return float(value)
74 except ValueError:
75 pass
76
77
78 def from_unix_seconds(seconds: int) -> datetime:
79 while seconds > MS_WATERSHED:
80 seconds /= 1000
81 dt = EPOCH + timedelta(seconds=seconds)
82 return dt.replace(tzinfo=timezone.utc)
83
84
85 def parse_date(value: Union[date, StrIntFloat]) -> date:
86 """
87 Parse a date/int/float/string and return a datetime.date.
88
89 Raise ValueError if the input is well formatted but not a valid date.
90 Raise ValueError if the input isn't well formatted.
91 """
92 if isinstance(value, date):
93 return value
94
95 number = get_numeric(value)
96 if number is not None:
97 return from_unix_seconds(number).date()
98
99 match = date_re.match(value)
100 if not match:
101 raise errors.DateError()
102
103 kw = {k: int(v) for k, v in match.groupdict().items()}
104
105 with change_exception(errors.DateError, ValueError):
106 return date(**kw)
107
108
109 def parse_time(value: Union[time, str]) -> time:
110 """
111 Parse a time/string and return a datetime.time.
112
113 This function doesn't support time zone offsets.
114
115 Raise ValueError if the input is well formatted but not a valid time.
116 Raise ValueError if the input isn't well formatted, in particular if it contains an offset.
117 """
118 if isinstance(value, time):
119 return value
120
121 match = time_re.match(value)
122 if not match:
123 raise errors.TimeError()
124
125 kw = match.groupdict()
126 if kw['microsecond']:
127 kw['microsecond'] = kw['microsecond'].ljust(6, '0')
128
129 kw = {k: int(v) for k, v in kw.items() if v is not None}
130
131 with change_exception(errors.TimeError, ValueError):
132 return time(**kw)
133
134
135 def parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:
136 """
137 Parse a datetime/int/float/string and return a datetime.datetime.
138
139 This function supports time zone offsets. When the input contains one,
140 the output uses a timezone with a fixed offset from UTC.
141
142 Raise ValueError if the input is well formatted but not a valid datetime.
143 Raise ValueError if the input isn't well formatted.
144 """
145 if isinstance(value, datetime):
146 return value
147
148 number = get_numeric(value)
149 if number is not None:
150 return from_unix_seconds(number)
151
152 match = datetime_re.match(value)
153 if not match:
154 raise errors.DateTimeError()
155
156 kw = match.groupdict()
157 if kw['microsecond']:
158 kw['microsecond'] = kw['microsecond'].ljust(6, '0')
159
160 tzinfo = kw.pop('tzinfo')
161 if tzinfo == 'Z':
162 tzinfo = timezone.utc
163 elif tzinfo is not None:
164 offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
165 offset = 60 * int(tzinfo[1:3]) + offset_mins
166 if tzinfo[0] == '-':
167 offset = -offset
168 tzinfo = timezone(timedelta(minutes=offset))
169
170 kw = {k: int(v) for k, v in kw.items() if v is not None}
171 kw['tzinfo'] = tzinfo
172
173 with change_exception(errors.DateTimeError, ValueError):
174 return datetime(**kw)
175
176
177 def parse_duration(value: StrIntFloat) -> timedelta:
178 """
179 Parse a duration int/float/string and return a datetime.timedelta.
180
181 The preferred format for durations in Django is '%d %H:%M:%S.%f'.
182
183 Also supports ISO 8601 representation.
184 """
185 if isinstance(value, timedelta):
186 return value
187
188 if isinstance(value, (int, float)):
189 # bellow code requires a string
190 value = str(value)
191
192 match = standard_duration_re.match(value) or iso8601_duration_re.match(value)
193 if not match:
194 raise errors.DurationError()
195
196 kw = match.groupdict()
197 sign = -1 if kw.pop('sign', '+') == '-' else 1
198 if kw.get('microseconds'):
199 kw['microseconds'] = kw['microseconds'].ljust(6, '0')
200
201 if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
202 kw['microseconds'] = '-' + kw['microseconds']
203
204 kw = {k: float(v) for k, v in kw.items() if v is not None}
205
206 return sign * timedelta(**kw)
207
[end of pydantic/datetime_parse.py]
[start of pydantic/version.py]
1 from distutils.version import StrictVersion
2
3 __all__ = ['VERSION']
4
5 VERSION = StrictVersion('0.14')
6
[end of pydantic/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pydantic/datetime_parse.py b/pydantic/datetime_parse.py
--- a/pydantic/datetime_parse.py
+++ b/pydantic/datetime_parse.py
@@ -90,7 +90,10 @@
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, date):
- return value
+ if isinstance(value, datetime):
+ return value.date()
+ else:
+ return value
number = get_numeric(value)
if number is not None:
diff --git a/pydantic/version.py b/pydantic/version.py
--- a/pydantic/version.py
+++ b/pydantic/version.py
@@ -2,4 +2,4 @@
__all__ = ['VERSION']
-VERSION = StrictVersion('0.14')
+VERSION = StrictVersion('0.15a1')
|
{"golden_diff": "diff --git a/pydantic/datetime_parse.py b/pydantic/datetime_parse.py\n--- a/pydantic/datetime_parse.py\n+++ b/pydantic/datetime_parse.py\n@@ -90,7 +90,10 @@\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n- return value\n+ if isinstance(value, datetime):\n+ return value.date()\n+ else:\n+ return value\n \n number = get_numeric(value)\n if number is not None:\ndiff --git a/pydantic/version.py b/pydantic/version.py\n--- a/pydantic/version.py\n+++ b/pydantic/version.py\n@@ -2,4 +2,4 @@\n \n __all__ = ['VERSION']\n \n-VERSION = StrictVersion('0.14')\n+VERSION = StrictVersion('0.15a1')\n", "issue": "Date annotation is parsed inconsistently depending on input data.\n# Bug\r\nI've tried compare two date fields of pydantic model's instances.\r\nBut I got: `TypeError: can't compare datetime.datetime to datetime.date`\r\n\r\nI figured out that its a bug when parsing field annotated as date providing a datetime value.\r\n\r\nSince one of my instances is created from data stored in persistence (which is typed as \"datetime\") and the other instance is created from a CSV, the first is treated as a datetime and the other is treated as date.\r\n\r\nFor bugs/questions:\r\n* OS: **Linux 64-bit **\r\n* Python version `import sys; print(sys.version)`: **3.7.1 (default, Oct 22 2018, 10:41:28) [GCC 8.2.1 20180831]**\r\n* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**\r\n\r\n```py\r\nfrom datetime import date, datetime\r\n\r\nfrom pydantic import BaseModel\r\n\r\n\r\nclass DateRange(BaseModel):\r\n dt_start: date\r\n dt_finish: date\r\n\r\n\r\n\r\ndata = DateRange(dt_start=datetime(2017,11, 7, 12, 14), dt_finish=datetime(2017,11, 26, 12, 14))\r\ndata_ = DateRange(dt_start='2017-01-01', dt_finish='2018-01-01')\r\n\r\nprint(type(data.dt_finish), type(data.dt_start))\r\nprint(type(data_.dt_finish), type(data_.dt_start))\r\n...\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nFunctions to parse datetime objects.\n\nWe're using regular expressions rather than time.strptime because:\n- They provide both validation and parsing.\n- They're more flexible for datetimes.\n- The date/datetime/time constructors produce friendlier error messages.\n\nStolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at\n9718fa2e8abe430c3526a9278dd976443d4ae3c6\n\nChanged to:\n* use standard python datetime types not django.utils.timezone\n* raise ValueError when regex doesn't match rather than returning None\n* support parsing unix timestamps for dates and datetimes\n\"\"\"\nimport re\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom typing import Union\n\nfrom . 
import errors\nfrom .utils import change_exception\n\ndate_re = re.compile(r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})$')\n\ntime_re = re.compile(\n r'(?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})' r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n)\n\ndatetime_re = re.compile(\n r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})'\n r'[T ](?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})'\n r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n r'(?P<tzinfo>Z|[+-]\\d{2}(?::?\\d{2})?)?$'\n)\n\nstandard_duration_re = re.compile(\n r'^'\n r'(?:(?P<days>-?\\d+) (days?, )?)?'\n r'((?:(?P<hours>-?\\d+):)(?=\\d+:\\d+))?'\n r'(?:(?P<minutes>-?\\d+):)?'\n r'(?P<seconds>-?\\d+)'\n r'(?:\\.(?P<microseconds>\\d{1,6})\\d{0,6})?'\n r'$'\n)\n\n# Support the sections of ISO 8601 date representation that are accepted by timedelta\niso8601_duration_re = re.compile(\n r'^(?P<sign>[-+]?)'\n r'P'\n r'(?:(?P<days>\\d+(.\\d+)?)D)?'\n r'(?:T'\n r'(?:(?P<hours>\\d+(.\\d+)?)H)?'\n r'(?:(?P<minutes>\\d+(.\\d+)?)M)?'\n r'(?:(?P<seconds>\\d+(.\\d+)?)S)?'\n r')?'\n r'$'\n)\n\nEPOCH = datetime(1970, 1, 1)\nMS_WATERSHED = int(1e11) # if greater than this, the number is in ms (in seconds this is 3rd March 5138)\nStrIntFloat = Union[str, int, float]\n\n\ndef get_numeric(value: StrIntFloat):\n if isinstance(value, (int, float)):\n return value\n try:\n return int(value)\n except ValueError:\n pass\n try:\n return float(value)\n except ValueError:\n pass\n\n\ndef from_unix_seconds(seconds: int) -> datetime:\n while seconds > MS_WATERSHED:\n seconds /= 1000\n dt = EPOCH + timedelta(seconds=seconds)\n return dt.replace(tzinfo=timezone.utc)\n\n\ndef parse_date(value: Union[date, StrIntFloat]) -> date:\n \"\"\"\n Parse a date/int/float/string and return a datetime.date.\n\n Raise ValueError if the input is well formatted but not a valid date.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n return value\n\n number = get_numeric(value)\n if number is not None:\n return from_unix_seconds(number).date()\n\n match = date_re.match(value)\n if not match:\n raise errors.DateError()\n\n kw = {k: int(v) for k, v in match.groupdict().items()}\n\n with change_exception(errors.DateError, ValueError):\n return date(**kw)\n\n\ndef parse_time(value: Union[time, str]) -> time:\n \"\"\"\n Parse a time/string and return a datetime.time.\n\n This function doesn't support time zone offsets.\n\n Raise ValueError if the input is well formatted but not a valid time.\n Raise ValueError if the input isn't well formatted, in particular if it contains an offset.\n \"\"\"\n if isinstance(value, time):\n return value\n\n match = time_re.match(value)\n if not match:\n raise errors.TimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n kw = {k: int(v) for k, v in kw.items() if v is not None}\n\n with change_exception(errors.TimeError, ValueError):\n return time(**kw)\n\n\ndef parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:\n \"\"\"\n Parse a datetime/int/float/string and return a datetime.datetime.\n\n This function supports time zone offsets. 
When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raise ValueError if the input is well formatted but not a valid datetime.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, datetime):\n return value\n\n number = get_numeric(value)\n if number is not None:\n return from_unix_seconds(number)\n\n match = datetime_re.match(value)\n if not match:\n raise errors.DateTimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = kw.pop('tzinfo')\n if tzinfo == 'Z':\n tzinfo = timezone.utc\n elif tzinfo is not None:\n offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0\n offset = 60 * int(tzinfo[1:3]) + offset_mins\n if tzinfo[0] == '-':\n offset = -offset\n tzinfo = timezone(timedelta(minutes=offset))\n\n kw = {k: int(v) for k, v in kw.items() if v is not None}\n kw['tzinfo'] = tzinfo\n\n with change_exception(errors.DateTimeError, ValueError):\n return datetime(**kw)\n\n\ndef parse_duration(value: StrIntFloat) -> timedelta:\n \"\"\"\n Parse a duration int/float/string and return a datetime.timedelta.\n\n The preferred format for durations in Django is '%d %H:%M:%S.%f'.\n\n Also supports ISO 8601 representation.\n \"\"\"\n if isinstance(value, timedelta):\n return value\n\n if isinstance(value, (int, float)):\n # bellow code requires a string\n value = str(value)\n\n match = standard_duration_re.match(value) or iso8601_duration_re.match(value)\n if not match:\n raise errors.DurationError()\n\n kw = match.groupdict()\n sign = -1 if kw.pop('sign', '+') == '-' else 1\n if kw.get('microseconds'):\n kw['microseconds'] = kw['microseconds'].ljust(6, '0')\n\n if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):\n kw['microseconds'] = '-' + kw['microseconds']\n\n kw = {k: float(v) for k, v in kw.items() if v is not None}\n\n return sign * timedelta(**kw)\n", "path": "pydantic/datetime_parse.py"}, {"content": "from distutils.version import StrictVersion\n\n__all__ = ['VERSION']\n\nVERSION = StrictVersion('0.14')\n", "path": "pydantic/version.py"}]}
| 3,275 | 190 |
gh_patches_debug_35871
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2156
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RSS feeds give a 504
For example:
- http://rsr.akvo.org/rss/org-updates/273/
- http://rsr.akvo.org/rss/updates/788/
</issue>
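For context, the golden diff at the end of this record mitigates the timeouts by halving each feed from 50 to 25 items. The toy sketch below illustrates that "newest N only" capping without any framework; the constant name and in-memory list are stand-ins (on a real Django queryset the slice becomes a SQL LIMIT).

```py
# Toy illustration only: cap how many updates a feed serialises.
FEED_ITEM_LIMIT = 25  # value chosen by the fix; the name is invented here

def latest_updates(all_updates, limit=FEED_ITEM_LIMIT):
    # newest first, then cap the work the feed renderer has to do
    return sorted(all_updates, key=lambda u: u["id"], reverse=True)[:limit]

updates = [{"id": i, "title": f"update {i}"} for i in range(1, 1001)]
capped = latest_updates(updates)
assert len(capped) == FEED_ITEM_LIMIT
assert capped[0]["id"] == 1000
```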
<code>
[start of akvo/rsr/feeds.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import re
8
9 from xml.sax.saxutils import XMLGenerator
10
11 from django.contrib.syndication.views import FeedDoesNotExist, Feed
12 from django.core.urlresolvers import reverse
13 from django.shortcuts import get_object_or_404
14 from django.utils.feedgenerator import Rss201rev2Feed
15 from django.utils.translation import ugettext_lazy as _
16
17 from akvo.rsr.models import Project, ProjectUpdate, Organisation
18
19
20 def __dict_replace(s, d):
21 """Replace substrings of a string using a dictionary."""
22 for key, value in d.items():
23 s = s.replace(key, value)
24 return s
25
26 def __escape(data, entities):
27 # must do ampersand first
28 data = data.replace("&", "&")
29 data = data.replace(">", ">")
30 data = data.replace("<", "<")
31 if entities:
32 data = __dict_replace(data, entities)
33 return data
34
35 def escape(data, entities={}):
36 """Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped
37
38 Escape &, <, and > in a string of data.
39
40 You can escape other strings of data by passing a dictionary as
41 the optional entities parameter. The keys and values must all be
42 strings; each key will be replaced with its corresponding value.
43
44 """
45 # find character data, re.DOTALL includes linefeed in .
46 pattern = re.compile('<!\[CDATA\[.*\]\]>', re.DOTALL)
47 iterator = pattern.finditer(data)
48 start = 0
49 bits = []
50 for match in iterator:
51 #grab chunk before first match
52 bit = data[start:match.span()[0]]
53 bit = __escape(bit, entities)
54 bits.append(bit)
55 #grab match
56 bit = data[match.span()[0]:match.span()[1]]
57 bits.extend(bit)
58 start = match.span()[1]
59 # escape tail bit after last match
60 bit = data[start:]
61 bit = __escape(bit, entities)
62 bits.extend(bit)
63 data = ''.join(bits)
64 return data
65
66
67 class RSRSimplerXMLGenerator(XMLGenerator):
68 """subclassed to be able to call custom escape() function, see above
69 """
70 def characters(self, content):
71 self._write(escape(content))
72
73 def addQuickElement(self, name, contents=None, attrs=None):
74 "Convenience method for adding an element with no children"
75 if attrs is None: attrs = {}
76 self.startElement(name, attrs)
77 if contents is not None:
78 self.characters(contents)
79 self.endElement(name)
80
81
82 class RSRMediaRssFeed(Rss201rev2Feed):
83 def rss_attributes(self):
84 attrs = super(RSRMediaRssFeed, self).rss_attributes()
85 attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
86 attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'
87 return attrs
88
89 def add_item_elements(self, handler, item):
90 """Callback to add elements to each item (item/entry) element."""
91 super(RSRMediaRssFeed, self).add_item_elements(handler, item)
92
93 if 'media:title' in item:
94 handler.addQuickElement(u"media:title", item['title'])
95 if 'media:description' in item:
96 handler.addQuickElement(u"media:description", item['media:description'])
97 if 'media:credit' in item:
98 handler.addQuickElement(u"media:credit", item['media:credit'])
99
100 if 'content_url' in item:
101 content = dict(url=item['content_url'])
102 if 'content_width' in item:
103 content['width'] = str(item['content_width'])
104 if 'content_height' in item:
105 content['height'] = str(item['content_height'])
106 handler.addQuickElement(u"media:content", '', content)
107
108 if 'thumbnail_url' in item:
109 thumbnail = dict(url=item['thumbnail_url'])
110 if 'thumbnail_width' in item:
111 thumbnail['width'] = str(item['thumbnail_width'])
112 if 'thumbnail_height' in item:
113 thumbnail['height'] = str(item['thumbnail_height'])
114 handler.addQuickElement(u"media:thumbnail", '', thumbnail)
115
116 if 'keywords' in item:
117 handler.addQuickElement(u"media:keywords", item['keywords'])
118
119 def write(self, outfile, encoding):
120 handler = RSRSimplerXMLGenerator(outfile, encoding)
121 handler.startDocument()
122 handler.startElement(u"rss", self.rss_attributes())
123 handler.startElement(u"channel", self.root_attributes())
124 self.add_root_elements(handler)
125 self.write_items(handler)
126 self.endChannelElement(handler)
127 handler.endElement(u"rss")
128
129 class UpdateFeed(Feed):
130 """base class generating Update feeds
131 """
132 feed_type = RSRMediaRssFeed
133
134 def link(self, obj):
135 if not obj:
136 raise FeedDoesNotExist
137 return obj.get_absolute_url()
138
139 def item_link(self, item):
140 return item.get_absolute_url()
141
142 def item_title(self, item):
143 return item.title
144
145 def item_description(self, item):
146 try:
147 size = item.photo.size
148 return '<![CDATA[<p><a href="%s"><img src="%s" alt="" /></a></p><p>%s</p>]]>' % (
149 item.get_absolute_url(),
150 item.photo.thumbnail.absolute_url,
151 item.text,
152 )
153 except:
154 return item.text
155
156 def item_pubdate(self, item):
157 return item.created_at
158
159 def item_author_name(self, item):
160 return item.user.get_full_name()
161
162 def item_credit(self, item):
163 return item.photo_credit
164
165 def item_extra_kwargs(self, item):
166 """return a dictionary to the feedgenerator for each item to be added to the feed.
167 """
168 try:
169 size = item.photo.size
170 photo = item.photo
171 kwargs = {
172 'media:title': item.title,
173 'media:description': item.photo_caption,
174 'media:credit': item.photo_credit,
175 'content_url': photo.url,
176 'content_width': photo.width,
177 'content_height': photo.height,
178 'thumbnail_url': photo.thumbnail.absolute_url,
179 'thumbnail_width': photo.thumbnail.width(),
180 'thumbnail_height': photo.thumbnail.height(),
181 }
182 return kwargs
183 except:
184 return {}
185
186
187 class ProjectUpdates(UpdateFeed):
188 """RSS feed for last 50 RSR updates of a project."""
189 def get_object(self, request, project_id):
190 return Project.objects.get(pk__exact=project_id)
191
192 def title(self, obj):
193 return _(u'Akvo RSR project %(id)d: %(project_title)s') % {
194 'id': obj.id,
195 'project_title': obj.title
196 }
197
198 def description(self, obj):
199 return _(u'Project updates for project %(project_title)s') % {
200 'project_title': obj.title
201 }
202
203 def items(self, obj):
204 # Limited to 50 items to prevent gateway timeouts.
205 return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]
206
207
208 class OrganisationUpdates(UpdateFeed):
209 """RSS feed for last 50 RSR updates of an organisation."""
210 feed_type = RSRMediaRssFeed
211
212 def get_object(self, request, org_id):
213 return get_object_or_404(Organisation, id=int(org_id))
214
215 def title(self, obj):
216 return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}
217
218 def description(self, obj):
219 if obj.name == obj.long_name:
220 return _(u"Project updates for projects partnered by %(org_name)s") % {
221 'org_name': obj.name
222 }
223 else:
224 return _(
225 u"Project updates for projects partnered by %(org_name)s - %(long_name)s"
226 ) % {'org_name': obj.name, 'long_name': obj.long_name}
227
228 def items(self, obj):
229 # Limited to 50 items to prevent gateway timeouts.
230 return obj.published_projects().all_updates()[:50]
231
232 def item_title(self, item):
233 return _(
234 u'Project %(project_id)d - %(project_title)s: %(update_title)s'
235 ) % {
236 'project_id': item.project.id,
237 'project_title': item.project.title,
238 'update_title': item.title
239 }
240
241
242 class AllProjectUpdates(UpdateFeed):
243 """RSS feed for last 50 RSR updates."""
244 title = _(u'Last 50 RSR project updates')
245
246 def link(self):
247 return reverse('update-directory')
248
249 description = _(u'Project updates for all Akvo RSR projects')
250
251 def items(self):
252 # Limited to 50 items to prevent gateway timeouts.
253 return ProjectUpdate.objects.select_related().order_by('-id')[:50]
254
255 def item_title(self, item):
256 return _(
257 u'Project %(project_id)d - %(project_title)s: %(update_title)s'
258 ) % {
259 'project_id': item.project.id,
260 'project_title': item.project.title,
261 'update_title': item.title
262 }
263
[end of akvo/rsr/feeds.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/rsr/feeds.py b/akvo/rsr/feeds.py
--- a/akvo/rsr/feeds.py
+++ b/akvo/rsr/feeds.py
@@ -185,7 +185,7 @@
class ProjectUpdates(UpdateFeed):
- """RSS feed for last 50 RSR updates of a project."""
+ """RSS feed for last 25 RSR updates of a project."""
def get_object(self, request, project_id):
return Project.objects.get(pk__exact=project_id)
@@ -201,12 +201,12 @@
}
def items(self, obj):
- # Limited to 50 items to prevent gateway timeouts.
- return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]
+ # Limited to 25 items to prevent gateway timeouts.
+ return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]
class OrganisationUpdates(UpdateFeed):
- """RSS feed for last 50 RSR updates of an organisation."""
+ """RSS feed for last 25 RSR updates of an organisation."""
feed_type = RSRMediaRssFeed
def get_object(self, request, org_id):
@@ -226,8 +226,8 @@
) % {'org_name': obj.name, 'long_name': obj.long_name}
def items(self, obj):
- # Limited to 50 items to prevent gateway timeouts.
- return obj.published_projects().all_updates()[:50]
+ # Limited to 25 items to prevent gateway timeouts.
+ return obj.published_projects().all_updates()[:25]
def item_title(self, item):
return _(
@@ -240,8 +240,8 @@
class AllProjectUpdates(UpdateFeed):
- """RSS feed for last 50 RSR updates."""
- title = _(u'Last 50 RSR project updates')
+ """RSS feed for last 25 RSR updates."""
+ title = _(u'Last 25 RSR project updates')
def link(self):
return reverse('update-directory')
@@ -249,8 +249,8 @@
description = _(u'Project updates for all Akvo RSR projects')
def items(self):
- # Limited to 50 items to prevent gateway timeouts.
- return ProjectUpdate.objects.select_related().order_by('-id')[:50]
+ # Limited to 25 items to prevent gateway timeouts.
+ return ProjectUpdate.objects.select_related().order_by('-id')[:25]
def item_title(self, item):
return _(
|
{"golden_diff": "diff --git a/akvo/rsr/feeds.py b/akvo/rsr/feeds.py\n--- a/akvo/rsr/feeds.py\n+++ b/akvo/rsr/feeds.py\n@@ -185,7 +185,7 @@\n \n \n class ProjectUpdates(UpdateFeed):\n- \"\"\"RSS feed for last 50 RSR updates of a project.\"\"\"\n+ \"\"\"RSS feed for last 25 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n \n@@ -201,12 +201,12 @@\n }\n \n def items(self, obj):\n- # Limited to 50 items to prevent gateway timeouts.\n- return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]\n+ # Limited to 25 items to prevent gateway timeouts.\n+ return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]\n \n \n class OrganisationUpdates(UpdateFeed):\n- \"\"\"RSS feed for last 50 RSR updates of an organisation.\"\"\"\n+ \"\"\"RSS feed for last 25 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n \n def get_object(self, request, org_id):\n@@ -226,8 +226,8 @@\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n \n def items(self, obj):\n- # Limited to 50 items to prevent gateway timeouts.\n- return obj.published_projects().all_updates()[:50]\n+ # Limited to 25 items to prevent gateway timeouts.\n+ return obj.published_projects().all_updates()[:25]\n \n def item_title(self, item):\n return _(\n@@ -240,8 +240,8 @@\n \n \n class AllProjectUpdates(UpdateFeed):\n- \"\"\"RSS feed for last 50 RSR updates.\"\"\"\n- title = _(u'Last 50 RSR project updates')\n+ \"\"\"RSS feed for last 25 RSR updates.\"\"\"\n+ title = _(u'Last 25 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n@@ -249,8 +249,8 @@\n description = _(u'Project updates for all Akvo RSR projects')\n \n def items(self):\n- # Limited to 50 items to prevent gateway timeouts.\n- return ProjectUpdate.objects.select_related().order_by('-id')[:50]\n+ # Limited to 25 items to prevent gateway timeouts.\n+ return ProjectUpdate.objects.select_related().order_by('-id')[:25]\n \n def item_title(self, item):\n return _(\n", "issue": "RSS feeds give a 504\nFor example: \n- http://rsr.akvo.org/rss/org-updates/273/\n- http://rsr.akvo.org/rss/updates/788/\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module. 
\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport re\n\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.contrib.syndication.views import FeedDoesNotExist, Feed\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.rsr.models import Project, ProjectUpdate, Organisation\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\ndef __escape(data, entities):\n # must do ampersand first\n data = data.replace(\"&\", \"&\")\n data = data.replace(\">\", \">\")\n data = data.replace(\"<\", \"<\")\n if entities:\n data = __dict_replace(data, entities)\n return data\n\ndef escape(data, entities={}):\n \"\"\"Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped\n\n Escape &, <, and > in a string of data.\n\n You can escape other strings of data by passing a dictionary as\n the optional entities parameter. The keys and values must all be\n strings; each key will be replaced with its corresponding value.\n\n \"\"\"\n # find character data, re.DOTALL includes linefeed in .\n pattern = re.compile('<!\\[CDATA\\[.*\\]\\]>', re.DOTALL)\n iterator = pattern.finditer(data)\n start = 0\n bits = []\n for match in iterator:\n #grab chunk before first match\n bit = data[start:match.span()[0]]\n bit = __escape(bit, entities)\n bits.append(bit)\n #grab match\n bit = data[match.span()[0]:match.span()[1]]\n bits.extend(bit)\n start = match.span()[1]\n # escape tail bit after last match\n bit = data[start:]\n bit = __escape(bit, entities)\n bits.extend(bit)\n data = ''.join(bits)\n return data\n\n\nclass RSRSimplerXMLGenerator(XMLGenerator):\n \"\"\"subclassed to be able to call custom escape() function, see above\n \"\"\"\n def characters(self, content):\n self._write(escape(content))\n\n def addQuickElement(self, name, contents=None, attrs=None):\n \"Convenience method for adding an element with no children\"\n if attrs is None: attrs = {}\n self.startElement(name, attrs)\n if contents is not None:\n self.characters(contents)\n self.endElement(name)\n\n\nclass RSRMediaRssFeed(Rss201rev2Feed):\n def rss_attributes(self):\n attrs = super(RSRMediaRssFeed, self).rss_attributes()\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'\n return attrs\n\n def add_item_elements(self, handler, item):\n \"\"\"Callback to add elements to each item (item/entry) element.\"\"\"\n super(RSRMediaRssFeed, self).add_item_elements(handler, item)\n\n if 'media:title' in item:\n handler.addQuickElement(u\"media:title\", item['title'])\n if 'media:description' in item:\n handler.addQuickElement(u\"media:description\", item['media:description'])\n if 'media:credit' in item:\n handler.addQuickElement(u\"media:credit\", item['media:credit'])\n\n if 'content_url' in item:\n content = dict(url=item['content_url'])\n if 'content_width' in item:\n content['width'] = str(item['content_width'])\n if 'content_height' in item:\n content['height'] = str(item['content_height'])\n handler.addQuickElement(u\"media:content\", '', content)\n\n if 'thumbnail_url' in item:\n thumbnail = dict(url=item['thumbnail_url'])\n if 'thumbnail_width' in item:\n thumbnail['width'] = str(item['thumbnail_width'])\n if 'thumbnail_height' in 
item:\n thumbnail['height'] = str(item['thumbnail_height'])\n handler.addQuickElement(u\"media:thumbnail\", '', thumbnail)\n\n if 'keywords' in item:\n handler.addQuickElement(u\"media:keywords\", item['keywords'])\n\n def write(self, outfile, encoding):\n handler = RSRSimplerXMLGenerator(outfile, encoding)\n handler.startDocument()\n handler.startElement(u\"rss\", self.rss_attributes())\n handler.startElement(u\"channel\", self.root_attributes())\n self.add_root_elements(handler)\n self.write_items(handler)\n self.endChannelElement(handler)\n handler.endElement(u\"rss\")\n\nclass UpdateFeed(Feed):\n \"\"\"base class generating Update feeds\n \"\"\"\n feed_type = RSRMediaRssFeed\n\n def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()\n\n def item_link(self, item):\n return item.get_absolute_url()\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n try:\n size = item.photo.size\n return '<![CDATA[<p><a href=\"%s\"><img src=\"%s\" alt=\"\" /></a></p><p>%s</p>]]>' % (\n item.get_absolute_url(),\n item.photo.thumbnail.absolute_url,\n item.text,\n )\n except:\n return item.text\n\n def item_pubdate(self, item):\n return item.created_at\n\n def item_author_name(self, item):\n return item.user.get_full_name()\n\n def item_credit(self, item):\n return item.photo_credit\n\n def item_extra_kwargs(self, item):\n \"\"\"return a dictionary to the feedgenerator for each item to be added to the feed.\n \"\"\"\n try:\n size = item.photo.size\n photo = item.photo\n kwargs = {\n 'media:title': item.title,\n 'media:description': item.photo_caption,\n 'media:credit': item.photo_credit,\n 'content_url': photo.url,\n 'content_width': photo.width,\n 'content_height': photo.height,\n 'thumbnail_url': photo.thumbnail.absolute_url,\n 'thumbnail_width': photo.thumbnail.width(),\n 'thumbnail_height': photo.thumbnail.height(),\n }\n return kwargs\n except:\n return {}\n\n\nclass ProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 50 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n\n def title(self, obj):\n return _(u'Akvo RSR project %(id)d: %(project_title)s') % {\n 'id': obj.id,\n 'project_title': obj.title\n }\n\n def description(self, obj):\n return _(u'Project updates for project %(project_title)s') % {\n 'project_title': obj.title\n }\n\n def items(self, obj):\n # Limited to 50 items to prevent gateway timeouts.\n return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]\n\n\nclass OrganisationUpdates(UpdateFeed):\n \"\"\"RSS feed for last 50 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n\n def get_object(self, request, org_id):\n return get_object_or_404(Organisation, id=int(org_id))\n\n def title(self, obj):\n return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}\n\n def description(self, obj):\n if obj.name == obj.long_name:\n return _(u\"Project updates for projects partnered by %(org_name)s\") % {\n 'org_name': obj.name\n }\n else:\n return _(\n u\"Project updates for projects partnered by %(org_name)s - %(long_name)s\"\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n\n def items(self, obj):\n # Limited to 50 items to prevent gateway timeouts.\n return obj.published_projects().all_updates()[:50]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 
'update_title': item.title\n }\n\n\nclass AllProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 50 RSR updates.\"\"\"\n title = _(u'Last 50 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n\n description = _(u'Project updates for all Akvo RSR projects')\n\n def items(self):\n # Limited to 50 items to prevent gateway timeouts.\n return ProjectUpdate.objects.select_related().order_by('-id')[:50]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n", "path": "akvo/rsr/feeds.py"}]}
| 3,340 | 604 |
gh_patches_debug_224
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-7390
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PYTEST WARNING] Horn schunk
### Feature description
@skief @poyea Please could you resolve this warning
```
computer_vision/horn_schunck.py:15
/home/runner/work/Python/Python/computer_vision/horn_schunck.py:15:
DeprecationWarning: Please use `convolve` from the `scipy.ndimage` namespace, the `scipy.ndimage.filters` namespace is deprecated.
from scipy.ndimage.filters import convolve
```
origin: #7211
</issue>
<code>
[start of computer_vision/horn_schunck.py]
1 """
2 The Horn-Schunck method estimates the optical flow for every single pixel of
3 a sequence of images.
4 It works by assuming brightness constancy between two consecutive frames
5 and smoothness in the optical flow.
6
7 Useful resources:
8 Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method
9 Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf
10 """
11
12 from typing import SupportsIndex
13
14 import numpy as np
15 from scipy.ndimage.filters import convolve
16
17
18 def warp(
19 image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray
20 ) -> np.ndarray:
21 """
22 Warps the pixels of an image into a new image using the horizontal and vertical
23 flows.
24 Pixels that are warped from an invalid location are set to 0.
25
26 Parameters:
27 image: Grayscale image
28 horizontal_flow: Horizontal flow
29 vertical_flow: Vertical flow
30
31 Returns: Warped image
32
33 >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \
34 np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \
35 np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]))
36 array([[0, 0, 0],
37 [3, 1, 0],
38 [0, 2, 3]])
39 """
40 flow = np.stack((horizontal_flow, vertical_flow), 2)
41
42 # Create a grid of all pixel coordinates and subtract the flow to get the
43 # target pixels coordinates
44 grid = np.stack(
45 np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2
46 )
47 grid = np.round(grid - flow).astype(np.int32)
48
49 # Find the locations outside of the original image
50 invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]]))
51 grid[invalid] = 0
52
53 warped = image[grid[:, :, 1], grid[:, :, 0]]
54
55 # Set pixels at invalid locations to 0
56 warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0
57
58 return warped
59
60
61 def horn_schunck(
62 image0: np.ndarray,
63 image1: np.ndarray,
64 num_iter: SupportsIndex,
65 alpha: float | None = None,
66 ) -> tuple[np.ndarray, np.ndarray]:
67 """
68 This function performs the Horn-Schunck algorithm and returns the estimated
69 optical flow. It is assumed that the input images are grayscale and
70 normalized to be in [0, 1].
71
72 Parameters:
73 image0: First image of the sequence
74 image1: Second image of the sequence
75 alpha: Regularization constant
76 num_iter: Number of iterations performed
77
78 Returns: estimated horizontal & vertical flow
79
80 >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \
81 np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\
82 astype(np.int32)
83 array([[[ 0, -1, -1],
84 [ 0, -1, -1]],
85 <BLANKLINE>
86 [[ 0, 0, 0],
87 [ 0, 0, 0]]], dtype=int32)
88 """
89 if alpha is None:
90 alpha = 0.1
91
92 # Initialize flow
93 horizontal_flow = np.zeros_like(image0)
94 vertical_flow = np.zeros_like(image0)
95
96 # Prepare kernels for the calculation of the derivatives and the average velocity
97 kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25
98 kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25
99 kernel_t = np.array([[1, 1], [1, 1]]) * 0.25
100 kernel_laplacian = np.array(
101 [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]]
102 )
103
104 # Iteratively refine the flow
105 for _ in range(num_iter):
106 warped_image = warp(image0, horizontal_flow, vertical_flow)
107 derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x)
108 derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y)
109 derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t)
110
111 avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian)
112 avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian)
113
114 # This updates the flow as proposed in the paper (Step 12)
115 update = (
116 derivative_x * avg_horizontal_velocity
117 + derivative_y * avg_vertical_velocity
118 + derivative_t
119 )
120 update = update / (alpha**2 + derivative_x**2 + derivative_y**2)
121
122 horizontal_flow = avg_horizontal_velocity - derivative_x * update
123 vertical_flow = avg_vertical_velocity - derivative_y * update
124
125 return horizontal_flow, vertical_flow
126
127
128 if __name__ == "__main__":
129 import doctest
130
131 doctest.testmod()
132
[end of computer_vision/horn_schunck.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py
--- a/computer_vision/horn_schunck.py
+++ b/computer_vision/horn_schunck.py
@@ -12,7 +12,7 @@
from typing import SupportsIndex
import numpy as np
-from scipy.ndimage.filters import convolve
+from scipy.ndimage import convolve
def warp(
|
{"golden_diff": "diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py\n--- a/computer_vision/horn_schunck.py\n+++ b/computer_vision/horn_schunck.py\n@@ -12,7 +12,7 @@\n from typing import SupportsIndex\n \n import numpy as np\n-from scipy.ndimage.filters import convolve\n+from scipy.ndimage import convolve\n \n \n def warp(\n", "issue": "[PYTEST WARNING] Horn schunk\n### Feature description\r\n\r\n@skief @poyea Please could you resolve this warning\r\n```\r\ncomputer_vision/horn_schunck.py:15\r\n /home/runner/work/Python/Python/computer_vision/horn_schunck.py:15:\r\n DeprecationWarning: Please use `convolve` from the `scipy.ndimage` namespace, the `scipy.ndimage.filters` namespace is deprecated.\r\n from scipy.ndimage.filters import convolve\r\n```\r\n\r\norigin: #7211\n", "before_files": [{"content": "\"\"\"\n The Horn-Schunck method estimates the optical flow for every single pixel of\n a sequence of images.\n It works by assuming brightness constancy between two consecutive frames\n and smoothness in the optical flow.\n\n Useful resources:\n Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method\n Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf\n\"\"\"\n\nfrom typing import SupportsIndex\n\nimport numpy as np\nfrom scipy.ndimage.filters import convolve\n\n\ndef warp(\n image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray\n) -> np.ndarray:\n \"\"\"\n Warps the pixels of an image into a new image using the horizontal and vertical\n flows.\n Pixels that are warped from an invalid location are set to 0.\n\n Parameters:\n image: Grayscale image\n horizontal_flow: Horizontal flow\n vertical_flow: Vertical flow\n\n Returns: Warped image\n\n >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \\\n np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \\\n np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]))\n array([[0, 0, 0],\n [3, 1, 0],\n [0, 2, 3]])\n \"\"\"\n flow = np.stack((horizontal_flow, vertical_flow), 2)\n\n # Create a grid of all pixel coordinates and subtract the flow to get the\n # target pixels coordinates\n grid = np.stack(\n np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2\n )\n grid = np.round(grid - flow).astype(np.int32)\n\n # Find the locations outside of the original image\n invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]]))\n grid[invalid] = 0\n\n warped = image[grid[:, :, 1], grid[:, :, 0]]\n\n # Set pixels at invalid locations to 0\n warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0\n\n return warped\n\n\ndef horn_schunck(\n image0: np.ndarray,\n image1: np.ndarray,\n num_iter: SupportsIndex,\n alpha: float | None = None,\n) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This function performs the Horn-Schunck algorithm and returns the estimated\n optical flow. 
It is assumed that the input images are grayscale and\n normalized to be in [0, 1].\n\n Parameters:\n image0: First image of the sequence\n image1: Second image of the sequence\n alpha: Regularization constant\n num_iter: Number of iterations performed\n\n Returns: estimated horizontal & vertical flow\n\n >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \\\n np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\\\n astype(np.int32)\n array([[[ 0, -1, -1],\n [ 0, -1, -1]],\n <BLANKLINE>\n [[ 0, 0, 0],\n [ 0, 0, 0]]], dtype=int32)\n \"\"\"\n if alpha is None:\n alpha = 0.1\n\n # Initialize flow\n horizontal_flow = np.zeros_like(image0)\n vertical_flow = np.zeros_like(image0)\n\n # Prepare kernels for the calculation of the derivatives and the average velocity\n kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25\n kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25\n kernel_t = np.array([[1, 1], [1, 1]]) * 0.25\n kernel_laplacian = np.array(\n [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]]\n )\n\n # Iteratively refine the flow\n for _ in range(num_iter):\n warped_image = warp(image0, horizontal_flow, vertical_flow)\n derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x)\n derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y)\n derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t)\n\n avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian)\n avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian)\n\n # This updates the flow as proposed in the paper (Step 12)\n update = (\n derivative_x * avg_horizontal_velocity\n + derivative_y * avg_vertical_velocity\n + derivative_t\n )\n update = update / (alpha**2 + derivative_x**2 + derivative_y**2)\n\n horizontal_flow = avg_horizontal_velocity - derivative_x * update\n vertical_flow = avg_vertical_velocity - derivative_y * update\n\n return horizontal_flow, vertical_flow\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "computer_vision/horn_schunck.py"}]}
| 2,242 | 101 |
gh_patches_debug_21803
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1827
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GTK animated widgets freeze until first resize
### Describe the bug
GTK Widgets that contain an "animated" component - e.g., Switch (the slide left/right animation) or an indeterminate ProgressBar (pulse animation) won't animate when the window is first drawn. However, if you resize the window, the animation will resume, including any historical state.
### Steps to reproduce
1. Run the toga_switch example.
Try to toggle the "initial state" button. The color of the switch will change, but the switch itself won't "slide".
Also worth note: The exterior border of the "change label" button is very slightly clipped.
2. Run the progressbar example
The indeterminate progress bar may not start.
Toggle the Running Mode and Indeterminate Mode buttons. With sufficient toggling, you will eventually be able to cause the running indeterminate progress bar to stop animating.
### Expected behavior
Animated widgets should consistently render.
### Screenshots
_No response_
### Environment
- Operating System: Linux (any; tested on Fedora 36 and Ubuntu 22.04)
- Python version: 3.10 (but likely any)
- Software versions:
- Toga: 0.3.0+; tested at 235ff2e, but anything after the landing of #1794 will show the problem
### Logs
```
```
### Additional context
The problem appears to be related to the GTK Container fixes introduced in #1794.
In both examples listed above, I have found that the *first* time I run examples after making a code change, it *sometimes* works. However, on second and subsequent runs, the problem reliably occurs. There may be a timing issue at play - maybe the slowdown of PYC recompilation is sufficient to mask event order problems?
A suboptimal fix: if the optimisation on L166 that only does a refresh() `if resized or self.needs_redraw` is removed (so a lot more refreshes are performed), the problem goes away
A partial fix: If `window` listens for the `window-state-event` signal, and marks the container dirty when that event occurs, the switch demo works. The switches on the progress bar demo are also fixed. However, the animation of the indeterminate progress bar *isn't* fixed (at least, not consistently)
This suggests that the issue is a signal that constitutes a "dirty" window, but isn't being treated as a dirty signal.
</issue>
<code>
[start of gtk/src/toga_gtk/container.py]
1 from .libs import Gdk, Gtk
2
3
4 class TogaContainer(Gtk.Fixed):
5 """A GTK container widget implementing Toga's layout.
6
7 This is a GTK widget, with no Toga interface manifestation.
8 """
9
10 def __init__(self):
11 super().__init__()
12 self._content = None
13 self.min_width = 100
14 self.min_height = 100
15
16 # GDK/GTK always renders at 96dpi. When HiDPI mode is enabled, it is
17 # managed at the compositor level. See
18 # https://wiki.archlinux.org/index.php/HiDPI#GDK_3_(GTK_3) for details
19 self.dpi = 96
20 self.baseline_dpi = self.dpi
21
22 # The dirty widgets are the set of widgets that are known to need
23 # re-hinting before any redraw occurs.
24 self._dirty_widgets = set()
25
26 # A flag that can be used to explicitly flag that a redraw is required.
27 self.needs_redraw = True
28
29 def make_dirty(self, widget=None):
30 """Mark the container (or a specific widget in the container) as dirty.
31
32 :param widget: If provided, this widget will be rehinted before the next layout.
33 """
34 self.needs_redraw = True
35 if widget is not None:
36 self._dirty_widgets.add(widget)
37 self.queue_resize()
38
39 @property
40 def width(self):
41 """The display width of the container.
42
43 If the container doesn't have any content yet, the width is 0.
44 """
45 if self._content is None:
46 return 0
47 return self.get_allocated_width()
48
49 @property
50 def height(self):
51 """The display height of the container.
52
53 If the container doesn't have any content yet, the height is 0.
54 """
55 if self._content is None:
56 return 0
57 return self.get_allocated_height()
58
59 @property
60 def content(self):
61 """The Toga implementation widget that is the root content of this
62 container.
63
64 All children of the root content will also be added to the container as
65 a result of assigning content.
66
67 If the container already has content, the old content will be replaced.
68 The old root content and all it's children will be removed from the
69 container.
70 """
71 return self._content
72
73 @content.setter
74 def content(self, widget):
75 if self._content:
76 self._content.container = None
77
78 self._content = widget
79 if widget:
80 widget.container = self
81
82 def recompute(self):
83 """Rehint and re-layout the container's content, if necessary.
84
85 Any widgets known to be dirty will be rehinted. The minimum
86 possible layout size for the container will also be recomputed.
87 """
88 if self._content and self.needs_redraw:
89 # If any of the widgets have been marked as dirty,
90 # recompute their bounds, and re-evaluate the minimum
91 # allowed size fo the layout.
92 while self._dirty_widgets:
93 widget = self._dirty_widgets.pop()
94 widget.rehint()
95
96 # Compute the layout using a 0-size container
97 self._content.interface.style.layout(
98 self._content.interface, TogaContainer()
99 )
100
101 # print(" computed min layout", self._content.interface.layout)
102 self.min_width = self._content.interface.layout.width
103 self.min_height = self._content.interface.layout.height
104
105 def do_get_preferred_width(self):
106 """Return (recomputing if necessary) the preferred width for the
107 container.
108
109 The preferred size of the container is it's minimum size. This
110 preference will be overridden with the layout size when the layout is
111 applied.
112
113 If the container does not yet have content, the minimum width is set to
114 0.
115 """
116 # print("GET PREFERRED WIDTH", self._content)
117 if self._content is None:
118 return 0, 0
119
120 # Ensure we have an accurate min layout size
121 self.recompute()
122
123 # The container will conform to the size of the allocation it is given,
124 # so the min and preferred size are the same.
125 return self.min_width, self.min_width
126
127 def do_get_preferred_height(self):
128 """Return (recomputing if necessary) the preferred height for the
129 container.
130
131 The preferred size of the container is it's minimum size. This
132 preference will be overridden with the layout size when the
133 layout is applied.
134
135 If the container does not yet have content, the minimum height
136 is set to 0.
137 """
138 # print("GET PREFERRED HEIGHT", self._content)
139 if self._content is None:
140 return 0, 0
141
142 # Ensure we have an accurate min layout size
143 self.recompute()
144
145 # The container will conform to the size of the allocation it is given,
146 # so the min and preferred size are the same.
147 return self.min_height, self.min_height
148
149 def do_size_allocate(self, allocation):
150 """Perform the actual layout for the widget, and all it's children.
151
152 The container will assume whatever size it has been given by GTK -
153 usually the full space of the window that holds the container.
154 The layout will then be re-computed based on this new available size,
155 and that new geometry will be applied to all child widgets of the
156 container.
157 """
158 # print(self._content, f"Container layout {allocation.width}x{allocation.height} @ {allocation.x}x{allocation.y}")
159
160 # The container will occupy the full space it has been allocated.
161 resized = (allocation.width, allocation.height) != (self.width, self.height)
162 self.set_allocation(allocation)
163
164 # This function may be called in response to irrelevant events like button clicks,
165 # so only refresh if we really need to.
166 if self._content and (resized or self.needs_redraw):
167 # Re-evaluate the layout using the allocation size as the basis for geometry
168 # print("REFRESH LAYOUT", allocation.width, allocation.height)
169 self._content.interface.refresh()
170
171 # WARNING! This is the list of children of the *container*, not
172 # the Toga widget. Toga maintains a tree of children; all nodes
173 # in that tree are direct children of the container.
174 for widget in self.get_children():
175 if not widget.get_visible():
176 # print(" not visible {widget.interface}")
177 pass
178 else:
179 # Set the size of the child widget to the computed layout size.
180 # print(f" allocate child {widget.interface}: {widget.interface.layout}")
181 widget_allocation = Gdk.Rectangle()
182 widget_allocation.x = (
183 widget.interface.layout.absolute_content_left + allocation.x
184 )
185 widget_allocation.y = (
186 widget.interface.layout.absolute_content_top + allocation.y
187 )
188 widget_allocation.width = widget.interface.layout.content_width
189 widget_allocation.height = widget.interface.layout.content_height
190
191 widget.size_allocate(widget_allocation)
192
193 # The layout has been redrawn
194 self.needs_redraw = False
195
[end of gtk/src/toga_gtk/container.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gtk/src/toga_gtk/container.py b/gtk/src/toga_gtk/container.py
--- a/gtk/src/toga_gtk/container.py
+++ b/gtk/src/toga_gtk/container.py
@@ -161,12 +161,13 @@
resized = (allocation.width, allocation.height) != (self.width, self.height)
self.set_allocation(allocation)
- # This function may be called in response to irrelevant events like button clicks,
- # so only refresh if we really need to.
- if self._content and (resized or self.needs_redraw):
- # Re-evaluate the layout using the allocation size as the basis for geometry
- # print("REFRESH LAYOUT", allocation.width, allocation.height)
- self._content.interface.refresh()
+ if self._content:
+ # This function may be called in response to irrelevant events like button clicks,
+ # so only refresh if we really need to.
+ if resized or self.needs_redraw:
+ # Re-evaluate the layout using the allocation size as the basis for geometry
+ # print("REFRESH LAYOUT", allocation.width, allocation.height)
+ self._content.interface.refresh()
# WARNING! This is the list of children of the *container*, not
# the Toga widget. Toga maintains a tree of children; all nodes
|
{"golden_diff": "diff --git a/gtk/src/toga_gtk/container.py b/gtk/src/toga_gtk/container.py\n--- a/gtk/src/toga_gtk/container.py\n+++ b/gtk/src/toga_gtk/container.py\n@@ -161,12 +161,13 @@\n resized = (allocation.width, allocation.height) != (self.width, self.height)\n self.set_allocation(allocation)\n \n- # This function may be called in response to irrelevant events like button clicks,\n- # so only refresh if we really need to.\n- if self._content and (resized or self.needs_redraw):\n- # Re-evaluate the layout using the allocation size as the basis for geometry\n- # print(\"REFRESH LAYOUT\", allocation.width, allocation.height)\n- self._content.interface.refresh()\n+ if self._content:\n+ # This function may be called in response to irrelevant events like button clicks,\n+ # so only refresh if we really need to.\n+ if resized or self.needs_redraw:\n+ # Re-evaluate the layout using the allocation size as the basis for geometry\n+ # print(\"REFRESH LAYOUT\", allocation.width, allocation.height)\n+ self._content.interface.refresh()\n \n # WARNING! This is the list of children of the *container*, not\n # the Toga widget. Toga maintains a tree of children; all nodes\n", "issue": "GTK animated widgets freeze until first resize\n### Describe the bug\n\nGTK Widgets that contain an \"animated\" component - e.g., Switch (the slide left/right animation) or an indeterminate ProgressBar (pulse animation) won't animate when the window is first drawn. However, if you resize the window, the animation will resume, including any historical state.\n\n### Steps to reproduce\n\n1. Run the toga_switch example.\r\n\r\nTry to toggle the \"initial state\" button. The color of the switch will change, but the switch itself won't \"slide\".\r\n\r\nAlso worth note: The exterior border of the \"change label\" button is very slightly clipped.\r\n\r\n2. Run the progressbar example\r\n\r\nThe indeterminate progress bar may not start. \r\n\r\nToggle the Running Mode and Indeterminate Mode buttons. With sufficient toggling, you will eventually be able to cause the running indeterminate progress bar to stop animating.\r\n\n\n### Expected behavior\n\nAnimated widgets should consistently render.\n\n### Screenshots\n\n_No response_\n\n### Environment\n\n- Operating System: Linux (any; tested on Fedora 36 and Ubuntu 22.04)\r\n- Python version: 3.10 (but likely any)\r\n- Software versions:\r\n - Toga: 0.3.0+; tested at 235ff2e, but anything after the landing of #1794 will show the problem\r\n\n\n### Logs\n\n```\r\n\r\n```\r\n\n\n### Additional context\n\nThe problem appears to be related to the GTK Container fixes introduced in #1794.\r\n\r\nIn both examples listed above, I have found that the *first* time I run examples after making a code change, it *sometimes* works. However, on second and subsequent runs, the problem reliably occurs. There may be a timing issue at play - maybe the slowdown of PYC recompilation is sufficient to mask event order problems?\r\n\r\nA suboptimal fix: if the optimisation on L166 that only does a refresh() `if resized or self.needs_redraw` is removed (so a lot more refreshes are performed), the problem goes away\r\n\r\nA partial fix: If `window` listens for the `window-state-event` signal, and marks the container dirty when that event occurs, the switch demo works. The switches on the progress bar demo are also fixed. 
However, the animation of the indeterminate progress bar *isn't* fixed (at least, not consistently)\r\n\r\nThis suggests that the issue is a signal that constitutes a \"dirty\" window, but isn't being treated as a dirty signal.\r\n\n", "before_files": [{"content": "from .libs import Gdk, Gtk\n\n\nclass TogaContainer(Gtk.Fixed):\n \"\"\"A GTK container widget implementing Toga's layout.\n\n This is a GTK widget, with no Toga interface manifestation.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._content = None\n self.min_width = 100\n self.min_height = 100\n\n # GDK/GTK always renders at 96dpi. When HiDPI mode is enabled, it is\n # managed at the compositor level. See\n # https://wiki.archlinux.org/index.php/HiDPI#GDK_3_(GTK_3) for details\n self.dpi = 96\n self.baseline_dpi = self.dpi\n\n # The dirty widgets are the set of widgets that are known to need\n # re-hinting before any redraw occurs.\n self._dirty_widgets = set()\n\n # A flag that can be used to explicitly flag that a redraw is required.\n self.needs_redraw = True\n\n def make_dirty(self, widget=None):\n \"\"\"Mark the container (or a specific widget in the container) as dirty.\n\n :param widget: If provided, this widget will be rehinted before the next layout.\n \"\"\"\n self.needs_redraw = True\n if widget is not None:\n self._dirty_widgets.add(widget)\n self.queue_resize()\n\n @property\n def width(self):\n \"\"\"The display width of the container.\n\n If the container doesn't have any content yet, the width is 0.\n \"\"\"\n if self._content is None:\n return 0\n return self.get_allocated_width()\n\n @property\n def height(self):\n \"\"\"The display height of the container.\n\n If the container doesn't have any content yet, the height is 0.\n \"\"\"\n if self._content is None:\n return 0\n return self.get_allocated_height()\n\n @property\n def content(self):\n \"\"\"The Toga implementation widget that is the root content of this\n container.\n\n All children of the root content will also be added to the container as\n a result of assigning content.\n\n If the container already has content, the old content will be replaced.\n The old root content and all it's children will be removed from the\n container.\n \"\"\"\n return self._content\n\n @content.setter\n def content(self, widget):\n if self._content:\n self._content.container = None\n\n self._content = widget\n if widget:\n widget.container = self\n\n def recompute(self):\n \"\"\"Rehint and re-layout the container's content, if necessary.\n\n Any widgets known to be dirty will be rehinted. The minimum\n possible layout size for the container will also be recomputed.\n \"\"\"\n if self._content and self.needs_redraw:\n # If any of the widgets have been marked as dirty,\n # recompute their bounds, and re-evaluate the minimum\n # allowed size fo the layout.\n while self._dirty_widgets:\n widget = self._dirty_widgets.pop()\n widget.rehint()\n\n # Compute the layout using a 0-size container\n self._content.interface.style.layout(\n self._content.interface, TogaContainer()\n )\n\n # print(\" computed min layout\", self._content.interface.layout)\n self.min_width = self._content.interface.layout.width\n self.min_height = self._content.interface.layout.height\n\n def do_get_preferred_width(self):\n \"\"\"Return (recomputing if necessary) the preferred width for the\n container.\n\n The preferred size of the container is it's minimum size. 
This\n preference will be overridden with the layout size when the layout is\n applied.\n\n If the container does not yet have content, the minimum width is set to\n 0.\n \"\"\"\n # print(\"GET PREFERRED WIDTH\", self._content)\n if self._content is None:\n return 0, 0\n\n # Ensure we have an accurate min layout size\n self.recompute()\n\n # The container will conform to the size of the allocation it is given,\n # so the min and preferred size are the same.\n return self.min_width, self.min_width\n\n def do_get_preferred_height(self):\n \"\"\"Return (recomputing if necessary) the preferred height for the\n container.\n\n The preferred size of the container is it's minimum size. This\n preference will be overridden with the layout size when the\n layout is applied.\n\n If the container does not yet have content, the minimum height\n is set to 0.\n \"\"\"\n # print(\"GET PREFERRED HEIGHT\", self._content)\n if self._content is None:\n return 0, 0\n\n # Ensure we have an accurate min layout size\n self.recompute()\n\n # The container will conform to the size of the allocation it is given,\n # so the min and preferred size are the same.\n return self.min_height, self.min_height\n\n def do_size_allocate(self, allocation):\n \"\"\"Perform the actual layout for the widget, and all it's children.\n\n The container will assume whatever size it has been given by GTK -\n usually the full space of the window that holds the container.\n The layout will then be re-computed based on this new available size,\n and that new geometry will be applied to all child widgets of the\n container.\n \"\"\"\n # print(self._content, f\"Container layout {allocation.width}x{allocation.height} @ {allocation.x}x{allocation.y}\")\n\n # The container will occupy the full space it has been allocated.\n resized = (allocation.width, allocation.height) != (self.width, self.height)\n self.set_allocation(allocation)\n\n # This function may be called in response to irrelevant events like button clicks,\n # so only refresh if we really need to.\n if self._content and (resized or self.needs_redraw):\n # Re-evaluate the layout using the allocation size as the basis for geometry\n # print(\"REFRESH LAYOUT\", allocation.width, allocation.height)\n self._content.interface.refresh()\n\n # WARNING! This is the list of children of the *container*, not\n # the Toga widget. Toga maintains a tree of children; all nodes\n # in that tree are direct children of the container.\n for widget in self.get_children():\n if not widget.get_visible():\n # print(\" not visible {widget.interface}\")\n pass\n else:\n # Set the size of the child widget to the computed layout size.\n # print(f\" allocate child {widget.interface}: {widget.interface.layout}\")\n widget_allocation = Gdk.Rectangle()\n widget_allocation.x = (\n widget.interface.layout.absolute_content_left + allocation.x\n )\n widget_allocation.y = (\n widget.interface.layout.absolute_content_top + allocation.y\n )\n widget_allocation.width = widget.interface.layout.content_width\n widget_allocation.height = widget.interface.layout.content_height\n\n widget.size_allocate(widget_allocation)\n\n # The layout has been redrawn\n self.needs_redraw = False\n", "path": "gtk/src/toga_gtk/container.py"}]}
| 3,102 | 303 |
gh_patches_debug_31649
|
rasdani/github-patches
|
git_diff
|
deepset-ai__haystack-5970
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JoinDocuments should use highest score when multiple retrievers recall the same document
**Is your feature request related to a problem? Please describe.**
The JoinDocuments node currently uses the document from the last retriever if their are multiple retrievers and they recall the same document. The first retriever could have the highest score and be more useful. That's why I propose to use the document with the highest score if there are duplicate documents recalled
</issue>
<code>
[start of haystack/nodes/other/join_docs.py]
1 from collections import defaultdict
2 import logging
3 from math import inf
4
5 from typing import Optional, List
6
7 from haystack.schema import Document
8 from haystack.nodes.other.join import JoinNode
9
10 logger = logging.getLogger(__name__)
11
12
13 class JoinDocuments(JoinNode):
14 """
15 A node to join documents outputted by multiple retriever nodes.
16
17 The node allows multiple join modes:
18 * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.
19 The score is only determined by the last node that outputs the document.
20 * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different
21 `weight` & a `top_k` limit can be set. This mode can also be used for "reranking" retrieved documents.
22 * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.
23 """
24
25 outgoing_edges = 1
26
27 def __init__(
28 self,
29 join_mode: str = "concatenate",
30 weights: Optional[List[float]] = None,
31 top_k_join: Optional[int] = None,
32 sort_by_score: bool = True,
33 ):
34 """
35 :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of
36 individual documents, `reciprocal_rank_fusion` to apply rank based scoring.
37 :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for
38 adjusting document scores when using the `merge` join_mode. By default, equal weight is given
39 to each retriever score. This param is not compatible with the `concatenate` join_mode.
40 :param top_k_join: Limit documents to top_k based on the resulting scores of the join.
41 :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your
42 Documents are coming with `score` values. Set to False if any of the Documents come
43 from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.
44 """
45 assert join_mode in [
46 "concatenate",
47 "merge",
48 "reciprocal_rank_fusion",
49 ], f"JoinDocuments node does not support '{join_mode}' join_mode."
50
51 assert not (
52 weights is not None and join_mode == "concatenate"
53 ), "Weights are not compatible with 'concatenate' join_mode."
54
55 super().__init__()
56
57 self.join_mode = join_mode
58 self.weights = [float(i) / sum(weights) for i in weights] if weights else None
59 self.top_k_join = top_k_join
60 self.sort_by_score = sort_by_score
61
62 def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore
63 results = [inp["documents"] for inp in inputs]
64 document_map = {doc.id: doc for result in results for doc in result}
65
66 if self.join_mode == "concatenate":
67 scores_map = self._concatenate_results(results)
68 elif self.join_mode == "merge":
69 scores_map = self._calculate_comb_sum(results)
70 elif self.join_mode == "reciprocal_rank_fusion":
71 scores_map = self._calculate_rrf(results)
72 else:
73 raise ValueError(f"Invalid join_mode: {self.join_mode}")
74
75 # only sort the docs if that was requested
76 if self.sort_by_score:
77 sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)
78 if any(s is None for s in scores_map.values()):
79 logger.info(
80 "The `JoinDocuments` node has received some documents with `score=None` - and was requested "
81 "to sort the documents by score, so the `score=None` documents got sorted as if their "
82 "score would be `-infinity`."
83 )
84 else:
85 sorted_docs = list(scores_map.items())
86
87 if not top_k_join:
88 top_k_join = self.top_k_join
89 if not top_k_join:
90 top_k_join = len(sorted_docs)
91
92 docs = []
93 for id, score in sorted_docs[:top_k_join]:
94 doc = document_map[id]
95 doc.score = score
96 docs.append(doc)
97
98 output = {"documents": docs, "labels": inputs[0].get("labels", None)}
99
100 return output, "output_1"
101
102 def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore
103 # Join single document lists
104 if isinstance(inputs[0]["documents"][0], Document):
105 return self.run(inputs=inputs, top_k_join=top_k_join)
106 # Join lists of document lists
107 else:
108 output_docs = []
109 incoming_edges = [inp["documents"] for inp in inputs]
110 for idx in range(len(incoming_edges[0])):
111 cur_docs_to_join = []
112 for edge in incoming_edges:
113 cur_docs_to_join.append({"documents": edge[idx]})
114 cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)
115 output_docs.append(cur["documents"])
116
117 output = {"documents": output_docs, "labels": inputs[0].get("labels", None)}
118
119 return output, "output_1"
120
121 def _concatenate_results(self, results):
122 """
123 Concatenates multiple document result lists.
124 """
125 return {doc.id: doc.score for result in results for doc in result}
126
127 def _calculate_comb_sum(self, results):
128 """
129 Calculates a combination sum by multiplying each score by its weight.
130 """
131 scores_map = defaultdict(int)
132 weights = self.weights if self.weights else [1 / len(results)] * len(results)
133
134 for result, weight in zip(results, weights):
135 for doc in result:
136 scores_map[doc.id] += (doc.score if doc.score else 0) * weight
137
138 return scores_map
139
140 def _calculate_rrf(self, results):
141 """
142 Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,
143 plus 1 as python lists are 0-based and the paper used 1-based ranking).
144 """
145 K = 61
146
147 scores_map = defaultdict(int)
148 for result in results:
149 for rank, doc in enumerate(result):
150 scores_map[doc.id] += 1 / (K + rank)
151
152 return scores_map
153
[end of haystack/nodes/other/join_docs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/haystack/nodes/other/join_docs.py b/haystack/nodes/other/join_docs.py
--- a/haystack/nodes/other/join_docs.py
+++ b/haystack/nodes/other/join_docs.py
@@ -1,11 +1,10 @@
-from collections import defaultdict
import logging
+from collections import defaultdict
from math import inf
+from typing import List, Optional
-from typing import Optional, List
-
-from haystack.schema import Document
from haystack.nodes.other.join import JoinNode
+from haystack.schema import Document
logger = logging.getLogger(__name__)
@@ -64,7 +63,7 @@
document_map = {doc.id: doc for result in results for doc in result}
if self.join_mode == "concatenate":
- scores_map = self._concatenate_results(results)
+ scores_map = self._concatenate_results(results, document_map)
elif self.join_mode == "merge":
scores_map = self._calculate_comb_sum(results)
elif self.join_mode == "reciprocal_rank_fusion":
@@ -118,11 +117,22 @@
return output, "output_1"
- def _concatenate_results(self, results):
+ def _concatenate_results(self, results, document_map):
"""
Concatenates multiple document result lists.
+ Return the documents with the higher score.
"""
- return {doc.id: doc.score for result in results for doc in result}
+ list_id = list(document_map.keys())
+ scores_map = {}
+ for idx in list_id:
+ tmp = []
+ for result in results:
+ for doc in result:
+ if doc.id == idx:
+ tmp.append(doc)
+ item_best_score = max(tmp, key=lambda x: x.score)
+ scores_map.update({idx: item_best_score.score})
+ return scores_map
def _calculate_comb_sum(self, results):
"""
|
{"golden_diff": "diff --git a/haystack/nodes/other/join_docs.py b/haystack/nodes/other/join_docs.py\n--- a/haystack/nodes/other/join_docs.py\n+++ b/haystack/nodes/other/join_docs.py\n@@ -1,11 +1,10 @@\n-from collections import defaultdict\n import logging\n+from collections import defaultdict\n from math import inf\n+from typing import List, Optional\n \n-from typing import Optional, List\n-\n-from haystack.schema import Document\n from haystack.nodes.other.join import JoinNode\n+from haystack.schema import Document\n \n logger = logging.getLogger(__name__)\n \n@@ -64,7 +63,7 @@\n document_map = {doc.id: doc for result in results for doc in result}\n \n if self.join_mode == \"concatenate\":\n- scores_map = self._concatenate_results(results)\n+ scores_map = self._concatenate_results(results, document_map)\n elif self.join_mode == \"merge\":\n scores_map = self._calculate_comb_sum(results)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n@@ -118,11 +117,22 @@\n \n return output, \"output_1\"\n \n- def _concatenate_results(self, results):\n+ def _concatenate_results(self, results, document_map):\n \"\"\"\n Concatenates multiple document result lists.\n+ Return the documents with the higher score.\n \"\"\"\n- return {doc.id: doc.score for result in results for doc in result}\n+ list_id = list(document_map.keys())\n+ scores_map = {}\n+ for idx in list_id:\n+ tmp = []\n+ for result in results:\n+ for doc in result:\n+ if doc.id == idx:\n+ tmp.append(doc)\n+ item_best_score = max(tmp, key=lambda x: x.score)\n+ scores_map.update({idx: item_best_score.score})\n+ return scores_map\n \n def _calculate_comb_sum(self, results):\n \"\"\"\n", "issue": "JoinDocuments should use highest score when multiple retrievers recall the same document\n**Is your feature request related to a problem? Please describe.**\r\nThe JoinDocuments node currently uses the document from the last retriever if their are multiple retrievers and they recall the same document. The first retriever could have the highest score and be more useful. That's why I propose to use the document with the highest score if there are duplicate documents recalled\r\n\n", "before_files": [{"content": "from collections import defaultdict\nimport logging\nfrom math import inf\n\nfrom typing import Optional, List\n\nfrom haystack.schema import Document\nfrom haystack.nodes.other.join import JoinNode\n\nlogger = logging.getLogger(__name__)\n\n\nclass JoinDocuments(JoinNode):\n \"\"\"\n A node to join documents outputted by multiple retriever nodes.\n\n The node allows multiple join modes:\n * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.\n The score is only determined by the last node that outputs the document.\n * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different\n `weight` & a `top_k` limit can be set. 
This mode can also be used for \"reranking\" retrieved documents.\n * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.\n \"\"\"\n\n outgoing_edges = 1\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k_join: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of\n individual documents, `reciprocal_rank_fusion` to apply rank based scoring.\n :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for\n adjusting document scores when using the `merge` join_mode. By default, equal weight is given\n to each retriever score. This param is not compatible with the `concatenate` join_mode.\n :param top_k_join: Limit documents to top_k based on the resulting scores of the join.\n :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your\n Documents are coming with `score` values. Set to False if any of the Documents come\n from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.\n \"\"\"\n assert join_mode in [\n \"concatenate\",\n \"merge\",\n \"reciprocal_rank_fusion\",\n ], f\"JoinDocuments node does not support '{join_mode}' join_mode.\"\n\n assert not (\n weights is not None and join_mode == \"concatenate\"\n ), \"Weights are not compatible with 'concatenate' join_mode.\"\n\n super().__init__()\n\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k_join = top_k_join\n self.sort_by_score = sort_by_score\n\n def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n results = [inp[\"documents\"] for inp in inputs]\n document_map = {doc.id: doc for result in results for doc in result}\n\n if self.join_mode == \"concatenate\":\n scores_map = self._concatenate_results(results)\n elif self.join_mode == \"merge\":\n scores_map = self._calculate_comb_sum(results)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n scores_map = self._calculate_rrf(results)\n else:\n raise ValueError(f\"Invalid join_mode: {self.join_mode}\")\n\n # only sort the docs if that was requested\n if self.sort_by_score:\n sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)\n if any(s is None for s in scores_map.values()):\n logger.info(\n \"The `JoinDocuments` node has received some documents with `score=None` - and was requested \"\n \"to sort the documents by score, so the `score=None` documents got sorted as if their \"\n \"score would be `-infinity`.\"\n )\n else:\n sorted_docs = list(scores_map.items())\n\n if not top_k_join:\n top_k_join = self.top_k_join\n if not top_k_join:\n top_k_join = len(sorted_docs)\n\n docs = []\n for id, score in sorted_docs[:top_k_join]:\n doc = document_map[id]\n doc.score = score\n docs.append(doc)\n\n output = {\"documents\": docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n # Join single document lists\n if isinstance(inputs[0][\"documents\"][0], Document):\n return self.run(inputs=inputs, top_k_join=top_k_join)\n # Join lists of document lists\n else:\n output_docs = []\n incoming_edges = [inp[\"documents\"] for inp in inputs]\n for idx in 
range(len(incoming_edges[0])):\n cur_docs_to_join = []\n for edge in incoming_edges:\n cur_docs_to_join.append({\"documents\": edge[idx]})\n cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)\n output_docs.append(cur[\"documents\"])\n\n output = {\"documents\": output_docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def _concatenate_results(self, results):\n \"\"\"\n Concatenates multiple document result lists.\n \"\"\"\n return {doc.id: doc.score for result in results for doc in result}\n\n def _calculate_comb_sum(self, results):\n \"\"\"\n Calculates a combination sum by multiplying each score by its weight.\n \"\"\"\n scores_map = defaultdict(int)\n weights = self.weights if self.weights else [1 / len(results)] * len(results)\n\n for result, weight in zip(results, weights):\n for doc in result:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n\n return scores_map\n\n def _calculate_rrf(self, results):\n \"\"\"\n Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n K = 61\n\n scores_map = defaultdict(int)\n for result in results:\n for rank, doc in enumerate(result):\n scores_map[doc.id] += 1 / (K + rank)\n\n return scores_map\n", "path": "haystack/nodes/other/join_docs.py"}]}
| 2,394 | 432 |
gh_patches_debug_8938
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1448
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unhandled yaml.scanner.ScannerError when trying autoupdate with a malformed pre-commit config
In migrate_config.py we catch `yaml.YAMLError` on [lines 31-36](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/commands/migrate_config.py#L31-L36) (of which `yaml.scanner.ScannerError` is a subclass), but when the exception is raised on line 28, it is unhandled.
```console
$ pre-commit autoupdate
An unexpected error has occurred: ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
Check the log at /home/ryan/.cache/pre-commit/pre-commit.log
```
### version information
```
pre-commit version: 2.3.0
sys.version:
3.8.2 (default, Apr 8 2020, 14:31:25)
[GCC 9.3.0]
sys.executable: /home/ryan/.local/pipx/venvs/pre-commit/bin/python
os.name: posix
sys.platform: linux
```
### error information
```
An unexpected error has occurred: ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
```
```
Traceback (most recent call last):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/error_handler.py", line 56, in error_handler
yield
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/main.py", line 354, in main
return autoupdate(
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/autoupdate.py", line 141, in autoupdate
migrate_config(config_file, quiet=True)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 49, in migrate_config
contents = _migrate_map(contents)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 28, in _migrate_map
if isinstance(yaml_load(contents), list):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/__init__.py", line 114, in load
return loader.get_single_data()
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/constructor.py", line 49, in get_single_data
node = self.get_single_node()
File "ext/_yaml.pyx", line 707, in _yaml.CParser.get_single_node
File "ext/_yaml.pyx", line 726, in _yaml.CParser._compose_document
File "ext/_yaml.pyx", line 905, in _yaml.CParser._parse_next_event
yaml.scanner.ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
```
</issue>
<code>
[start of pre_commit/commands/migrate_config.py]
1 import re
2
3 import yaml
4
5 from pre_commit.util import yaml_load
6
7
8 def _indent(s: str) -> str:
9 lines = s.splitlines(True)
10 return ''.join(' ' * 4 + line if line.strip() else line for line in lines)
11
12
13 def _is_header_line(line: str) -> bool:
14 return line.startswith(('#', '---')) or not line.strip()
15
16
17 def _migrate_map(contents: str) -> str:
18 # Find the first non-header line
19 lines = contents.splitlines(True)
20 i = 0
21 # Only loop on non empty configuration file
22 while i < len(lines) and _is_header_line(lines[i]):
23 i += 1
24
25 header = ''.join(lines[:i])
26 rest = ''.join(lines[i:])
27
28 if isinstance(yaml_load(contents), list):
29 # If they are using the "default" flow style of yaml, this operation
30 # will yield a valid configuration
31 try:
32 trial_contents = f'{header}repos:\n{rest}'
33 yaml_load(trial_contents)
34 contents = trial_contents
35 except yaml.YAMLError:
36 contents = f'{header}repos:\n{_indent(rest)}'
37
38 return contents
39
40
41 def _migrate_sha_to_rev(contents: str) -> str:
42 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
43
44
45 def migrate_config(config_file: str, quiet: bool = False) -> int:
46 with open(config_file) as f:
47 orig_contents = contents = f.read()
48
49 contents = _migrate_map(contents)
50 contents = _migrate_sha_to_rev(contents)
51
52 if contents != orig_contents:
53 with open(config_file, 'w') as f:
54 f.write(contents)
55
56 print('Configuration has been migrated.')
57 elif not quiet:
58 print('Configuration is already migrated.')
59 return 0
60
[end of pre_commit/commands/migrate_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py
--- a/pre_commit/commands/migrate_config.py
+++ b/pre_commit/commands/migrate_config.py
@@ -2,6 +2,7 @@
import yaml
+from pre_commit.clientlib import load_config
from pre_commit.util import yaml_load
@@ -43,6 +44,9 @@
def migrate_config(config_file: str, quiet: bool = False) -> int:
+ # ensure that the configuration is a valid pre-commit configuration
+ load_config(config_file)
+
with open(config_file) as f:
orig_contents = contents = f.read()
|
{"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -2,6 +2,7 @@\n \n import yaml\n \n+from pre_commit.clientlib import load_config\n from pre_commit.util import yaml_load\n \n \n@@ -43,6 +44,9 @@\n \n \n def migrate_config(config_file: str, quiet: bool = False) -> int:\n+ # ensure that the configuration is a valid pre-commit configuration\n+ load_config(config_file)\n+\n with open(config_file) as f:\n orig_contents = contents = f.read()\n", "issue": "Unhandled yaml.scanner.ScannerError when trying autoupdate with a malformed pre-commit config\nIn migrate_config.py we catch `yaml.YAMLError` on [lines 31-36](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/commands/migrate_config.py#L31-L36) (of which `yaml.scanner.ScannerError` is a subclass), but when the exception is raised on line 28, it is unhandled.\r\n\r\n```console\r\n$ pre-commit autoupdate\r\nAn unexpected error has occurred: ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\nCheck the log at /home/ryan/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.3.0\r\nsys.version:\r\n 3.8.2 (default, Apr 8 2020, 14:31:25) \r\n [GCC 9.3.0]\r\nsys.executable: /home/ryan/.local/pipx/venvs/pre-commit/bin/python\r\nos.name: posix\r\nsys.platform: linux\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/error_handler.py\", line 56, in error_handler\r\n yield\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/main.py\", line 354, in main\r\n return autoupdate(\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/autoupdate.py\", line 141, in autoupdate\r\n migrate_config(config_file, quiet=True)\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py\", line 49, in migrate_config\r\n contents = _migrate_map(contents)\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py\", line 28, in _migrate_map\r\n if isinstance(yaml_load(contents), list):\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/__init__.py\", line 114, in load\r\n return loader.get_single_data()\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/constructor.py\", line 49, in get_single_data\r\n node = self.get_single_node()\r\n File \"ext/_yaml.pyx\", line 707, in _yaml.CParser.get_single_node\r\n File \"ext/_yaml.pyx\", line 726, in _yaml.CParser._compose_document\r\n File \"ext/_yaml.pyx\", line 905, in _yaml.CParser._parse_next_event\r\nyaml.scanner.ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\n```\r\n\n", "before_files": [{"content": "import re\n\nimport yaml\n\nfrom pre_commit.util import yaml_load\n\n\ndef _indent(s: str) -> str:\n lines = s.splitlines(True)\n return ''.join(' ' * 4 + line if line.strip() else line for line in lines)\n\n\ndef _is_header_line(line: str) -> bool:\n return 
line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n if isinstance(yaml_load(contents), list):\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{_indent(rest)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]}
| 1,803 | 147 |
gh_patches_debug_60346 | rasdani/github-patches | git_diff | graspologic-org__graspologic-366 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
returning test statistic in LDT
some practitioners (read: Vince, cep) only care about the test statistic and not the p-value. obviously one can still extract it if they perform the full test. however, that wastes time and resources. one can set the number of iterations to 1 to minimize that, but we can still do less. i propose to allow the number of permutations to be set to 0 (hyppo allows that, so really it is just a change in argument check). i am happy to do this, but:
this brings up the following questions: what should be happening to the fit_predict in that case? should it return the test statistic instead? or the p-value of 1? or NaN? should we be raising warnings?
and on a larger scale: should we really have this API? should fit_predict return p-value, or a tuple of a p-value and a test statistic, like many other tests in python? furthermore, should it really be a class? once again, most tests in python that i have seen (scipy, statsmodels) are functions, not classes.
</issue>
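The request is essentially "let zero permutations mean: compute the statistic, skip the p-value". A toy permutation test (difference of means on NumPy arrays, not graspologic's latent distribution test or hyppo's API) shows what that contract could look like:

```python
import numpy as np


def perm_test(x, y, reps=1000, seed=0):
    """Two-sample difference-of-means test; reps=0 returns the statistic alone."""
    stat = abs(x.mean() - y.mean())
    if reps == 0:
        return stat, np.nan          # statistic only -- no permutations, no p-value
    rng = np.random.default_rng(seed)
    pooled = np.concatenate([x, y])
    null = np.empty(reps)
    for i in range(reps):
        perm = rng.permutation(pooled)
        null[i] = abs(perm[:x.size].mean() - perm[x.size:].mean())
    return stat, float((null >= stat).mean())


rng = np.random.default_rng(1)
x, y = rng.normal(0, 1, 40), rng.normal(0.5, 1, 40)
print(perm_test(x, y, reps=0))       # cheap: (statistic, nan)
print(perm_test(x, y, reps=500))     # full test: (statistic, p-value)
```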
<code>
[start of setup.py]
1 import os
2 import sys
3 from setuptools import setup, find_packages
4 from sys import platform
5
6 PACKAGE_NAME = "graspy"
7 DESCRIPTION = "A set of python modules for graph statistics"
8 with open("README.md", "r") as f:
9 LONG_DESCRIPTION = f.read()
10 AUTHOR = ("Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",)
11 AUTHOR_EMAIL = "[email protected]"
12 URL = "https://github.com/neurodata/graspy"
13 MINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5
14 REQUIRED_PACKAGES = [
15 "networkx>=2.1",
16 "numpy>=1.8.1",
17 "scikit-learn>=0.19.1",
18 "scipy>=1.1.0",
19 "seaborn>=0.9.0",
20 "matplotlib>=3.0.0",
21 "hyppo>=0.1.2",
22 ]
23
24
25 # Find GraSPy version.
26 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
27 for line in open(os.path.join(PROJECT_PATH, "graspy", "__init__.py")):
28 if line.startswith("__version__ = "):
29 VERSION = line.strip().split()[2][1:-1]
30
31
32 def check_python_version():
33 """Exit when the Python version is too low."""
34 if sys.version_info < MINIMUM_PYTHON_VERSION:
35 sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
36
37
38 check_python_version()
39
40 setup(
41 name=PACKAGE_NAME,
42 version=VERSION,
43 description=DESCRIPTION,
44 long_description=LONG_DESCRIPTION,
45 long_description_content_type="text/markdown",
46 author=AUTHOR,
47 author_email=AUTHOR_EMAIL,
48 install_requires=REQUIRED_PACKAGES,
49 url=URL,
50 license="Apache License 2.0",
51 classifiers=[
52 "Development Status :: 3 - Alpha",
53 "Intended Audience :: Science/Research",
54 "Topic :: Scientific/Engineering :: Mathematics",
55 "License :: OSI Approved :: Apache Software License",
56 "Programming Language :: Python :: 3",
57 "Programming Language :: Python :: 3.6",
58 "Programming Language :: Python :: 3.7",
59 ],
60 packages=find_packages(),
61 include_package_data=True,
62 )
63
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
"scipy>=1.1.0",
"seaborn>=0.9.0",
"matplotlib>=3.0.0",
- "hyppo>=0.1.2",
+ "hyppo>=0.1.3",
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n- \"hyppo>=0.1.2\",\n+ \"hyppo>=0.1.3\",\n ]\n", "issue": "returning test statistic in LDT\nsome practitioners (read: Vince, cep) only care about the test statistic and not the p-value. obviously one can still extract it if they perform the full test. however, that wastes time and resources. one can set the number of iterations to 1 to minimize that, but we can still do less. i propose to allow the number of permutations to be set to 0 (hyppo allows that, so really it is just a change in argument check). i am happy to do this, but:\r\n\r\nthis brings up the following questions: what should be happening to the fit_predict in that case? should it return the test statistic instead? or the p-value of 1? or NaN? should we be raising warnings?\r\n\r\nand on a larger scale: should we really have this API? should fit predict return p-value, or a tuple of a p-value and a test statistic, like many other tests in python? furthremore, should it really be a class? once again, most tests in python that i have seen (scipy, statsmodels) are functions, not classes.\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.2\",\n]\n\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py"}]}
| 1,376 | 90 |
gh_patches_debug_17961 | rasdani/github-patches | git_diff | pytorch__tnt-101 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AverageValueMeter returns incorrect results when `tensor` is passed
Based on [this thread](https://discuss.pytorch.org/t/confusing-result-about-meter-averagevaluemeter/21819) it seems as if the internal members of the class hold references to the `tensors`, thus yielding wrong results.
When the `tensor` value is passed by `.item()` the result is correct.
A simple fix would be to add this condition to `add`:
```python
def add(self, value, n=1):
if isinstance(value, torch.Tensor):
value = value.item()
self.val = value
```
I can submit a PR, if that makes sense to you.
</issue>
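The root cause is aliasing: when `value` is a 0-d tensor, `self.sum`, `self.mean` and `self.mean_old` can end up pointing at the same tensor object, so the in-place `+=` on one silently updates the others. A few lines (requiring only PyTorch) make the effect visible; the `0.0 + running_sum` trick is the copy-forcing fix the golden diff below applies to `self.sum`:

```python
import torch

running_sum = torch.tensor(1.0)
mean_old = running_sum            # aliases the same tensor, no copy is made
running_sum += 2.0                # in-place add mutates mean_old too
print(mean_old)                   # tensor(3.) -- the "old" mean has moved

mean_old = 0.0 + running_sum      # forces a fresh tensor (same idea as `0.0 + self.sum`)
running_sum += 4.0
print(mean_old)                   # tensor(3.) -- now unaffected
```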
<code>
[start of torchnet/meter/averagevaluemeter.py]
1 import math
2 from . import meter
3 import numpy as np
4
5
6 class AverageValueMeter(meter.Meter):
7 def __init__(self):
8 super(AverageValueMeter, self).__init__()
9 self.reset()
10 self.val = 0
11
12 def add(self, value, n=1):
13 self.val = value
14 self.sum += value
15 self.var += value * value
16 self.n += n
17
18 if self.n == 0:
19 self.mean, self.std = np.nan, np.nan
20 elif self.n == 1:
21 self.mean, self.std = self.sum, np.inf
22 self.mean_old = self.mean
23 self.m_s = 0.0
24 else:
25 self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
26 self.m_s += (value - self.mean_old) * (value - self.mean)
27 self.mean_old = self.mean
28 self.std = math.sqrt(self.m_s / (self.n - 1.0))
29
30 def value(self):
31 return self.mean, self.std
32
33 def reset(self):
34 self.n = 0
35 self.sum = 0.0
36 self.var = 0.0
37 self.val = 0.0
38 self.mean = np.nan
39 self.mean_old = 0.0
40 self.m_s = 0.0
41 self.std = np.nan
42
[end of torchnet/meter/averagevaluemeter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchnet/meter/averagevaluemeter.py b/torchnet/meter/averagevaluemeter.py
--- a/torchnet/meter/averagevaluemeter.py
+++ b/torchnet/meter/averagevaluemeter.py
@@ -18,14 +18,15 @@
if self.n == 0:
self.mean, self.std = np.nan, np.nan
elif self.n == 1:
- self.mean, self.std = self.sum, np.inf
+ self.mean = 0.0 + self.sum # This is to force a copy in torch/numpy
+ self.std = np.inf
self.mean_old = self.mean
self.m_s = 0.0
else:
self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
self.m_s += (value - self.mean_old) * (value - self.mean)
self.mean_old = self.mean
- self.std = math.sqrt(self.m_s / (self.n - 1.0))
+ self.std = np.sqrt(self.m_s / (self.n - 1.0))
def value(self):
return self.mean, self.std
|
{"golden_diff": "diff --git a/torchnet/meter/averagevaluemeter.py b/torchnet/meter/averagevaluemeter.py\n--- a/torchnet/meter/averagevaluemeter.py\n+++ b/torchnet/meter/averagevaluemeter.py\n@@ -18,14 +18,15 @@\n if self.n == 0:\n self.mean, self.std = np.nan, np.nan\n elif self.n == 1:\n- self.mean, self.std = self.sum, np.inf\n+ self.mean = 0.0 + self.sum # This is to force a copy in torch/numpy\n+ self.std = np.inf\n self.mean_old = self.mean\n self.m_s = 0.0\n else:\n self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)\n self.m_s += (value - self.mean_old) * (value - self.mean)\n self.mean_old = self.mean\n- self.std = math.sqrt(self.m_s / (self.n - 1.0))\n+ self.std = np.sqrt(self.m_s / (self.n - 1.0))\n \n def value(self):\n return self.mean, self.std\n", "issue": "AverageValueMeter returns incorrect results when `tensor` is passed\nBased on [this thread](https://discuss.pytorch.org/t/confusing-result-about-meter-averagevaluemeter/21819) it seems as if the internal members of the class hold references to the `tensors`, thus yielding wrong results.\r\nWhen the `tensor` value is passed by `.item()` the result is correct.\r\nA simple fix would be to add this condition to `add`:\r\n```python\r\ndef add(self, value, n=1):\r\n if isinstance(value, torch.Tensor):\r\n value = value.item()\r\n self.val = value\r\n```\r\n\r\nI can submit a PR, if that makes sense to you.\n", "before_files": [{"content": "import math\nfrom . import meter\nimport numpy as np\n\n\nclass AverageValueMeter(meter.Meter):\n def __init__(self):\n super(AverageValueMeter, self).__init__()\n self.reset()\n self.val = 0\n\n def add(self, value, n=1):\n self.val = value\n self.sum += value\n self.var += value * value\n self.n += n\n\n if self.n == 0:\n self.mean, self.std = np.nan, np.nan\n elif self.n == 1:\n self.mean, self.std = self.sum, np.inf\n self.mean_old = self.mean\n self.m_s = 0.0\n else:\n self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)\n self.m_s += (value - self.mean_old) * (value - self.mean)\n self.mean_old = self.mean\n self.std = math.sqrt(self.m_s / (self.n - 1.0))\n\n def value(self):\n return self.mean, self.std\n\n def reset(self):\n self.n = 0\n self.sum = 0.0\n self.var = 0.0\n self.val = 0.0\n self.mean = np.nan\n self.mean_old = 0.0\n self.m_s = 0.0\n self.std = np.nan\n", "path": "torchnet/meter/averagevaluemeter.py"}]}
| 1,078 | 274 |
gh_patches_debug_6669 | rasdani/github-patches | git_diff | falconry__falcon-1955 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hook after params attribute is missing
Hi
```python
def do_before(req, resp, resource, params):
# here params is available
def do_after(req, resp, resource, params):
# here params is not available
```
How can I access the params?
</issue>
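As the docstrings below spell out, an `after` hook receives `(req, resp, resource)` but no `params`. A common workaround (a sketch, not an official Falcon recipe, and assuming Falcon 2.0+ where `req.context` supports attribute assignment) is to have a `before` hook stash the params on `req.context`, which any later hook can read:

```python
import falcon


def remember_params(req, resp, resource, params):
    req.context.uri_params = dict(params)           # before hooks do get params


def log_params(req, resp, resource):
    print(getattr(req.context, "uri_params", {}))   # after hooks read the stashed copy


@falcon.before(remember_params)
@falcon.after(log_params)
class ThingResource:
    def on_get(self, req, resp, thing_id):
        resp.media = {"thing_id": thing_id}
```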
<code>
[start of falcon/hooks.py]
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Hook decorators."""
16
17 from functools import wraps
18 from inspect import getmembers
19 from inspect import iscoroutinefunction
20 import re
21
22 from falcon.constants import COMBINED_METHODS
23 from falcon.util.misc import get_argnames
24 from falcon.util.sync import _wrap_non_coroutine_unsafe
25
26
27 _DECORABLE_METHOD_NAME = re.compile(
28 r'^on_({})(_\w+)?$'.format('|'.join(method.lower() for method in COMBINED_METHODS))
29 )
30
31
32 def before(action, *args, is_async=False, **kwargs):
33 """Execute the given action function *before* the responder.
34
35 The `params` argument that is passed to the hook
36 contains only the fields from the URI template path; it does not
37 include query string values.
38
39 Hooks may inject extra params as needed. For example::
40
41 def do_something(req, resp, resource, params):
42 try:
43 params['id'] = int(params['id'])
44 except ValueError:
45 raise falcon.HTTPBadRequest(title='Invalid ID',
46 description='ID was not valid.')
47
48 params['answer'] = 42
49
50 Args:
51 action (callable): A function of the form
52 ``func(req, resp, resource, params)``, where `resource` is a
53 reference to the resource class instance associated with the
54 request and `params` is a dict of URI template field names,
55 if any, that will be passed into the resource responder as
56 kwargs.
57
58 *args: Any additional arguments will be passed to *action* in the
59 order given, immediately following the *req*, *resp*, *resource*,
60 and *params* arguments.
61
62 Keyword Args:
63 is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
64 the decorated responder is a coroutine function (i.e., that it
65 is defined with ``async def``) or that it returns an awaitable
66 coroutine object.
67
68 Normally, when the function source is declared using ``async def``,
69 the resulting function object is flagged to indicate it returns a
70 coroutine when invoked, and this can be automatically detected.
71 However, it is possible to use a regular function to return an
72 awaitable coroutine object, in which case a hint is required to let
73 the framework know what to expect. Also, a hint is always required
74 when using a cythonized coroutine function, since Cython does not
75 flag them in a way that can be detected in advance, even when the
76 function is declared using ``async def``.
77
78 **kwargs: Any additional keyword arguments will be passed through to
79 *action*.
80 """
81
82 def _before(responder_or_resource):
83 if isinstance(responder_or_resource, type):
84 resource = responder_or_resource
85
86 for responder_name, responder in getmembers(resource, callable):
87 if _DECORABLE_METHOD_NAME.match(responder_name):
88 # This pattern is necessary to capture the current value of
89 # responder in the do_before_all closure; otherwise, they
90 # will capture the same responder variable that is shared
91 # between iterations of the for loop, above.
92 def let(responder=responder):
93 do_before_all = _wrap_with_before(
94 responder, action, args, kwargs, is_async
95 )
96
97 setattr(resource, responder_name, do_before_all)
98
99 let()
100
101 return resource
102
103 else:
104 responder = responder_or_resource
105 do_before_one = _wrap_with_before(responder, action, args, kwargs, is_async)
106
107 return do_before_one
108
109 return _before
110
111
112 def after(action, *args, is_async=False, **kwargs):
113 """Execute the given action function *after* the responder.
114
115 Args:
116 action (callable): A function of the form
117 ``func(req, resp, resource)``, where `resource` is a
118 reference to the resource class instance associated with the
119 request
120
121 *args: Any additional arguments will be passed to *action* in the
122 order given, immediately following the *req*, *resp*, *resource*,
123 and *params* arguments.
124
125 Keyword Args:
126 is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
127 the decorated responder is a coroutine function (i.e., that it
128 is defined with ``async def``) or that it returns an awaitable
129 coroutine object.
130
131 Normally, when the function source is declared using ``async def``,
132 the resulting function object is flagged to indicate it returns a
133 coroutine when invoked, and this can be automatically detected.
134 However, it is possible to use a regular function to return an
135 awaitable coroutine object, in which case a hint is required to let
136 the framework know what to expect. Also, a hint is always required
137 when using a cythonized coroutine function, since Cython does not
138 flag them in a way that can be detected in advance, even when the
139 function is declared using ``async def``.
140
141 **kwargs: Any additional keyword arguments will be passed through to
142 *action*.
143 """
144
145 def _after(responder_or_resource):
146 if isinstance(responder_or_resource, type):
147 resource = responder_or_resource
148
149 for responder_name, responder in getmembers(resource, callable):
150 if _DECORABLE_METHOD_NAME.match(responder_name):
151
152 def let(responder=responder):
153 do_after_all = _wrap_with_after(
154 responder, action, args, kwargs, is_async
155 )
156
157 setattr(resource, responder_name, do_after_all)
158
159 let()
160
161 return resource
162
163 else:
164 responder = responder_or_resource
165 do_after_one = _wrap_with_after(responder, action, args, kwargs, is_async)
166
167 return do_after_one
168
169 return _after
170
171
172 # -----------------------------------------------------------------------------
173 # Helpers
174 # -----------------------------------------------------------------------------
175
176
177 def _wrap_with_after(responder, action, action_args, action_kwargs, is_async):
178 """Execute the given action function after a responder method.
179
180 Args:
181 responder: The responder method to wrap.
182 action: A function with a signature similar to a resource responder
183 method, taking the form ``func(req, resp, resource)``.
184 action_args: Additional positional agruments to pass to *action*.
185 action_kwargs: Additional keyword arguments to pass to *action*.
186 is_async: Set to ``True`` for cythonized responders that are
187 actually coroutine functions, since such responders can not
188 be auto-detected. A hint is also required for regular functions
189 that happen to return an awaitable coroutine object.
190 """
191
192 responder_argnames = get_argnames(responder)
193 extra_argnames = responder_argnames[2:] # Skip req, resp
194
195 if is_async or iscoroutinefunction(responder):
196 # NOTE(kgriffs): I manually verified that the implicit "else" branch
197 # is actually covered, but coverage isn't tracking it for
198 # some reason.
199 if not is_async: # pragma: nocover
200 action = _wrap_non_coroutine_unsafe(action)
201
202 @wraps(responder)
203 async def do_after(self, req, resp, *args, **kwargs):
204 if args:
205 _merge_responder_args(args, kwargs, extra_argnames)
206
207 await responder(self, req, resp, **kwargs)
208 await action(req, resp, self, *action_args, **action_kwargs)
209
210 else:
211
212 @wraps(responder)
213 def do_after(self, req, resp, *args, **kwargs):
214 if args:
215 _merge_responder_args(args, kwargs, extra_argnames)
216
217 responder(self, req, resp, **kwargs)
218 action(req, resp, self, *action_args, **action_kwargs)
219
220 return do_after
221
222
223 def _wrap_with_before(responder, action, action_args, action_kwargs, is_async):
224 """Execute the given action function before a responder method.
225
226 Args:
227 responder: The responder method to wrap.
228 action: A function with a similar signature to a resource responder
229 method, taking the form ``func(req, resp, resource, params)``.
230 action_args: Additional positional agruments to pass to *action*.
231 action_kwargs: Additional keyword arguments to pass to *action*.
232 is_async: Set to ``True`` for cythonized responders that are
233 actually coroutine functions, since such responders can not
234 be auto-detected. A hint is also required for regular functions
235 that happen to return an awaitable coroutine object.
236 """
237
238 responder_argnames = get_argnames(responder)
239 extra_argnames = responder_argnames[2:] # Skip req, resp
240
241 if is_async or iscoroutinefunction(responder):
242 # NOTE(kgriffs): I manually verified that the implicit "else" branch
243 # is actually covered, but coverage isn't tracking it for
244 # some reason.
245 if not is_async: # pragma: nocover
246 action = _wrap_non_coroutine_unsafe(action)
247
248 @wraps(responder)
249 async def do_before(self, req, resp, *args, **kwargs):
250 if args:
251 _merge_responder_args(args, kwargs, extra_argnames)
252
253 await action(req, resp, self, kwargs, *action_args, **action_kwargs)
254 await responder(self, req, resp, **kwargs)
255
256 else:
257
258 @wraps(responder)
259 def do_before(self, req, resp, *args, **kwargs):
260 if args:
261 _merge_responder_args(args, kwargs, extra_argnames)
262
263 action(req, resp, self, kwargs, *action_args, **action_kwargs)
264 responder(self, req, resp, **kwargs)
265
266 return do_before
267
268
269 def _merge_responder_args(args, kwargs, argnames):
270 """Merge responder args into kwargs.
271
272 The framework always passes extra args as keyword arguments.
273 However, when the app calls the responder directly, it might use
274 positional arguments instead, so we need to handle that case. This
275 might happen, for example, when overriding a resource and calling
276 a responder via super().
277
278 Args:
279 args (tuple): Extra args passed into the responder
280 kwargs (dict): Keyword args passed into the responder
281 argnames (list): Extra argnames from the responder's
282 signature, ordered as defined
283 """
284
285 # NOTE(kgriffs): Merge positional args into kwargs by matching
286 # them up to the responder's signature. To do that, we must
287 # find out the names of the positional arguments by matching
288 # them in the order of the arguments named in the responder's
289 # signature.
290 for i, argname in enumerate(argnames):
291 # NOTE(kgriffs): extra_argnames may contain keyword arguments,
292 # which wont be in the args list, and are already in the kwargs
293 # dict anyway, so detect and skip them.
294 if argname not in kwargs:
295 kwargs[argname] = args[i]
296
[end of falcon/hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/falcon/hooks.py b/falcon/hooks.py
--- a/falcon/hooks.py
+++ b/falcon/hooks.py
@@ -119,8 +119,8 @@
request
*args: Any additional arguments will be passed to *action* in the
- order given, immediately following the *req*, *resp*, *resource*,
- and *params* arguments.
+ order given, immediately following the *req*, *resp* and *resource*
+ arguments.
Keyword Args:
is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
|
{"golden_diff": "diff --git a/falcon/hooks.py b/falcon/hooks.py\n--- a/falcon/hooks.py\n+++ b/falcon/hooks.py\n@@ -119,8 +119,8 @@\n request\n \n *args: Any additional arguments will be passed to *action* in the\n- order given, immediately following the *req*, *resp*, *resource*,\n- and *params* arguments.\n+ order given, immediately following the *req*, *resp* and *resource*\n+ arguments.\n \n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n", "issue": "Hook after params attribute is missing \nHi\r\n\r\n```python\r\ndef do_before(req, resp, resource, params):\r\n\t# here params is available\r\n\r\ndef do_after(req, resp, resource, params):\r\n\t# here params is not available\r\n\r\n```\r\n\r\nHow can I access the params?\r\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hook decorators.\"\"\"\n\nfrom functools import wraps\nfrom inspect import getmembers\nfrom inspect import iscoroutinefunction\nimport re\n\nfrom falcon.constants import COMBINED_METHODS\nfrom falcon.util.misc import get_argnames\nfrom falcon.util.sync import _wrap_non_coroutine_unsafe\n\n\n_DECORABLE_METHOD_NAME = re.compile(\n r'^on_({})(_\\w+)?$'.format('|'.join(method.lower() for method in COMBINED_METHODS))\n)\n\n\ndef before(action, *args, is_async=False, **kwargs):\n \"\"\"Execute the given action function *before* the responder.\n\n The `params` argument that is passed to the hook\n contains only the fields from the URI template path; it does not\n include query string values.\n\n Hooks may inject extra params as needed. For example::\n\n def do_something(req, resp, resource, params):\n try:\n params['id'] = int(params['id'])\n except ValueError:\n raise falcon.HTTPBadRequest(title='Invalid ID',\n description='ID was not valid.')\n\n params['answer'] = 42\n\n Args:\n action (callable): A function of the form\n ``func(req, resp, resource, params)``, where `resource` is a\n reference to the resource class instance associated with the\n request and `params` is a dict of URI template field names,\n if any, that will be passed into the resource responder as\n kwargs.\n\n *args: Any additional arguments will be passed to *action* in the\n order given, immediately following the *req*, *resp*, *resource*,\n and *params* arguments.\n\n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. 
Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n **kwargs: Any additional keyword arguments will be passed through to\n *action*.\n \"\"\"\n\n def _before(responder_or_resource):\n if isinstance(responder_or_resource, type):\n resource = responder_or_resource\n\n for responder_name, responder in getmembers(resource, callable):\n if _DECORABLE_METHOD_NAME.match(responder_name):\n # This pattern is necessary to capture the current value of\n # responder in the do_before_all closure; otherwise, they\n # will capture the same responder variable that is shared\n # between iterations of the for loop, above.\n def let(responder=responder):\n do_before_all = _wrap_with_before(\n responder, action, args, kwargs, is_async\n )\n\n setattr(resource, responder_name, do_before_all)\n\n let()\n\n return resource\n\n else:\n responder = responder_or_resource\n do_before_one = _wrap_with_before(responder, action, args, kwargs, is_async)\n\n return do_before_one\n\n return _before\n\n\ndef after(action, *args, is_async=False, **kwargs):\n \"\"\"Execute the given action function *after* the responder.\n\n Args:\n action (callable): A function of the form\n ``func(req, resp, resource)``, where `resource` is a\n reference to the resource class instance associated with the\n request\n\n *args: Any additional arguments will be passed to *action* in the\n order given, immediately following the *req*, *resp*, *resource*,\n and *params* arguments.\n\n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. 
Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n **kwargs: Any additional keyword arguments will be passed through to\n *action*.\n \"\"\"\n\n def _after(responder_or_resource):\n if isinstance(responder_or_resource, type):\n resource = responder_or_resource\n\n for responder_name, responder in getmembers(resource, callable):\n if _DECORABLE_METHOD_NAME.match(responder_name):\n\n def let(responder=responder):\n do_after_all = _wrap_with_after(\n responder, action, args, kwargs, is_async\n )\n\n setattr(resource, responder_name, do_after_all)\n\n let()\n\n return resource\n\n else:\n responder = responder_or_resource\n do_after_one = _wrap_with_after(responder, action, args, kwargs, is_async)\n\n return do_after_one\n\n return _after\n\n\n# -----------------------------------------------------------------------------\n# Helpers\n# -----------------------------------------------------------------------------\n\n\ndef _wrap_with_after(responder, action, action_args, action_kwargs, is_async):\n \"\"\"Execute the given action function after a responder method.\n\n Args:\n responder: The responder method to wrap.\n action: A function with a signature similar to a resource responder\n method, taking the form ``func(req, resp, resource)``.\n action_args: Additional positional agruments to pass to *action*.\n action_kwargs: Additional keyword arguments to pass to *action*.\n is_async: Set to ``True`` for cythonized responders that are\n actually coroutine functions, since such responders can not\n be auto-detected. A hint is also required for regular functions\n that happen to return an awaitable coroutine object.\n \"\"\"\n\n responder_argnames = get_argnames(responder)\n extra_argnames = responder_argnames[2:] # Skip req, resp\n\n if is_async or iscoroutinefunction(responder):\n # NOTE(kgriffs): I manually verified that the implicit \"else\" branch\n # is actually covered, but coverage isn't tracking it for\n # some reason.\n if not is_async: # pragma: nocover\n action = _wrap_non_coroutine_unsafe(action)\n\n @wraps(responder)\n async def do_after(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n await responder(self, req, resp, **kwargs)\n await action(req, resp, self, *action_args, **action_kwargs)\n\n else:\n\n @wraps(responder)\n def do_after(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n responder(self, req, resp, **kwargs)\n action(req, resp, self, *action_args, **action_kwargs)\n\n return do_after\n\n\ndef _wrap_with_before(responder, action, action_args, action_kwargs, is_async):\n \"\"\"Execute the given action function before a responder method.\n\n Args:\n responder: The responder method to wrap.\n action: A function with a similar signature to a resource responder\n method, taking the form ``func(req, resp, resource, params)``.\n action_args: Additional positional agruments to pass to *action*.\n action_kwargs: Additional keyword arguments to pass to *action*.\n is_async: Set to ``True`` for cythonized responders that are\n actually coroutine functions, since such responders can not\n be auto-detected. 
A hint is also required for regular functions\n that happen to return an awaitable coroutine object.\n \"\"\"\n\n responder_argnames = get_argnames(responder)\n extra_argnames = responder_argnames[2:] # Skip req, resp\n\n if is_async or iscoroutinefunction(responder):\n # NOTE(kgriffs): I manually verified that the implicit \"else\" branch\n # is actually covered, but coverage isn't tracking it for\n # some reason.\n if not is_async: # pragma: nocover\n action = _wrap_non_coroutine_unsafe(action)\n\n @wraps(responder)\n async def do_before(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n await action(req, resp, self, kwargs, *action_args, **action_kwargs)\n await responder(self, req, resp, **kwargs)\n\n else:\n\n @wraps(responder)\n def do_before(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n action(req, resp, self, kwargs, *action_args, **action_kwargs)\n responder(self, req, resp, **kwargs)\n\n return do_before\n\n\ndef _merge_responder_args(args, kwargs, argnames):\n \"\"\"Merge responder args into kwargs.\n\n The framework always passes extra args as keyword arguments.\n However, when the app calls the responder directly, it might use\n positional arguments instead, so we need to handle that case. This\n might happen, for example, when overriding a resource and calling\n a responder via super().\n\n Args:\n args (tuple): Extra args passed into the responder\n kwargs (dict): Keyword args passed into the responder\n argnames (list): Extra argnames from the responder's\n signature, ordered as defined\n \"\"\"\n\n # NOTE(kgriffs): Merge positional args into kwargs by matching\n # them up to the responder's signature. To do that, we must\n # find out the names of the positional arguments by matching\n # them in the order of the arguments named in the responder's\n # signature.\n for i, argname in enumerate(argnames):\n # NOTE(kgriffs): extra_argnames may contain keyword arguments,\n # which wont be in the args list, and are already in the kwargs\n # dict anyway, so detect and skip them.\n if argname not in kwargs:\n kwargs[argname] = args[i]\n", "path": "falcon/hooks.py"}]}
| 3,902 | 136 |
gh_patches_debug_5898 | rasdani/github-patches | git_diff | pytorch__pytorch-4414 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnboundLocalError: local variable 'distutils' referenced before assignment
This error happens building from source in Linux. Introduced in https://github.com/pytorch/pytorch/pull/3993
</issue>
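The error is a Python scoping gotcha rather than anything ninja-specific: `import distutils.msvccompiler` inside `build_extension` makes `distutils` a *local* name for the whole function, so the `else` branch reads an unbound local instead of the module-level import. The same shape, reproduced with a stdlib package everyone has (`xml` stands in for `distutils` here):

```python
import xml.sax                      # binds the global name "xml"


def pick_parser(use_dom):
    if use_dom:
        import xml.dom.minidom      # binds "xml" as a local for this whole function
        return xml.dom.minidom
    # On this branch the local "xml" was never assigned, so this line raises
    # UnboundLocalError, just like `distutils.unixccompiler.UnixCCompiler` above.
    return xml.sax


pick_parser(use_dom=False)
```

The golden diff below applies the matching fix: import the needed submodule on the branch that actually uses it.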
<code>
[start of tools/setup_helpers/ninja_builder.py]
1 import re
2 import os
3 import sys
4 import setuptools
5 import distutils
6 from contextlib import contextmanager
7 import subprocess
8
9 BUILD_DIR = 'build'
10
11
12 # on the fly create a ninja file in build/ and then
13 # run it when run() is called.
14 class NinjaBuilder(object):
15 def __init__(self, name):
16 import ninja
17 if not os.path.exists(BUILD_DIR):
18 os.mkdir(BUILD_DIR)
19 self.ninja_program = os.path.join(ninja.BIN_DIR, 'ninja')
20 self.name = name
21 self.filename = os.path.join(BUILD_DIR, 'build.{}.ninja'.format(name))
22 self.writer = ninja.Writer(open(self.filename, 'w'))
23 self.writer.rule('do_cmd', '$cmd')
24 self.writer.rule('compile', '$cmd')
25 self.compdb_targets = []
26
27 def run(self):
28 import ninja
29 self.writer.close()
30 try:
31 subprocess.check_call([self.ninja_program, '-f', self.filename])
32 except subprocess.CalledProcessError as err:
33 # avoid printing the setup.py stack trace because it obscures the
34 # C++ errors.
35 sys.stderr.write(str(err) + '\n')
36 sys.exit(1)
37 compile_db_path = os.path.join(BUILD_DIR, '{}_compile_commands.json'.format(self.name))
38 with open(compile_db_path, 'w') as compile_db:
39 subprocess.check_call([self.ninja_program, '-f', self.filename,
40 '-t', 'compdb', 'compile'], stdout=compile_db)
41
42 # weird build logic in build develop causes some things to be run
43 # twice so make sure even after we run the command we still
44 # reset this to a valid state
45 # don't use the same name or you can't inspect the real ninja files
46 self.__init__(self.name + "_")
47
48
49 class ninja_build_ext(setuptools.command.build_ext.build_ext):
50 def _build_default(self, ext):
51 return setuptools.command.build_ext.build_ext.build_extension(self, ext)
52
53 def build_extension(self, ext):
54 builder = NinjaBuilder(ext.name)
55
56 @contextmanager
57 def patch(obj, attr_name, val):
58 orig_val = getattr(obj, attr_name)
59 setattr(obj, attr_name, val)
60 try:
61 yield
62 finally:
63 setattr(obj, attr_name, orig_val)
64
65 if self.compiler.compiler_type == 'msvc':
66 import distutils.msvccompiler
67 import distutils.msvc9compiler
68 if sys.version[0] == 2:
69 orig_compiler = distutils.msvc9compiler.MSVCCompiler
70 else:
71 orig_compiler = distutils._msvccompiler.MSVCCompiler
72 orig_compile = orig_compiler.compile
73 orig_link = orig_compiler.link
74 orig_spawn = orig_compiler.spawn
75 else:
76 orig_compiler = distutils.unixccompiler.UnixCCompiler
77 orig_compile = orig_compiler._compile
78 orig_link = orig_compiler.link
79
80 def win_compile(self, sources,
81 output_dir=None, macros=None, include_dirs=None, debug=0,
82 extra_preargs=None, extra_postargs=None, depends=None):
83
84 def spawn(cmd):
85 # Using regex to match src and obj
86
87 src_regex = re.compile('/T(p|c)(.*)')
88 src_list = [m.group(2) for m in (
89 src_regex.match(elem) for elem in cmd) if m]
90
91 obj_regex = re.compile('/Fo(.*)')
92 obj_list = [m.group(1) for m in (
93 obj_regex.match(elem) for elem in cmd) if m]
94
95 if len(src_list) >= 1 and len(obj_list) >= 1:
96 src = src_list[0]
97 obj = obj_list[0]
98 else:
99 # Cannot find src or obj, revert back to original style
100 return orig_spawn(cmd)
101
102 builder.writer.build(
103 [obj], 'compile', [src],
104 variables={
105 'cmd': cmd,
106 'deps': 'msvc'
107 })
108
109 with patch(self, 'spawn', spawn):
110 orig_compile(self, sources,
111 output_dir, macros, include_dirs, debug,
112 extra_preargs, extra_postargs, depends)
113
114 def unix_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
115 depfile = os.path.splitext(obj)[0] + '.d'
116
117 def spawn(cmd):
118 builder.writer.build(
119 [obj], 'compile', [src],
120 variables={
121 'cmd': cmd,
122 'depfile': depfile,
123 'deps': 'gcc'
124 })
125
126 extra_postargs = extra_postargs + ['-MMD', '-MF', depfile]
127 with patch(self, 'spawn', spawn):
128 orig_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts)
129
130 def link(self, target_desc, objects,
131 output_filename, output_dir=None, libraries=None,
132 library_dirs=None, runtime_library_dirs=None,
133 export_symbols=None, debug=0, extra_preargs=None,
134 extra_postargs=None, build_temp=None, target_lang=None):
135
136 builder.run()
137 orig_link(self, target_desc, objects,
138 output_filename, output_dir, libraries,
139 library_dirs, runtime_library_dirs,
140 export_symbols, debug, extra_preargs,
141 extra_postargs, build_temp, target_lang)
142
143 if self.compiler.compiler_type == 'msvc':
144 _compile_func = win_compile
145 _compile_func_name = 'compile'
146 else:
147 _compile_func = unix_compile
148 _compile_func_name = '_compile'
149
150 with patch(orig_compiler, _compile_func_name, _compile_func):
151 with patch(orig_compiler, 'link', link):
152 with patch(self, 'force', True):
153 self._build_default(ext)
154
[end of tools/setup_helpers/ninja_builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/setup_helpers/ninja_builder.py b/tools/setup_helpers/ninja_builder.py
--- a/tools/setup_helpers/ninja_builder.py
+++ b/tools/setup_helpers/ninja_builder.py
@@ -73,6 +73,7 @@
orig_link = orig_compiler.link
orig_spawn = orig_compiler.spawn
else:
+ import distutils.unixccompiler
orig_compiler = distutils.unixccompiler.UnixCCompiler
orig_compile = orig_compiler._compile
orig_link = orig_compiler.link
|
{"golden_diff": "diff --git a/tools/setup_helpers/ninja_builder.py b/tools/setup_helpers/ninja_builder.py\n--- a/tools/setup_helpers/ninja_builder.py\n+++ b/tools/setup_helpers/ninja_builder.py\n@@ -73,6 +73,7 @@\n orig_link = orig_compiler.link\n orig_spawn = orig_compiler.spawn\n else:\n+ import distutils.unixccompiler\n orig_compiler = distutils.unixccompiler.UnixCCompiler\n orig_compile = orig_compiler._compile\n orig_link = orig_compiler.link\n", "issue": "UnboundLocalError: local variable 'distutils' referenced before assignment\nThis error happens building from source in Linux. Introduced in https://github.com/pytorch/pytorch/pull/3993\n", "before_files": [{"content": "import re\nimport os\nimport sys\nimport setuptools\nimport distutils\nfrom contextlib import contextmanager\nimport subprocess\n\nBUILD_DIR = 'build'\n\n\n# on the fly create a ninja file in build/ and then\n# run it when run() is called.\nclass NinjaBuilder(object):\n def __init__(self, name):\n import ninja\n if not os.path.exists(BUILD_DIR):\n os.mkdir(BUILD_DIR)\n self.ninja_program = os.path.join(ninja.BIN_DIR, 'ninja')\n self.name = name\n self.filename = os.path.join(BUILD_DIR, 'build.{}.ninja'.format(name))\n self.writer = ninja.Writer(open(self.filename, 'w'))\n self.writer.rule('do_cmd', '$cmd')\n self.writer.rule('compile', '$cmd')\n self.compdb_targets = []\n\n def run(self):\n import ninja\n self.writer.close()\n try:\n subprocess.check_call([self.ninja_program, '-f', self.filename])\n except subprocess.CalledProcessError as err:\n # avoid printing the setup.py stack trace because it obscures the\n # C++ errors.\n sys.stderr.write(str(err) + '\\n')\n sys.exit(1)\n compile_db_path = os.path.join(BUILD_DIR, '{}_compile_commands.json'.format(self.name))\n with open(compile_db_path, 'w') as compile_db:\n subprocess.check_call([self.ninja_program, '-f', self.filename,\n '-t', 'compdb', 'compile'], stdout=compile_db)\n\n # weird build logic in build develop causes some things to be run\n # twice so make sure even after we run the command we still\n # reset this to a valid state\n # don't use the same name or you can't inspect the real ninja files\n self.__init__(self.name + \"_\")\n\n\nclass ninja_build_ext(setuptools.command.build_ext.build_ext):\n def _build_default(self, ext):\n return setuptools.command.build_ext.build_ext.build_extension(self, ext)\n\n def build_extension(self, ext):\n builder = NinjaBuilder(ext.name)\n\n @contextmanager\n def patch(obj, attr_name, val):\n orig_val = getattr(obj, attr_name)\n setattr(obj, attr_name, val)\n try:\n yield\n finally:\n setattr(obj, attr_name, orig_val)\n\n if self.compiler.compiler_type == 'msvc':\n import distutils.msvccompiler\n import distutils.msvc9compiler\n if sys.version[0] == 2:\n orig_compiler = distutils.msvc9compiler.MSVCCompiler\n else:\n orig_compiler = distutils._msvccompiler.MSVCCompiler\n orig_compile = orig_compiler.compile\n orig_link = orig_compiler.link\n orig_spawn = orig_compiler.spawn\n else:\n orig_compiler = distutils.unixccompiler.UnixCCompiler\n orig_compile = orig_compiler._compile\n orig_link = orig_compiler.link\n\n def win_compile(self, sources,\n output_dir=None, macros=None, include_dirs=None, debug=0,\n extra_preargs=None, extra_postargs=None, depends=None):\n\n def spawn(cmd):\n # Using regex to match src and obj\n\n src_regex = re.compile('/T(p|c)(.*)')\n src_list = [m.group(2) for m in (\n src_regex.match(elem) for elem in cmd) if m]\n\n obj_regex = re.compile('/Fo(.*)')\n obj_list = [m.group(1) for m in (\n 
obj_regex.match(elem) for elem in cmd) if m]\n\n if len(src_list) >= 1 and len(obj_list) >= 1:\n src = src_list[0]\n obj = obj_list[0]\n else:\n # Cannot find src or obj, revert back to original style\n return orig_spawn(cmd)\n\n builder.writer.build(\n [obj], 'compile', [src],\n variables={\n 'cmd': cmd,\n 'deps': 'msvc'\n })\n\n with patch(self, 'spawn', spawn):\n orig_compile(self, sources,\n output_dir, macros, include_dirs, debug,\n extra_preargs, extra_postargs, depends)\n\n def unix_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\n depfile = os.path.splitext(obj)[0] + '.d'\n\n def spawn(cmd):\n builder.writer.build(\n [obj], 'compile', [src],\n variables={\n 'cmd': cmd,\n 'depfile': depfile,\n 'deps': 'gcc'\n })\n\n extra_postargs = extra_postargs + ['-MMD', '-MF', depfile]\n with patch(self, 'spawn', spawn):\n orig_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n def link(self, target_desc, objects,\n output_filename, output_dir=None, libraries=None,\n library_dirs=None, runtime_library_dirs=None,\n export_symbols=None, debug=0, extra_preargs=None,\n extra_postargs=None, build_temp=None, target_lang=None):\n\n builder.run()\n orig_link(self, target_desc, objects,\n output_filename, output_dir, libraries,\n library_dirs, runtime_library_dirs,\n export_symbols, debug, extra_preargs,\n extra_postargs, build_temp, target_lang)\n\n if self.compiler.compiler_type == 'msvc':\n _compile_func = win_compile\n _compile_func_name = 'compile'\n else:\n _compile_func = unix_compile\n _compile_func_name = '_compile'\n\n with patch(orig_compiler, _compile_func_name, _compile_func):\n with patch(orig_compiler, 'link', link):\n with patch(self, 'force', True):\n self._build_default(ext)\n", "path": "tools/setup_helpers/ninja_builder.py"}]}
| 2,189 | 113 |
gh_patches_debug_32725
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-5393
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Job Buttons do not honor the Job task_queue
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): 1.6.11
* Python version: 3.11
* Database platform, version: NA
* Middleware(s): NA
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create a JobButtonReceiver that uses a queue other than the default
2. Create a Job Button that points to the JobButtonReceiver and connect it to a content type (like dcim.devices)
3. Click the Job Button on an object of the content type from above.
<!-- What did you expect to happen? -->
### Expected Behavior
The Job runs under the queue that is configured for JobButtonReceiver.
<!-- What happened instead? -->
### Observed Behavior
The Job runs under the default queue (which in my case causes an error, but may not in other use cases).
</issue>
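The issue above comes down to the Job Button form never telling the job-run view which Celery queue to use. A minimal sketch of the direction the patch further below takes: pick the first queue configured on the button's Job and fall back to the Celery default. It assumes the template tag can reach `job_button.job.task_queues` and Django settings; `pick_task_queue` is a hypothetical helper used only for illustration, while the real patch inlines this logic.

```python
from django.conf import settings


def pick_task_queue(job_button):
    # Hypothetical helper: prefer the first queue configured on the Job
    # behind this button, falling back to the Celery default queue.
    try:
        return job_button.job.task_queues[0]
    except IndexError:
        return settings.CELERY_TASK_DEFAULT_QUEUE
```

The chosen queue can then be rendered as a hidden `_task_queue` input so the scheduled run lands on the queue configured for the JobButtonReceiver.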
<code>
[start of nautobot/extras/templatetags/job_buttons.py]
1 from collections import OrderedDict
2
3 from django import template
4 from django.contrib.contenttypes.models import ContentType
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.utils.safestring import mark_safe
8
9 from nautobot.extras.models import Job, JobButton
10 from nautobot.utilities.utils import render_jinja2
11
12
13 register = template.Library()
14
15 GROUP_DROPDOWN = """
16 <div class="btn-group">
17 <button type="button" class="btn btn-sm btn-{group_button_class} dropdown-toggle" data-toggle="dropdown">
18 {group_name} <span class="caret"></span>
19 </button>
20 <ul class="dropdown-menu pull-right">
21 {grouped_buttons}
22 </ul>
23 </div>
24 """
25
26 HIDDEN_INPUTS = """
27 <input type="hidden" name="csrfmiddlewaretoken" value="{csrf_token}">
28 <input type="hidden" name="object_pk" value="{object_pk}">
29 <input type="hidden" name="object_model_name" value="{object_model_name}">
30 <input type="hidden" name="_schedule_type" value="immediately">
31 <input type="hidden" name="_return_url" value="{redirect_path}">
32 <input type="hidden" name="_commit" value="on">
33 """
34
35 NO_CONFIRM_BUTTON = """
36 <button type="submit" form="form_id_{button_id}" class="btn btn-sm btn-{button_class}" {disabled}>{button_text}</button>
37 """
38
39 NO_CONFIRM_FORM = """
40 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
41 {hidden_inputs}
42 </form>
43 """
44
45 CONFIRM_BUTTON = """
46 <button type="button" class="btn btn-sm btn-{button_class}" data-toggle="modal" data-target="#confirm_modal_id_{button_id}" {disabled}>
47 {button_text}
48 </button>
49 """
50
51 CONFIRM_MODAL = """
52 <div class="modal fade" id="confirm_modal_id_{button_id}" tabindex="-1" role="dialog" aria-labelledby="confirm_modal_label_{button_id}">
53 <div class="modal-dialog" role="document">
54 <div class="modal-content">
55 <div class="modal-header">
56 <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
57 <h4 class="modal-title" id="confirm_modal_label_{button_id}">Confirmation</h4>
58 </div>
59 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
60 <div class="modal-body">
61 {hidden_inputs}
62 Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?
63 </div>
64 <div class="modal-footer">
65 <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
66 <button type="submit" class="btn btn-primary">Confirm</button>
67 </div>
68 </form>
69 </div>
70 </div>
71 </div>
72 """
73
74 SAFE_EMPTY_STR = mark_safe("") # noqa: S308
75
76
77 def _render_job_button_for_obj(job_button, obj, context, content_type):
78 """
79 Helper method for job_buttons templatetag to reduce repetition of code.
80
81 Returns:
82 (str, str): (button_html, form_html)
83 """
84 # Pass select context data when rendering the JobButton text as Jinja2
85 button_context = {
86 "obj": obj,
87 "debug": context.get("debug", False), # django.template.context_processors.debug
88 "request": context["request"], # django.template.context_processors.request
89 "user": context["user"], # django.contrib.auth.context_processors.auth
90 "perms": context["perms"], # django.contrib.auth.context_processors.auth
91 }
92 try:
93 text_rendered = render_jinja2(job_button.text, button_context)
94 except Exception as exc:
95 return (
96 format_html(
97 '<a class="btn btn-sm btn-{}" disabled="disabled" title="{}"><i class="mdi mdi-alert"></i> {}</a>\n',
98 "default" if not job_button.group_name else "link",
99 exc,
100 job_button.name,
101 ),
102 SAFE_EMPTY_STR,
103 )
104
105 if not text_rendered:
106 return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)
107
108 # Disable buttons if the user doesn't have permission to run the underlying Job.
109 has_run_perm = Job.objects.check_perms(context["user"], instance=job_button.job, action="run")
110 hidden_inputs = format_html(
111 HIDDEN_INPUTS,
112 csrf_token=context["csrf_token"],
113 object_pk=obj.pk,
114 object_model_name=f"{content_type.app_label}.{content_type.model}",
115 redirect_path=context["request"].path,
116 )
117 template_args = {
118 "button_id": job_button.pk,
119 "button_text": text_rendered,
120 "button_class": job_button.button_class if not job_button.group_name else "link",
121 "button_url": reverse("extras:job_run", kwargs={"slug": job_button.job.slug}),
122 "object": obj,
123 "job": job_button.job,
124 "hidden_inputs": hidden_inputs,
125 "disabled": "" if has_run_perm else "disabled",
126 }
127
128 if job_button.confirmation:
129 return (
130 format_html(CONFIRM_BUTTON, **template_args),
131 format_html(CONFIRM_MODAL, **template_args),
132 )
133 else:
134 return (
135 format_html(NO_CONFIRM_BUTTON, **template_args),
136 format_html(NO_CONFIRM_FORM, **template_args),
137 )
138
139
140 @register.simple_tag(takes_context=True)
141 def job_buttons(context, obj):
142 """
143 Render all applicable job buttons for the given object.
144 """
145 content_type = ContentType.objects.get_for_model(obj)
146 # We will enforce "run" permission later in deciding which buttons to show as disabled.
147 buttons = JobButton.objects.filter(content_types=content_type)
148 if not buttons:
149 return SAFE_EMPTY_STR
150
151 buttons_html = forms_html = SAFE_EMPTY_STR
152 group_names = OrderedDict()
153
154 for jb in buttons:
155 # Organize job buttons by group for later processing
156 if jb.group_name:
157 group_names.setdefault(jb.group_name, []).append(jb)
158
159 # Render and add non-grouped buttons
160 else:
161 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
162 buttons_html += button_html
163 forms_html += form_html
164
165 # Add grouped buttons to template
166 for group_name, buttons in group_names.items():
167 group_button_class = buttons[0].button_class
168
169 buttons_rendered = SAFE_EMPTY_STR
170
171 for jb in buttons:
172 # Render grouped buttons as list items
173 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
174 if button_html:
175 buttons_rendered += format_html("<li>{}</li>", button_html)
176 if form_html:
177 forms_html += form_html
178
179 if buttons_rendered:
180 buttons_html += format_html(
181 GROUP_DROPDOWN,
182 group_button_class=group_button_class,
183 group_name=group_name,
184 grouped_buttons=buttons_rendered,
185 )
186
187 # We want all of the buttons first and then any modals and forms so the buttons render properly
188 return buttons_html + forms_html
189
[end of nautobot/extras/templatetags/job_buttons.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py
--- a/nautobot/extras/templatetags/job_buttons.py
+++ b/nautobot/extras/templatetags/job_buttons.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
from django import template
+from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.utils.html import format_html
@@ -28,6 +29,7 @@
<input type="hidden" name="object_pk" value="{object_pk}">
<input type="hidden" name="object_model_name" value="{object_model_name}">
<input type="hidden" name="_schedule_type" value="immediately">
+<input type="hidden" name="_task_queue" value="{task_queue}">
<input type="hidden" name="_return_url" value="{redirect_path}">
<input type="hidden" name="_commit" value="on">
"""
@@ -107,12 +109,17 @@
# Disable buttons if the user doesn't have permission to run the underlying Job.
has_run_perm = Job.objects.check_perms(context["user"], instance=job_button.job, action="run")
+ try:
+ _task_queue = job_button.job.task_queues[0]
+ except IndexError:
+ _task_queue = settings.CELERY_TASK_DEFAULT_QUEUE
hidden_inputs = format_html(
HIDDEN_INPUTS,
csrf_token=context["csrf_token"],
object_pk=obj.pk,
object_model_name=f"{content_type.app_label}.{content_type.model}",
redirect_path=context["request"].path,
+ task_queue=_task_queue,
)
template_args = {
"button_id": job_button.pk,
|
{"golden_diff": "diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py\n--- a/nautobot/extras/templatetags/job_buttons.py\n+++ b/nautobot/extras/templatetags/job_buttons.py\n@@ -1,6 +1,7 @@\n from collections import OrderedDict\n \n from django import template\n+from django.conf import settings\n from django.contrib.contenttypes.models import ContentType\n from django.urls import reverse\n from django.utils.html import format_html\n@@ -28,6 +29,7 @@\n <input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n <input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n <input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n+<input type=\"hidden\" name=\"_task_queue\" value=\"{task_queue}\">\n <input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n <input type=\"hidden\" name=\"_commit\" value=\"on\">\n \"\"\"\n@@ -107,12 +109,17 @@\n \n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n+ try:\n+ _task_queue = job_button.job.task_queues[0]\n+ except IndexError:\n+ _task_queue = settings.CELERY_TASK_DEFAULT_QUEUE\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n+ task_queue=_task_queue,\n )\n template_args = {\n \"button_id\": job_button.pk,\n", "issue": "Job Buttons do not honor the Job task_queue\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.6.11\r\n* Python version: 3.11\r\n* Database platform, version: NA\r\n* Middleware(s): NA\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a JobButtonReciever that uses a queue other than default\r\n2. Create a Job Button that points to the JobButtonReciever and connect it to a contenttype (like dcim.devices)\r\n3. Click the Job Button on the contenttype from above.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nThe Job runs under the queue that is configured for JobButtonReceiver.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nThe Job runs under the default queue (which in my case causes an error, but may not in other use cases).\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.extras.models import Job, JobButton\nfrom nautobot.utilities.utils import render_jinja2\n\n\nregister = template.Library()\n\nGROUP_DROPDOWN = \"\"\"\n<div class=\"btn-group\">\n <button type=\"button\" class=\"btn btn-sm btn-{group_button_class} dropdown-toggle\" data-toggle=\"dropdown\">\n {group_name} <span class=\"caret\"></span>\n </button>\n <ul class=\"dropdown-menu pull-right\">\n {grouped_buttons}\n </ul>\n</div>\n\"\"\"\n\nHIDDEN_INPUTS = \"\"\"\n<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\"{csrf_token}\">\n<input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n<input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n<input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n<input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n<input type=\"hidden\" name=\"_commit\" value=\"on\">\n\"\"\"\n\nNO_CONFIRM_BUTTON = \"\"\"\n<button type=\"submit\" form=\"form_id_{button_id}\" class=\"btn btn-sm btn-{button_class}\" {disabled}>{button_text}</button>\n\"\"\"\n\nNO_CONFIRM_FORM = \"\"\"\n<form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n {hidden_inputs}\n</form>\n\"\"\"\n\nCONFIRM_BUTTON = \"\"\"\n<button type=\"button\" class=\"btn btn-sm btn-{button_class}\" data-toggle=\"modal\" data-target=\"#confirm_modal_id_{button_id}\" {disabled}>\n {button_text}\n</button>\n\"\"\"\n\nCONFIRM_MODAL = \"\"\"\n<div class=\"modal fade\" id=\"confirm_modal_id_{button_id}\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"confirm_modal_label_{button_id}\">\n <div class=\"modal-dialog\" role=\"document\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>\n <h4 class=\"modal-title\" id=\"confirm_modal_label_{button_id}\">Confirmation</h4>\n </div>\n <form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n <div class=\"modal-body\">\n {hidden_inputs}\n Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?\n </div>\n <div class=\"modal-footer\">\n <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Cancel</button>\n <button type=\"submit\" class=\"btn btn-primary\">Confirm</button>\n </div>\n </form>\n </div>\n </div>\n</div>\n\"\"\"\n\nSAFE_EMPTY_STR = mark_safe(\"\") # noqa: S308\n\n\ndef _render_job_button_for_obj(job_button, obj, context, content_type):\n \"\"\"\n Helper method for job_buttons templatetag to reduce repetition of code.\n\n Returns:\n (str, str): (button_html, form_html)\n \"\"\"\n # Pass select context data when rendering the JobButton text as Jinja2\n button_context = {\n \"obj\": obj,\n \"debug\": context.get(\"debug\", False), # django.template.context_processors.debug\n \"request\": context[\"request\"], # django.template.context_processors.request\n \"user\": context[\"user\"], # django.contrib.auth.context_processors.auth\n \"perms\": context[\"perms\"], # django.contrib.auth.context_processors.auth\n }\n try:\n 
text_rendered = render_jinja2(job_button.text, button_context)\n except Exception as exc:\n return (\n format_html(\n '<a class=\"btn btn-sm btn-{}\" disabled=\"disabled\" title=\"{}\"><i class=\"mdi mdi-alert\"></i> {}</a>\\n',\n \"default\" if not job_button.group_name else \"link\",\n exc,\n job_button.name,\n ),\n SAFE_EMPTY_STR,\n )\n\n if not text_rendered:\n return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)\n\n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n )\n template_args = {\n \"button_id\": job_button.pk,\n \"button_text\": text_rendered,\n \"button_class\": job_button.button_class if not job_button.group_name else \"link\",\n \"button_url\": reverse(\"extras:job_run\", kwargs={\"slug\": job_button.job.slug}),\n \"object\": obj,\n \"job\": job_button.job,\n \"hidden_inputs\": hidden_inputs,\n \"disabled\": \"\" if has_run_perm else \"disabled\",\n }\n\n if job_button.confirmation:\n return (\n format_html(CONFIRM_BUTTON, **template_args),\n format_html(CONFIRM_MODAL, **template_args),\n )\n else:\n return (\n format_html(NO_CONFIRM_BUTTON, **template_args),\n format_html(NO_CONFIRM_FORM, **template_args),\n )\n\n\[email protected]_tag(takes_context=True)\ndef job_buttons(context, obj):\n \"\"\"\n Render all applicable job buttons for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n # We will enforce \"run\" permission later in deciding which buttons to show as disabled.\n buttons = JobButton.objects.filter(content_types=content_type)\n if not buttons:\n return SAFE_EMPTY_STR\n\n buttons_html = forms_html = SAFE_EMPTY_STR\n group_names = OrderedDict()\n\n for jb in buttons:\n # Organize job buttons by group for later processing\n if jb.group_name:\n group_names.setdefault(jb.group_name, []).append(jb)\n\n # Render and add non-grouped buttons\n else:\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_html += button_html\n forms_html += form_html\n\n # Add grouped buttons to template\n for group_name, buttons in group_names.items():\n group_button_class = buttons[0].button_class\n\n buttons_rendered = SAFE_EMPTY_STR\n\n for jb in buttons:\n # Render grouped buttons as list items\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n if button_html:\n buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n if form_html:\n forms_html += form_html\n\n if buttons_rendered:\n buttons_html += format_html(\n GROUP_DROPDOWN,\n group_button_class=group_button_class,\n group_name=group_name,\n grouped_buttons=buttons_rendered,\n )\n\n # We want all of the buttons first and then any modals and forms so the buttons render properly\n return buttons_html + forms_html\n", "path": "nautobot/extras/templatetags/job_buttons.py"}]}
| 2,987 | 393 |
gh_patches_debug_42880
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-1185
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
All logging messages produced in ray actors have 'ERROR' level
At the moment, all log messages produced in ray actors come through with 'ERROR' level. There are two issues because of this:
1. the db grows quickly.
2. with sqlite as the db there are many errors related to db locks, especially when several learn processes are started.
</issue>
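A minimal sketch of the two mitigations the patch further below applies: skip ray's re-printed worker output (and blank messages) before writing log rows, and give sqlite a longer busy timeout so concurrent learn processes stop hitting "database is locked" errors. The `ray_print_logs` thread name and the `timeout=30` parameter come from that patch; `should_store` is a hypothetical helper used only for illustration.

```python
import logging


def should_store(record: logging.LogRecord) -> bool:
    # Hypothetical helper: drop empty messages and ray's re-printed worker
    # output that did not originate from the mindsdb logger.
    message = record.getMessage()
    if not message.strip(' \n'):
        return False
    if record.threadName == 'ray_print_logs' and 'mindsdb-logger' not in message:
        return False
    return True


# A longer sqlite busy timeout reduces lock errors when several learn
# processes write log rows at the same time (value taken from the patch).
DB_CON = 'sqlite:///mindsdb.sqlite3.db?check_same_thread=False&timeout=30'
```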
<code>
[start of mindsdb/utilities/log.py]
1 import os
2 import sys
3 import logging
4 import traceback
5
6 from mindsdb.interfaces.storage.db import session, Log
7 from mindsdb.utilities.config import Config
8
9 telemtry_enabled = os.getenv('CHECK_FOR_UPDATES', '1').lower() not in ['0', 'false', 'False']
10 global_config = Config().get_all()
11
12 if telemtry_enabled:
13 import sentry_sdk
14 from sentry_sdk import capture_exception, capture_message, add_breadcrumb
15 sentry_sdk.init(
16 "https://[email protected]/5633566",
17 traces_sample_rate=0 #Set to `1` to experiment with performance metrics
18 )
19
20 class LoggerWrapper(object):
21 def __init__(self, writer_arr, default_writer_pos):
22 self._writer_arr = writer_arr
23 self.default_writer_pos = default_writer_pos
24
25 def write(self, message):
26 if 'DEBUG:' in message:
27 self._writer_arr[0](message)
28 elif 'INFO:' in message:
29 self._writer_arr[1](message)
30 elif 'WARNING:' in message:
31 self._writer_arr[2](message)
32 elif 'ERROR:' in message:
33 self._writer_arr[3](message)
34 else:
35 self._writer_arr[self.default_writer_pos](message)
36
37 def flush(self):
38 pass
39
40 class DbHandler(logging.Handler):
41 def __init__(self):
42 logging.Handler.__init__(self)
43 self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)
44
45 def emit(self, record):
46 log_type = record.levelname
47 source = f'file: {record.pathname} - line: {record.lineno}'
48 payload = record.msg
49
50 if telemtry_enabled:
51 pass
52 # @TODO: Enable once we are sure no sensitive info is being outputed in the logs
53 # if log_type in ['INFO']:
54 # add_breadcrumb(
55 # category='auth',
56 # message=str(payload),
57 # level='info',
58 # )
59 # Might be too much traffic if we send this for users with slow networks
60 #if log_type in ['DEBUG']:
61 # add_breadcrumb(
62 # category='auth',
63 # message=str(payload),
64 # level='debug',
65 # )
66
67 if log_type in ['ERROR', 'WARNING']:
68 trace = str(traceback.format_stack(limit=20))
69 trac_log = Log(log_type='traceback', source=source, payload=trace, company_id=self.company_id)
70 session.add(trac_log)
71 session.commit()
72
73 if telemtry_enabled:
74 add_breadcrumb(
75 category='stack_trace',
76 message=trace,
77 level='info',
78 )
79 if log_type in ['ERROR']:
80 capture_message(str(payload))
81 if log_type in ['WARNING']:
82 capture_message(str(payload))
83
84 log = Log(log_type=str(log_type), source=source, payload=str(payload), company_id=self.company_id)
85 session.add(log)
86 session.commit()
87
88 def fmt_log_record(log_record):
89 return {
90 'log_from': 'mindsdb',
91 'level': log_record.log_type,
92 'context': 'unkown',
93 'text': log_record.payload,
94 'created_at': str(log_record.created_at).split('.')[0]
95 }
96
97 def get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):
98 logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)
99
100 if max_timestamp is not None:
101 logs = logs.filter(Log.created_at<max_timestamp)
102
103 if context is not None:
104 # e.g. datasource/predictor and assoicated id
105 pass
106
107 if level is not None:
108 logs = logs.filter(Log.log_type==level)
109
110 if log_from is not None:
111 # mindsdb/native/lightwood/all
112 pass
113
114 if limit is not None:
115 logs = logs.limit(limit)
116
117 logs = [fmt_log_record(x) for x in logs]
118 return logs
119
120 def initialize_log(config=global_config, logger_name='main', wrap_print=False):
121 ''' Create new logger
122 :param config: object, app config
123 :param logger_name: str, name of logger
124 :param wrap_print: bool, if true, then print() calls will be wrapped by log.debug() function.
125 '''
126 log = logging.getLogger(f'mindsdb.{logger_name}')
127 log.propagate = False
128 log.setLevel(min(
129 getattr(logging, config['log']['level']['console']),
130 getattr(logging, config['log']['level']['file'])
131 ))
132
133 formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
134
135 ch = logging.StreamHandler()
136 ch.setLevel(config['log']['level']['console']) # that level will be in console
137 log.addHandler(ch)
138 db_handler = DbHandler()
139 log.addHandler(db_handler)
140
141 if wrap_print:
142 sys.stdout = LoggerWrapper([log.debug, log.info, log.warning, log.error], 1)
143 sys.stderr = LoggerWrapper([log.debug, log.info, log.warning, log.error], 3)
144
145 return log
146
147
148 def get_log(logger_name=None):
149 if logger_name is None:
150 return logging.getLogger('mindsdb')
151 return logging.getLogger(f'mindsdb.{logger_name}')
152
153
154 log = initialize_log()
155
[end of mindsdb/utilities/log.py]
[start of mindsdb/__init__.py]
1 import os
2 import sys
3 import json
4
5 from mindsdb.__about__ import __package_name__ as name, __version__ # noqa
6 from mindsdb.utilities.fs import get_or_create_dir_struct, create_dirs_recursive
7 from mindsdb.utilities.functions import args_parse, is_notebook
8 from mindsdb.__about__ import __version__ as mindsdb_version
9 from mindsdb.utilities.telemetry import telemetry_file_exists, disable_telemetry
10
11 is_ray_worker = False
12 if '-ray' in str(sys.argv):
13 is_ray_worker = True
14
15 if not is_ray_worker:
16 try:
17 if not is_notebook():
18 args = args_parse()
19 else:
20 args = None
21 except Exception:
22 # This fials in some notebooks ... check above for is_notebook is still needed because even if the exception is caught trying to read the arg still leads to failure in other notebooks... notebooks a
23 args = None
24
25 # ---- CHECK SYSTEM ----
26 if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):
27 print("""
28 MindsDB server requires Python >= 3.6 to run
29
30 Once you have Python 3.6 installed you can tun mindsdb as follows:
31
32 1. create and activate venv:
33 python3.6 -m venv venv
34 source venv/bin/activate
35
36 2. install MindsDB:
37 pip3 install mindsdb
38
39 3. Run MindsDB
40 python3.6 -m mindsdb
41
42 More instructions in https://docs.mindsdb.com
43 """)
44 exit(1)
45
46 # --- VERSION MODE ----
47 if args is not None and args.version:
48 print(f'MindsDB {mindsdb_version}')
49 sys.exit(0)
50
51 # --- MODULE OR LIBRARY IMPORT MODE ----
52 if args is not None and args.config is not None:
53 config_path = args.config
54 with open(config_path, 'r') as fp:
55 user_config = json.load(fp)
56 else:
57 user_config = {}
58 config_path = 'absent'
59 os.environ['MINDSDB_CONFIG_PATH'] = config_path
60
61 if 'storage_dir' in user_config:
62 root_storage_dir = user_config['storage_dir']
63 os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir
64 elif os.environ.get('MINDSDB_STORAGE_DIR') is not None:
65 root_storage_dir = os.environ['MINDSDB_STORAGE_DIR']
66 else:
67 _, root_storage_dir = get_or_create_dir_struct()
68 os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir
69
70 if os.path.isdir(root_storage_dir) is False:
71 os.makedirs(root_storage_dir)
72
73
74
75 if 'storage_db' in user_config:
76 os.environ['MINDSDB_DB_CON'] = user_config['storage_db']
77 elif os.environ.get('MINDSDB_DB_CON','') == '':
78 os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'
79
80 if 'company_id' in user_config:
81 os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']
82
83
84 from mindsdb.utilities.config import Config
85 mindsdb_config = Config()
86 create_dirs_recursive(mindsdb_config['paths'])
87
88 os.environ['DEFAULT_LOG_LEVEL'] = os.environ.get('DEFAULT_LOG_LEVEL', 'ERROR')
89 os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')
90 os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']
91
92
93 if telemetry_file_exists(mindsdb_config['storage_dir']):
94 os.environ['CHECK_FOR_UPDATES'] = '0'
95 print('\n x telemetry disabled! \n')
96 elif os.getenv('CHECK_FOR_UPDATES', '1').lower() in ['0', 'false', 'False']:
97 disable_telemetry(mindsdb_config['storage_dir'])
98 print('\n x telemetry disabled \n')
99 else:
100 print('\n ✓ telemetry enabled \n')
101
102
103 from mindsdb.interfaces.custom.model_interface import ModelInterface
104
[end of mindsdb/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py
--- a/mindsdb/__init__.py
+++ b/mindsdb/__init__.py
@@ -70,17 +70,14 @@
if os.path.isdir(root_storage_dir) is False:
os.makedirs(root_storage_dir)
-
-
if 'storage_db' in user_config:
os.environ['MINDSDB_DB_CON'] = user_config['storage_db']
- elif os.environ.get('MINDSDB_DB_CON','') == '':
- os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'
+ elif os.environ.get('MINDSDB_DB_CON', '') == '':
+ os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'], 'mindsdb.sqlite3.db') + '?check_same_thread=False&timeout=30'
if 'company_id' in user_config:
os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']
-
from mindsdb.utilities.config import Config
mindsdb_config = Config()
create_dirs_recursive(mindsdb_config['paths'])
@@ -89,7 +86,6 @@
os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')
os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']
-
if telemetry_file_exists(mindsdb_config['storage_dir']):
os.environ['CHECK_FOR_UPDATES'] = '0'
print('\n x telemetry disabled! \n')
diff --git a/mindsdb/utilities/log.py b/mindsdb/utilities/log.py
--- a/mindsdb/utilities/log.py
+++ b/mindsdb/utilities/log.py
@@ -17,12 +17,15 @@
traces_sample_rate=0 #Set to `1` to experiment with performance metrics
)
+
class LoggerWrapper(object):
def __init__(self, writer_arr, default_writer_pos):
self._writer_arr = writer_arr
self.default_writer_pos = default_writer_pos
def write(self, message):
+ if len(message.strip(' \n')) == 0:
+ return
if 'DEBUG:' in message:
self._writer_arr[0](message)
elif 'INFO:' in message:
@@ -37,12 +40,17 @@
def flush(self):
pass
+
class DbHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)
def emit(self, record):
+ if len(record.message.strip(' \n')) == 0 \
+ or (record.threadName == 'ray_print_logs' and 'mindsdb-logger' not in record.message):
+ return
+
log_type = record.levelname
source = f'file: {record.pathname} - line: {record.lineno}'
payload = record.msg
@@ -85,14 +93,16 @@
session.add(log)
session.commit()
+
def fmt_log_record(log_record):
- return {
- 'log_from': 'mindsdb',
- 'level': log_record.log_type,
- 'context': 'unkown',
- 'text': log_record.payload,
- 'created_at': str(log_record.created_at).split('.')[0]
- }
+ return {
+ 'log_from': 'mindsdb',
+ 'level': log_record.log_type,
+ 'context': 'unkown',
+ 'text': log_record.payload,
+ 'created_at': str(log_record.created_at).split('.')[0]
+ }
+
def get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):
logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)
|
{"golden_diff": "diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py\n--- a/mindsdb/__init__.py\n+++ b/mindsdb/__init__.py\n@@ -70,17 +70,14 @@\n if os.path.isdir(root_storage_dir) is False:\n os.makedirs(root_storage_dir)\n \n-\n-\n if 'storage_db' in user_config:\n os.environ['MINDSDB_DB_CON'] = user_config['storage_db']\n- elif os.environ.get('MINDSDB_DB_CON','') == '':\n- os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'\n+ elif os.environ.get('MINDSDB_DB_CON', '') == '':\n+ os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'], 'mindsdb.sqlite3.db') + '?check_same_thread=False&timeout=30'\n \n if 'company_id' in user_config:\n os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']\n \n-\n from mindsdb.utilities.config import Config\n mindsdb_config = Config()\n create_dirs_recursive(mindsdb_config['paths'])\n@@ -89,7 +86,6 @@\n os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')\n os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']\n \n-\n if telemetry_file_exists(mindsdb_config['storage_dir']):\n os.environ['CHECK_FOR_UPDATES'] = '0'\n print('\\n x telemetry disabled! \\n')\ndiff --git a/mindsdb/utilities/log.py b/mindsdb/utilities/log.py\n--- a/mindsdb/utilities/log.py\n+++ b/mindsdb/utilities/log.py\n@@ -17,12 +17,15 @@\n traces_sample_rate=0 #Set to `1` to experiment with performance metrics\n )\n \n+\n class LoggerWrapper(object):\n def __init__(self, writer_arr, default_writer_pos):\n self._writer_arr = writer_arr\n self.default_writer_pos = default_writer_pos\n \n def write(self, message):\n+ if len(message.strip(' \\n')) == 0:\n+ return\n if 'DEBUG:' in message:\n self._writer_arr[0](message)\n elif 'INFO:' in message:\n@@ -37,12 +40,17 @@\n def flush(self):\n pass\n \n+\n class DbHandler(logging.Handler):\n def __init__(self):\n logging.Handler.__init__(self)\n self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)\n \n def emit(self, record):\n+ if len(record.message.strip(' \\n')) == 0 \\\n+ or (record.threadName == 'ray_print_logs' and 'mindsdb-logger' not in record.message):\n+ return\n+\n log_type = record.levelname\n source = f'file: {record.pathname} - line: {record.lineno}'\n payload = record.msg\n@@ -85,14 +93,16 @@\n session.add(log)\n session.commit()\n \n+\n def fmt_log_record(log_record):\n- return {\n- 'log_from': 'mindsdb',\n- 'level': log_record.log_type,\n- 'context': 'unkown',\n- 'text': log_record.payload,\n- 'created_at': str(log_record.created_at).split('.')[0]\n- }\n+ return {\n+ 'log_from': 'mindsdb',\n+ 'level': log_record.log_type,\n+ 'context': 'unkown',\n+ 'text': log_record.payload,\n+ 'created_at': str(log_record.created_at).split('.')[0]\n+ }\n+\n \n def get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):\n logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)\n", "issue": "All logging messages produced in ray actors have 'ERROR' level\nAt this moment, all log messages produced in ray actor came with 'ERROR' level. There are two issues because this:\r\n1. db grow quick.\r\n2. 
with sqllite as db is many errors related to db lock, especially of start several learn processes.\n", "before_files": [{"content": "import os\nimport sys\nimport logging\nimport traceback\n\nfrom mindsdb.interfaces.storage.db import session, Log\nfrom mindsdb.utilities.config import Config\n\ntelemtry_enabled = os.getenv('CHECK_FOR_UPDATES', '1').lower() not in ['0', 'false', 'False']\nglobal_config = Config().get_all()\n\nif telemtry_enabled:\n import sentry_sdk\n from sentry_sdk import capture_exception, capture_message, add_breadcrumb\n sentry_sdk.init(\n \"https://[email protected]/5633566\",\n traces_sample_rate=0 #Set to `1` to experiment with performance metrics\n )\n\nclass LoggerWrapper(object):\n def __init__(self, writer_arr, default_writer_pos):\n self._writer_arr = writer_arr\n self.default_writer_pos = default_writer_pos\n\n def write(self, message):\n if 'DEBUG:' in message:\n self._writer_arr[0](message)\n elif 'INFO:' in message:\n self._writer_arr[1](message)\n elif 'WARNING:' in message:\n self._writer_arr[2](message)\n elif 'ERROR:' in message:\n self._writer_arr[3](message)\n else:\n self._writer_arr[self.default_writer_pos](message)\n\n def flush(self):\n pass\n\nclass DbHandler(logging.Handler):\n def __init__(self):\n logging.Handler.__init__(self)\n self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)\n\n def emit(self, record):\n log_type = record.levelname\n source = f'file: {record.pathname} - line: {record.lineno}'\n payload = record.msg\n\n if telemtry_enabled:\n pass\n # @TODO: Enable once we are sure no sensitive info is being outputed in the logs\n # if log_type in ['INFO']:\n # add_breadcrumb(\n # category='auth',\n # message=str(payload),\n # level='info',\n # )\n # Might be too much traffic if we send this for users with slow networks\n #if log_type in ['DEBUG']:\n # add_breadcrumb(\n # category='auth',\n # message=str(payload),\n # level='debug',\n # )\n\n if log_type in ['ERROR', 'WARNING']:\n trace = str(traceback.format_stack(limit=20))\n trac_log = Log(log_type='traceback', source=source, payload=trace, company_id=self.company_id)\n session.add(trac_log)\n session.commit()\n\n if telemtry_enabled:\n add_breadcrumb(\n category='stack_trace',\n message=trace,\n level='info',\n )\n if log_type in ['ERROR']:\n capture_message(str(payload))\n if log_type in ['WARNING']:\n capture_message(str(payload))\n\n log = Log(log_type=str(log_type), source=source, payload=str(payload), company_id=self.company_id)\n session.add(log)\n session.commit()\n\ndef fmt_log_record(log_record):\n return {\n 'log_from': 'mindsdb',\n 'level': log_record.log_type,\n 'context': 'unkown',\n 'text': log_record.payload,\n 'created_at': str(log_record.created_at).split('.')[0]\n }\n\ndef get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):\n logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)\n\n if max_timestamp is not None:\n logs = logs.filter(Log.created_at<max_timestamp)\n\n if context is not None:\n # e.g. 
datasource/predictor and assoicated id\n pass\n\n if level is not None:\n logs = logs.filter(Log.log_type==level)\n\n if log_from is not None:\n # mindsdb/native/lightwood/all\n pass\n\n if limit is not None:\n logs = logs.limit(limit)\n\n logs = [fmt_log_record(x) for x in logs]\n return logs\n\ndef initialize_log(config=global_config, logger_name='main', wrap_print=False):\n ''' Create new logger\n :param config: object, app config\n :param logger_name: str, name of logger\n :param wrap_print: bool, if true, then print() calls will be wrapped by log.debug() function.\n '''\n log = logging.getLogger(f'mindsdb.{logger_name}')\n log.propagate = False\n log.setLevel(min(\n getattr(logging, config['log']['level']['console']),\n getattr(logging, config['log']['level']['file'])\n ))\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n ch = logging.StreamHandler()\n ch.setLevel(config['log']['level']['console']) # that level will be in console\n log.addHandler(ch)\n db_handler = DbHandler()\n log.addHandler(db_handler)\n\n if wrap_print:\n sys.stdout = LoggerWrapper([log.debug, log.info, log.warning, log.error], 1)\n sys.stderr = LoggerWrapper([log.debug, log.info, log.warning, log.error], 3)\n\n return log\n\n\ndef get_log(logger_name=None):\n if logger_name is None:\n return logging.getLogger('mindsdb')\n return logging.getLogger(f'mindsdb.{logger_name}')\n\n\nlog = initialize_log()\n", "path": "mindsdb/utilities/log.py"}, {"content": "import os\nimport sys\nimport json\n\nfrom mindsdb.__about__ import __package_name__ as name, __version__ # noqa\nfrom mindsdb.utilities.fs import get_or_create_dir_struct, create_dirs_recursive\nfrom mindsdb.utilities.functions import args_parse, is_notebook\nfrom mindsdb.__about__ import __version__ as mindsdb_version\nfrom mindsdb.utilities.telemetry import telemetry_file_exists, disable_telemetry\n\nis_ray_worker = False\nif '-ray' in str(sys.argv):\n is_ray_worker = True\n\nif not is_ray_worker:\n try:\n if not is_notebook():\n args = args_parse()\n else:\n args = None\n except Exception:\n # This fials in some notebooks ... check above for is_notebook is still needed because even if the exception is caught trying to read the arg still leads to failure in other notebooks... notebooks a\n args = None\n\n # ---- CHECK SYSTEM ----\n if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):\n print(\"\"\"\n MindsDB server requires Python >= 3.6 to run\n\n Once you have Python 3.6 installed you can tun mindsdb as follows:\n\n 1. create and activate venv:\n python3.6 -m venv venv\n source venv/bin/activate\n\n 2. install MindsDB:\n pip3 install mindsdb\n\n 3. 
Run MindsDB\n python3.6 -m mindsdb\n\n More instructions in https://docs.mindsdb.com\n \"\"\")\n exit(1)\n\n # --- VERSION MODE ----\n if args is not None and args.version:\n print(f'MindsDB {mindsdb_version}')\n sys.exit(0)\n\n # --- MODULE OR LIBRARY IMPORT MODE ----\n if args is not None and args.config is not None:\n config_path = args.config\n with open(config_path, 'r') as fp:\n user_config = json.load(fp)\n else:\n user_config = {}\n config_path = 'absent'\n os.environ['MINDSDB_CONFIG_PATH'] = config_path\n\n if 'storage_dir' in user_config:\n root_storage_dir = user_config['storage_dir']\n os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir\n elif os.environ.get('MINDSDB_STORAGE_DIR') is not None:\n root_storage_dir = os.environ['MINDSDB_STORAGE_DIR']\n else:\n _, root_storage_dir = get_or_create_dir_struct()\n os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir\n\n if os.path.isdir(root_storage_dir) is False:\n os.makedirs(root_storage_dir)\n\n\n\n if 'storage_db' in user_config:\n os.environ['MINDSDB_DB_CON'] = user_config['storage_db']\n elif os.environ.get('MINDSDB_DB_CON','') == '':\n os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'\n\n if 'company_id' in user_config:\n os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']\n\n\n from mindsdb.utilities.config import Config\n mindsdb_config = Config()\n create_dirs_recursive(mindsdb_config['paths'])\n\n os.environ['DEFAULT_LOG_LEVEL'] = os.environ.get('DEFAULT_LOG_LEVEL', 'ERROR')\n os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')\n os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']\n\n\n if telemetry_file_exists(mindsdb_config['storage_dir']):\n os.environ['CHECK_FOR_UPDATES'] = '0'\n print('\\n x telemetry disabled! \\n')\n elif os.getenv('CHECK_FOR_UPDATES', '1').lower() in ['0', 'false', 'False']:\n disable_telemetry(mindsdb_config['storage_dir'])\n print('\\n x telemetry disabled \\n')\n else:\n print('\\n \u2713 telemetry enabled \\n')\n\n\n from mindsdb.interfaces.custom.model_interface import ModelInterface\n", "path": "mindsdb/__init__.py"}]}
| 3,313 | 908 |
gh_patches_debug_59413
|
rasdani/github-patches
|
git_diff
|
encode__starlette-88
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CORSMiddleware is sending an extra 'http.response.body'
It seems that even with all tests passing and CORS being successfully applied, CORSMiddleware still raises a runtime error.
Code being tested:
```python
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=["*"])
@app.route("/")
async def homepage(request):
return PlainTextResponse('Hello', status_code=200)
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
```
And the error being produced:
```
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 384, in run_asgi
result = await asgi(self.receive, self.send)
File "/home/alexbotello/Code/starlette/starlette/exceptions.py", line 60, in app
raise exc from None
File "/home/alexbotello/Code/starlette/starlette/exceptions.py", line 52, in app
await instance(receive, sender)
File "/home/alexbotello/Code/starlette/starlette/middleware/cors.py", line 116, in simple_response
await inner(receive, send)
File "/home/alexbotello/Code/starlette/starlette/applications.py", line 26, in awaitable
await response(receive, send)
File "/home/alexbotello/Code/starlette/starlette/responses.py", line 100, in __call__
await send({"type": "http.response.body", "body": self.body})
File "/home/alexbotello/Code/starlette/starlette/middleware/cors.py", line 130, in send
await send(message)
File "/home/alexbotello/Code/starlette/starlette/exceptions.py", line 47, in sender
await send(message)
File "/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 518, in send
raise RuntimeError(msg % message_type)
RuntimeError: Unexpected ASGI message 'http.response.body' sent, after response already completed.
```
It seems the issue is originating from `send`. Specifically:
```python
if message["type"] != "http.response.start":
await send(message)
```
Removing this fixes the issue and does not break any tests.
</issue>
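The patch further below keeps that forwarding but adds an early `return` right after it, so the wrapper no longer falls through and re-sends the body message. A minimal, framework-agnostic sketch of the pattern follows; `cors_send_wrapper` and `apply_cors_headers` are illustrative names, not Starlette API.

```python
def cors_send_wrapper(inner_send, apply_cors_headers):
    async def send(message):
        if message["type"] != "http.response.start":
            await inner_send(message)
            return  # without this, the body message falls through and is sent twice
        apply_cors_headers(message)  # mutate headers on the response start only
        await inner_send(message)
    return send
```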
<code>
[start of starlette/middleware/cors.py]
1 from starlette.datastructures import Headers, MutableHeaders, URL
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import functools
5 import typing
6
7
8 ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
9
10
11 class CORSMiddleware:
12 def __init__(
13 self,
14 app: ASGIApp,
15 allow_origins: typing.Sequence[str] = (),
16 allow_methods: typing.Sequence[str] = ("GET",),
17 allow_headers: typing.Sequence[str] = (),
18 allow_credentials: bool = False,
19 expose_headers: typing.Sequence[str] = (),
20 max_age: int = 600,
21 ):
22
23 if "*" in allow_methods:
24 allow_methods = ALL_METHODS
25
26 simple_headers = {}
27 if "*" in allow_origins:
28 simple_headers["Access-Control-Allow-Origin"] = "*"
29 if allow_credentials:
30 simple_headers["Access-Control-Allow-Credentials"] = "true"
31 if expose_headers:
32 simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
33
34 preflight_headers = {}
35 if "*" in allow_origins:
36 preflight_headers["Access-Control-Allow-Origin"] = "*"
37 else:
38 preflight_headers["Vary"] = "Origin"
39 preflight_headers.update(
40 {
41 "Access-Control-Allow-Methods": ", ".join(allow_methods),
42 "Access-Control-Max-Age": str(max_age),
43 }
44 )
45 if allow_headers and "*" not in allow_headers:
46 preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
47 if allow_credentials:
48 preflight_headers["Access-Control-Allow-Credentials"] = "true"
49
50 self.app = app
51 self.allow_origins = allow_origins
52 self.allow_methods = allow_methods
53 self.allow_headers = allow_headers
54 self.allow_all_origins = "*" in allow_origins
55 self.allow_all_headers = "*" in allow_headers
56 self.simple_headers = simple_headers
57 self.preflight_headers = preflight_headers
58
59 def __call__(self, scope: Scope):
60 if scope["type"] == "http":
61 method = scope["method"]
62 headers = Headers(scope["headers"])
63 origin = headers.get("origin")
64
65 if origin is not None:
66 if method == "OPTIONS" and "access-control-request-method" in headers:
67 return self.preflight_response(request_headers=headers)
68 else:
69 return functools.partial(
70 self.simple_response, scope=scope, origin=origin
71 )
72
73 return self.app(scope)
74
75 def preflight_response(self, request_headers):
76 requested_origin = request_headers["origin"]
77 requested_method = request_headers["access-control-request-method"]
78 requested_headers = request_headers.get("access-control-request-headers")
79 requested_cookie = "cookie" in request_headers
80
81 headers = dict(self.preflight_headers)
82 failures = []
83
84 # If we only allow specific origins, then we have to mirror back
85 # the Origin header in the response.
86 if not self.allow_all_origins:
87 if requested_origin in self.allow_origins:
88 headers["Access-Control-Allow-Origin"] = requested_origin
89 else:
90 failures.append("origin")
91
92 if requested_method not in self.allow_methods:
93 failures.append("method")
94
95 # If we allow all headers, then we have to mirror back any requested
96 # headers in the response.
97 if self.allow_all_headers and requested_headers is not None:
98 headers["Access-Control-Allow-Headers"] = requested_headers
99 elif requested_headers is not None:
100 for header in requested_headers.split(","):
101 if header.strip() not in self.allow_headers:
102 failures.append("headers")
103
104 # We don't strictly need to use 400 responses here, since its up to
105 # the browser to enforce the CORS policy, but its more informative
106 # if we do.
107 if failures:
108 failure_text = "Disallowed CORS " + ", ".join(failures)
109 return PlainTextResponse(failure_text, status_code=400, headers=headers)
110
111 return PlainTextResponse("OK", status_code=200, headers=headers)
112
113 async def simple_response(self, receive, send, scope=None, origin=None):
114 inner = self.app(scope)
115 send = functools.partial(self.send, send=send, origin=origin)
116 await inner(receive, send)
117
118 async def send(self, message, send=None, origin=None):
119 if message["type"] != "http.response.start":
120 await send(message)
121
122 message.setdefault("headers", [])
123 headers = MutableHeaders(message["headers"])
124
125 # If we only allow specific origins, then we have to mirror back
126 # the Origin header in the response.
127 if not self.allow_all_origins and origin in self.allow_origins:
128 headers["Access-Control-Allow-Origin"] = origin
129 headers.update(self.simple_headers)
130 await send(message)
131
[end of starlette/middleware/cors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py
--- a/starlette/middleware/cors.py
+++ b/starlette/middleware/cors.py
@@ -118,6 +118,7 @@
async def send(self, message, send=None, origin=None):
if message["type"] != "http.response.start":
await send(message)
+ return
message.setdefault("headers", [])
headers = MutableHeaders(message["headers"])
|
{"golden_diff": "diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py\n--- a/starlette/middleware/cors.py\n+++ b/starlette/middleware/cors.py\n@@ -118,6 +118,7 @@\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n+ return\n \n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n", "issue": "CORSMiddleware is sending an extra 'http.response.body'\nIt seems that even with all tests passing and cors being successfully applied, CORSMiddleware still raises a runtime error.\r\n\r\nCode being tested:\r\n```python\r\napp = Starlette()\r\n\r\napp.add_middleware(CORSMiddleware, allow_origins=[\"*\"])\r\n\r\[email protected](\"/\")\r\nasync def homepage(request):\r\n return PlainTextResponse('Hello', status_code=200)\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\r\n```\r\n\r\nAnd the error being produced:\r\n```\r\nERROR: Exception in ASGI application\r\nTraceback (most recent call last):\r\n File \"/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 384, in run_asgi\r\n result = await asgi(self.receive, self.send)\r\n File \"/home/alexbotello/Code/starlette/starlette/exceptions.py\", line 60, in app\r\n raise exc from None\r\n File \"/home/alexbotello/Code/starlette/starlette/exceptions.py\", line 52, in app\r\n await instance(receive, sender)\r\n File \"/home/alexbotello/Code/starlette/starlette/middleware/cors.py\", line 116, in simple_response\r\n await inner(receive, send)\r\n File \"/home/alexbotello/Code/starlette/starlette/applications.py\", line 26, in awaitable\r\n await response(receive, send)\r\n File \"/home/alexbotello/Code/starlette/starlette/responses.py\", line 100, in __call__\r\n await send({\"type\": \"http.response.body\", \"body\": self.body})\r\n File \"/home/alexbotello/Code/starlette/starlette/middleware/cors.py\", line 130, in send\r\n await send(message)\r\n File \"/home/alexbotello/Code/starlette/starlette/exceptions.py\", line 47, in sender\r\n await send(message)\r\n File \"/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 518, in send\r\n raise RuntimeError(msg % message_type)\r\nRuntimeError: Unexpected ASGI message 'http.response.body' sent, after response already completed.\r\n```\r\nIt seems the issue is originating from `send`. 
Specifically:\r\n```python\r\nif message[\"type\"] != \"http.response.start\":\r\n await send(message)\r\n```\r\nRemoving this fixes the issue and does not break any tests.\n", "before_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n\n return self.app(scope)\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n if requested_origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() 
not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}]}
| 2,469 | 105 |
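Editor's note on the Starlette CORS row that ends above: the golden diff only adds an early `return` after forwarding non-`http.response.start` messages. The sketch below is a self-contained illustration of why that matters; it uses plain asyncio and invented function names, not Starlette's actual middleware.

```python
import asyncio


async def broken_send(message, send):
    # Mirrors the buggy wrapper: body messages are forwarded here...
    if message["type"] != "http.response.start":
        await send(message)
    # ...but execution continues, so the same message is sent a second time below.
    message.setdefault("headers", [])
    await send(message)


async def fixed_send(message, send):
    if message["type"] != "http.response.start":
        await send(message)
        return  # the one-line fix: pass body messages through exactly once
    message.setdefault("headers", [])
    await send(message)


async def main():
    sent = []

    async def send(msg):
        sent.append(msg["type"])

    await broken_send({"type": "http.response.body", "body": b"Hello"}, send)
    print(sent)  # ['http.response.body', 'http.response.body'] -> "already completed" error
    sent.clear()
    await fixed_send({"type": "http.response.body", "body": b"Hello"}, send)
    print(sent)  # ['http.response.body']


asyncio.run(main())
```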
gh_patches_debug_11392 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-1055 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cancelling a Subscription removes the model instance
I notice at the moment when you cancel a subscription the model instance is deleted too and looking at the code this seems to be by design. Is this the correct action to take - surely having the history would be useful and would there not be foreign keys to invoices etc? What purpose is the subscription status of "cancelled" if the records are deleted? I also noticed that the Customer._sync_subscriptions() method pulls in all subscriptions regardless of their status so the functionality at least seems to be inconsistent.
</issue>
<code>
[start of djstripe/event_handlers.py]
1 """
2 Webhook event handlers for the various models
3
4 Stripe docs for Events: https://stripe.com/docs/api/events
5 Stripe docs for Webhooks: https://stripe.com/docs/webhooks
6
7 TODO: Implement webhook event handlers for all the models that need to
8 respond to webhook events.
9
10 NOTE:
11 Event data is not guaranteed to be in the correct API version format.
12 See #116. When writing a webhook handler, make sure to first
13 re-retrieve the object you wish to process.
14
15 """
16 import logging
17
18 from . import models, webhooks
19 from .enums import SourceType
20 from .utils import convert_tstamp
21
22 logger = logging.getLogger(__name__)
23
24
25 @webhooks.handler("customer")
26 def customer_webhook_handler(event):
27 """Handle updates to customer objects.
28
29 First determines the crud_type and then handles the event if a customer
30 exists locally.
31 As customers are tied to local users, djstripe will not create customers that
32 do not already exist locally.
33
34 Docs and an example customer webhook response:
35 https://stripe.com/docs/api#customer_object
36 """
37 if event.customer:
38 # As customers are tied to local users, djstripe will not create
39 # customers that do not already exist locally.
40 _handle_crud_like_event(
41 target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True
42 )
43
44
45 @webhooks.handler("customer.discount")
46 def customer_discount_webhook_handler(event):
47 """Handle updates to customer discount objects.
48
49 Docs: https://stripe.com/docs/api#discounts
50
51 Because there is no concept of a "Discount" model in dj-stripe (due to the
52 lack of a stripe id on them), this is a little different to the other
53 handlers.
54 """
55
56 crud_type = CrudType.determine(event=event)
57 discount_data = event.data.get("object", {})
58 coupon_data = discount_data.get("coupon", {})
59 customer = event.customer
60
61 if crud_type.created or crud_type.updated:
62 coupon, _ = _handle_crud_like_event(
63 target_cls=models.Coupon,
64 event=event,
65 data=coupon_data,
66 id=coupon_data.get("id"),
67 )
68 coupon_start = discount_data.get("start")
69 coupon_end = discount_data.get("end")
70 else:
71 coupon = None
72 coupon_start = None
73 coupon_end = None
74
75 customer.coupon = coupon
76 customer.coupon_start = convert_tstamp(coupon_start)
77 customer.coupon_end = convert_tstamp(coupon_end)
78 customer.save()
79
80
81 @webhooks.handler("customer.source")
82 def customer_source_webhook_handler(event):
83 """Handle updates to customer payment-source objects.
84
85 Docs: https://stripe.com/docs/api#customer_object-sources.
86 """
87 customer_data = event.data.get("object", {})
88 source_type = customer_data.get("object", {})
89
90 # TODO: handle other types of sources
91 # (https://stripe.com/docs/api#customer_object-sources)
92 if source_type == SourceType.card:
93 if event.verb.endswith("deleted") and customer_data:
94 # On customer.source.deleted, we do not delete the object,
95 # we merely unlink it.
96 # customer = Customer.objects.get(id=customer_data["id"])
97 # NOTE: for now, customer.sources still points to Card
98 # Also, https://github.com/dj-stripe/dj-stripe/issues/576
99 models.Card.objects.filter(id=customer_data.get("id", "")).delete()
100 models.DjstripePaymentMethod.objects.filter(
101 id=customer_data.get("id", "")
102 ).delete()
103 else:
104 _handle_crud_like_event(target_cls=models.Card, event=event)
105
106
107 @webhooks.handler("customer.subscription")
108 def customer_subscription_webhook_handler(event):
109 """Handle updates to customer subscription objects.
110
111 Docs an example subscription webhook response:
112 https://stripe.com/docs/api#subscription_object
113 """
114 _handle_crud_like_event(target_cls=models.Subscription, event=event)
115
116
117 @webhooks.handler("payment_method")
118 def payment_method_handler(event):
119 """
120 Handle updates to payment_method objects
121 :param event:
122 :return:
123
124 Docs for:
125 - payment_method: https://stripe.com/docs/api/payment_methods
126 """
127 id_ = event.data.get("object", {}).get("id", None)
128
129 if (
130 event.parts == ["payment_method", "detached"]
131 and id_
132 and id_.startswith("card_")
133 ):
134 # Special case to handle a quirk in stripe's wrapping of legacy "card" objects
135 # with payment_methods - card objects are deleted on detach, so treat this as
136 # a delete event
137 _handle_crud_like_event(
138 target_cls=models.PaymentMethod,
139 event=event,
140 crud_type=CrudType(deleted=True),
141 )
142 else:
143 _handle_crud_like_event(target_cls=models.PaymentMethod, event=event)
144
145
146 @webhooks.handler(
147 "transfer",
148 "charge",
149 "coupon",
150 "invoice",
151 "invoiceitem",
152 "payment_intent",
153 "plan",
154 "product",
155 "setup_intent",
156 "source",
157 )
158 def other_object_webhook_handler(event):
159 """
160 Handle updates to transfer, charge, coupon, invoice, invoiceitem, payment_intent,
161 plan, product, setup_intent and source objects.
162
163 Docs for:
164 - charge: https://stripe.com/docs/api#charges
165 - coupon: https://stripe.com/docs/api#coupons
166 - invoice: https://stripe.com/docs/api#invoices
167 - invoiceitem: https://stripe.com/docs/api#invoiceitems
168 - plan: https://stripe.com/docs/api#plans
169 - product: https://stripe.com/docs/api#products
170 - source: https://stripe.com/docs/api#sources
171 - payment_intent: https://stripe.com/docs/api/payment_intents
172 """
173
174 if event.parts[:2] == ["charge", "dispute"]:
175 # Do not attempt to handle charge.dispute.* events.
176 # We do not have a Dispute model yet.
177 target_cls = models.Dispute
178 else:
179 target_cls = {
180 "charge": models.Charge,
181 "coupon": models.Coupon,
182 "invoice": models.Invoice,
183 "invoiceitem": models.InvoiceItem,
184 "payment_intent": models.PaymentIntent,
185 "plan": models.Plan,
186 "product": models.Product,
187 "transfer": models.Transfer,
188 "setup_intent": models.SetupIntent,
189 "source": models.Source,
190 }.get(event.category)
191
192 _handle_crud_like_event(target_cls=target_cls, event=event)
193
194
195 #
196 # Helpers
197 #
198
199
200 class CrudType(object):
201 """Helper object to determine CRUD-like event state."""
202
203 created = False
204 updated = False
205 deleted = False
206
207 def __init__(self, **kwargs):
208 """Set attributes."""
209 for k, v in kwargs.items():
210 setattr(self, k, v)
211
212 @property
213 def valid(self):
214 """Return True if this is a CRUD-like event."""
215 return self.created or self.updated or self.deleted
216
217 @classmethod
218 def determine(cls, event, verb=None, exact=False):
219 """
220 Determine if the event verb is a crud_type (without the 'R') event.
221
222 :param event:
223 :type event: models.Event
224 :param verb: The event verb to examine.
225 :type verb: str
226 :param exact: If True, match crud_type to event verb string exactly.
227 :type exact: bool
228 :returns: The CrudType state object.
229 :rtype: CrudType
230 """
231 verb = verb or event.verb
232
233 def check(crud_type_event):
234 if exact:
235 return verb == crud_type_event
236 else:
237 return verb.endswith(crud_type_event)
238
239 created = updated = deleted = False
240
241 if check("updated"):
242 updated = True
243 elif check("created"):
244 created = True
245 elif check("deleted"):
246 deleted = True
247
248 return cls(created=created, updated=updated, deleted=deleted)
249
250
251 def _handle_crud_like_event(
252 target_cls,
253 event,
254 data=None,
255 verb=None,
256 id=None,
257 customer=None,
258 crud_type=None,
259 crud_exact=False,
260 crud_valid=False,
261 ):
262 """
263 Helper to process crud_type-like events for objects.
264
265 Non-deletes (creates, updates and "anything else" events) are treated as
266 update_or_create events - The object will be retrieved locally, then it is
267 synchronised with the Stripe API for parity.
268
269 Deletes only occur for delete events and cause the object to be deleted
270 from the local database, if it existed. If it doesn't exist then it is
271 ignored (but the event processing still succeeds).
272
273 :param target_cls: The djstripe model being handled.
274 :type target_cls: Type[models.StripeModel]
275 :param event: The event object
276 :type event: models.Event
277 :param data: The event object data (defaults to ``event.data``).
278 :param verb: The event verb (defaults to ``event.verb``).
279 :type verb: str
280 :param id: The object Stripe ID (defaults to ``object.id``).
281 :type id: str
282 :param customer: The customer object (defaults to ``event.customer``).
283 :param crud_type: The CrudType object (determined by default).
284 :param crud_exact: If True, match verb against CRUD type exactly.
285 :param crud_valid: If True, CRUD type must match valid type.
286 :returns: The object (if any) and the event CrudType.
287 :rtype: Tuple[models.StripeModel, CrudType]
288 """
289 data = data or event.data
290 id = id or data.get("object", {}).get("id", None)
291
292 if not id:
293 # We require an object when applying CRUD-like events, so if there's
294 # no ID the event is ignored/dropped. This happens in events such as
295 # invoice.upcoming, which refer to a future (non-existant) invoice.
296 logger.debug(
297 "Ignoring %r Stripe event without object ID: %r", event.type, event
298 )
299 return
300
301 verb = verb or event.verb
302 customer = customer or event.customer
303 crud_type = crud_type or CrudType.determine(
304 event=event, verb=verb, exact=crud_exact
305 )
306 obj = None
307
308 if crud_valid and not crud_type.valid:
309 logger.debug(
310 "Ignoring %r Stripe event without valid CRUD type: %r", event.type, event
311 )
312 return
313
314 if crud_type.deleted:
315 qs = target_cls.objects.filter(id=id)
316 if target_cls is models.Customer and qs.exists():
317 qs.get().purge()
318 else:
319 obj = target_cls.objects.filter(id=id).delete()
320 else:
321 # Any other event type (creates, updates, etc.) - This can apply to
322 # verbs that aren't strictly CRUD but Stripe do intend an update. Such
323 # as invoice.payment_failed.
324 kwargs = {"id": id}
325 if hasattr(target_cls, "customer"):
326 kwargs["customer"] = customer
327 data = target_cls(**kwargs).api_retrieve()
328 obj = target_cls.sync_from_stripe_data(data)
329
330 return obj, crud_type
331
[end of djstripe/event_handlers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/djstripe/event_handlers.py b/djstripe/event_handlers.py
--- a/djstripe/event_handlers.py
+++ b/djstripe/event_handlers.py
@@ -111,7 +111,16 @@
Docs an example subscription webhook response:
https://stripe.com/docs/api#subscription_object
"""
- _handle_crud_like_event(target_cls=models.Subscription, event=event)
+
+ # customer.subscription.deleted doesn't actually delete the subscription
+ # on the stripe side, it updates it to canceled status, so override
+ # crud_type to update to match.
+ crud_type = CrudType.determine(event=event)
+ if crud_type.deleted:
+ crud_type = CrudType(updated=True)
+ _handle_crud_like_event(
+ target_cls=models.Subscription, event=event, crud_type=crud_type
+ )
@webhooks.handler("payment_method")
|
{"golden_diff": "diff --git a/djstripe/event_handlers.py b/djstripe/event_handlers.py\n--- a/djstripe/event_handlers.py\n+++ b/djstripe/event_handlers.py\n@@ -111,7 +111,16 @@\n Docs an example subscription webhook response:\n https://stripe.com/docs/api#subscription_object\n \"\"\"\n- _handle_crud_like_event(target_cls=models.Subscription, event=event)\n+\n+ # customer.subscription.deleted doesn't actually delete the subscription\n+ # on the stripe side, it updates it to canceled status, so override\n+ # crud_type to update to match.\n+ crud_type = CrudType.determine(event=event)\n+ if crud_type.deleted:\n+ crud_type = CrudType(updated=True)\n+ _handle_crud_like_event(\n+ target_cls=models.Subscription, event=event, crud_type=crud_type\n+ )\n \n \n @webhooks.handler(\"payment_method\")\n", "issue": "Cancelling a Subscription removes the model instance\nI notice at the moment when you cancel a subscription the model instance is deleted too and looking at the code this seems to be by design. Is this the correct action to take - surely having the history would be useful and would there not be foreign keys to invoices etc? What purpose is the subscription status of \"cancelled\" if the records are deleted? I also noticed that the Customer._sync_subscriptions() method pulls in all subscriptions regardless of their status so the functionality at least seems to be inconsistent.\n", "before_files": [{"content": "\"\"\"\nWebhook event handlers for the various models\n\nStripe docs for Events: https://stripe.com/docs/api/events\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\nTODO: Implement webhook event handlers for all the models that need to\n respond to webhook events.\n\nNOTE:\n Event data is not guaranteed to be in the correct API version format.\n See #116. When writing a webhook handler, make sure to first\n re-retrieve the object you wish to process.\n\n\"\"\"\nimport logging\n\nfrom . 
import models, webhooks\nfrom .enums import SourceType\nfrom .utils import convert_tstamp\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\"customer\")\ndef customer_webhook_handler(event):\n \"\"\"Handle updates to customer objects.\n\n First determines the crud_type and then handles the event if a customer\n exists locally.\n As customers are tied to local users, djstripe will not create customers that\n do not already exist locally.\n\n Docs and an example customer webhook response:\n https://stripe.com/docs/api#customer_object\n \"\"\"\n if event.customer:\n # As customers are tied to local users, djstripe will not create\n # customers that do not already exist locally.\n _handle_crud_like_event(\n target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True\n )\n\n\[email protected](\"customer.discount\")\ndef customer_discount_webhook_handler(event):\n \"\"\"Handle updates to customer discount objects.\n\n Docs: https://stripe.com/docs/api#discounts\n\n Because there is no concept of a \"Discount\" model in dj-stripe (due to the\n lack of a stripe id on them), this is a little different to the other\n handlers.\n \"\"\"\n\n crud_type = CrudType.determine(event=event)\n discount_data = event.data.get(\"object\", {})\n coupon_data = discount_data.get(\"coupon\", {})\n customer = event.customer\n\n if crud_type.created or crud_type.updated:\n coupon, _ = _handle_crud_like_event(\n target_cls=models.Coupon,\n event=event,\n data=coupon_data,\n id=coupon_data.get(\"id\"),\n )\n coupon_start = discount_data.get(\"start\")\n coupon_end = discount_data.get(\"end\")\n else:\n coupon = None\n coupon_start = None\n coupon_end = None\n\n customer.coupon = coupon\n customer.coupon_start = convert_tstamp(coupon_start)\n customer.coupon_end = convert_tstamp(coupon_end)\n customer.save()\n\n\[email protected](\"customer.source\")\ndef customer_source_webhook_handler(event):\n \"\"\"Handle updates to customer payment-source objects.\n\n Docs: https://stripe.com/docs/api#customer_object-sources.\n \"\"\"\n customer_data = event.data.get(\"object\", {})\n source_type = customer_data.get(\"object\", {})\n\n # TODO: handle other types of sources\n # (https://stripe.com/docs/api#customer_object-sources)\n if source_type == SourceType.card:\n if event.verb.endswith(\"deleted\") and customer_data:\n # On customer.source.deleted, we do not delete the object,\n # we merely unlink it.\n # customer = Customer.objects.get(id=customer_data[\"id\"])\n # NOTE: for now, customer.sources still points to Card\n # Also, https://github.com/dj-stripe/dj-stripe/issues/576\n models.Card.objects.filter(id=customer_data.get(\"id\", \"\")).delete()\n models.DjstripePaymentMethod.objects.filter(\n id=customer_data.get(\"id\", \"\")\n ).delete()\n else:\n _handle_crud_like_event(target_cls=models.Card, event=event)\n\n\[email protected](\"customer.subscription\")\ndef customer_subscription_webhook_handler(event):\n \"\"\"Handle updates to customer subscription objects.\n\n Docs an example subscription webhook response:\n https://stripe.com/docs/api#subscription_object\n \"\"\"\n _handle_crud_like_event(target_cls=models.Subscription, event=event)\n\n\[email protected](\"payment_method\")\ndef payment_method_handler(event):\n \"\"\"\n Handle updates to payment_method objects\n :param event:\n :return:\n\n Docs for:\n - payment_method: https://stripe.com/docs/api/payment_methods\n \"\"\"\n id_ = event.data.get(\"object\", {}).get(\"id\", None)\n\n if (\n event.parts == [\"payment_method\", 
\"detached\"]\n and id_\n and id_.startswith(\"card_\")\n ):\n # Special case to handle a quirk in stripe's wrapping of legacy \"card\" objects\n # with payment_methods - card objects are deleted on detach, so treat this as\n # a delete event\n _handle_crud_like_event(\n target_cls=models.PaymentMethod,\n event=event,\n crud_type=CrudType(deleted=True),\n )\n else:\n _handle_crud_like_event(target_cls=models.PaymentMethod, event=event)\n\n\[email protected](\n \"transfer\",\n \"charge\",\n \"coupon\",\n \"invoice\",\n \"invoiceitem\",\n \"payment_intent\",\n \"plan\",\n \"product\",\n \"setup_intent\",\n \"source\",\n)\ndef other_object_webhook_handler(event):\n \"\"\"\n Handle updates to transfer, charge, coupon, invoice, invoiceitem, payment_intent,\n plan, product, setup_intent and source objects.\n\n Docs for:\n - charge: https://stripe.com/docs/api#charges\n - coupon: https://stripe.com/docs/api#coupons\n - invoice: https://stripe.com/docs/api#invoices\n - invoiceitem: https://stripe.com/docs/api#invoiceitems\n - plan: https://stripe.com/docs/api#plans\n - product: https://stripe.com/docs/api#products\n - source: https://stripe.com/docs/api#sources\n - payment_intent: https://stripe.com/docs/api/payment_intents\n \"\"\"\n\n if event.parts[:2] == [\"charge\", \"dispute\"]:\n # Do not attempt to handle charge.dispute.* events.\n # We do not have a Dispute model yet.\n target_cls = models.Dispute\n else:\n target_cls = {\n \"charge\": models.Charge,\n \"coupon\": models.Coupon,\n \"invoice\": models.Invoice,\n \"invoiceitem\": models.InvoiceItem,\n \"payment_intent\": models.PaymentIntent,\n \"plan\": models.Plan,\n \"product\": models.Product,\n \"transfer\": models.Transfer,\n \"setup_intent\": models.SetupIntent,\n \"source\": models.Source,\n }.get(event.category)\n\n _handle_crud_like_event(target_cls=target_cls, event=event)\n\n\n#\n# Helpers\n#\n\n\nclass CrudType(object):\n \"\"\"Helper object to determine CRUD-like event state.\"\"\"\n\n created = False\n updated = False\n deleted = False\n\n def __init__(self, **kwargs):\n \"\"\"Set attributes.\"\"\"\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def valid(self):\n \"\"\"Return True if this is a CRUD-like event.\"\"\"\n return self.created or self.updated or self.deleted\n\n @classmethod\n def determine(cls, event, verb=None, exact=False):\n \"\"\"\n Determine if the event verb is a crud_type (without the 'R') event.\n\n :param event:\n :type event: models.Event\n :param verb: The event verb to examine.\n :type verb: str\n :param exact: If True, match crud_type to event verb string exactly.\n :type exact: bool\n :returns: The CrudType state object.\n :rtype: CrudType\n \"\"\"\n verb = verb or event.verb\n\n def check(crud_type_event):\n if exact:\n return verb == crud_type_event\n else:\n return verb.endswith(crud_type_event)\n\n created = updated = deleted = False\n\n if check(\"updated\"):\n updated = True\n elif check(\"created\"):\n created = True\n elif check(\"deleted\"):\n deleted = True\n\n return cls(created=created, updated=updated, deleted=deleted)\n\n\ndef _handle_crud_like_event(\n target_cls,\n event,\n data=None,\n verb=None,\n id=None,\n customer=None,\n crud_type=None,\n crud_exact=False,\n crud_valid=False,\n):\n \"\"\"\n Helper to process crud_type-like events for objects.\n\n Non-deletes (creates, updates and \"anything else\" events) are treated as\n update_or_create events - The object will be retrieved locally, then it is\n synchronised with the Stripe API for parity.\n\n Deletes only 
occur for delete events and cause the object to be deleted\n from the local database, if it existed. If it doesn't exist then it is\n ignored (but the event processing still succeeds).\n\n :param target_cls: The djstripe model being handled.\n :type target_cls: Type[models.StripeModel]\n :param event: The event object\n :type event: models.Event\n :param data: The event object data (defaults to ``event.data``).\n :param verb: The event verb (defaults to ``event.verb``).\n :type verb: str\n :param id: The object Stripe ID (defaults to ``object.id``).\n :type id: str\n :param customer: The customer object (defaults to ``event.customer``).\n :param crud_type: The CrudType object (determined by default).\n :param crud_exact: If True, match verb against CRUD type exactly.\n :param crud_valid: If True, CRUD type must match valid type.\n :returns: The object (if any) and the event CrudType.\n :rtype: Tuple[models.StripeModel, CrudType]\n \"\"\"\n data = data or event.data\n id = id or data.get(\"object\", {}).get(\"id\", None)\n\n if not id:\n # We require an object when applying CRUD-like events, so if there's\n # no ID the event is ignored/dropped. This happens in events such as\n # invoice.upcoming, which refer to a future (non-existant) invoice.\n logger.debug(\n \"Ignoring %r Stripe event without object ID: %r\", event.type, event\n )\n return\n\n verb = verb or event.verb\n customer = customer or event.customer\n crud_type = crud_type or CrudType.determine(\n event=event, verb=verb, exact=crud_exact\n )\n obj = None\n\n if crud_valid and not crud_type.valid:\n logger.debug(\n \"Ignoring %r Stripe event without valid CRUD type: %r\", event.type, event\n )\n return\n\n if crud_type.deleted:\n qs = target_cls.objects.filter(id=id)\n if target_cls is models.Customer and qs.exists():\n qs.get().purge()\n else:\n obj = target_cls.objects.filter(id=id).delete()\n else:\n # Any other event type (creates, updates, etc.) - This can apply to\n # verbs that aren't strictly CRUD but Stripe do intend an update. Such\n # as invoice.payment_failed.\n kwargs = {\"id\": id}\n if hasattr(target_cls, \"customer\"):\n kwargs[\"customer\"] = customer\n data = target_cls(**kwargs).api_retrieve()\n obj = target_cls.sync_from_stripe_data(data)\n\n return obj, crud_type\n", "path": "djstripe/event_handlers.py"}]}
| 3,991 | 198 |
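Editor's note on the dj-stripe row that ends above: the patch remaps `customer.subscription.deleted` from a local delete to an update, because Stripe itself only flips the subscription to a `canceled` status. The sketch below shows that idea with plain dataclasses; the names are illustrative and are not dj-stripe's API.

```python
from dataclasses import dataclass


@dataclass
class Subscription:
    id: str
    status: str


local_db = {"sub_1": Subscription(id="sub_1", status="active")}


def handle_subscription_event(event_verb: str, subscription_id: str) -> None:
    if event_verb.endswith("deleted"):
        # Stripe keeps the subscription around with status "canceled"; mirroring
        # that preserves history and any foreign keys pointing at the row.
        local_db[subscription_id].status = "canceled"
    else:
        local_db[subscription_id].status = "active"


handle_subscription_event("customer.subscription.deleted", "sub_1")
print(local_db["sub_1"])  # Subscription(id='sub_1', status='canceled')
```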
gh_patches_debug_33067 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1572 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor dependents tests to use library usecase
## Problem
Initially, both DB and API tests were created using dummy data and were quite difficult to read.
## Proposed solution
Refactor them to use a common use case and simplify the cases.
</issue>
<code>
[start of db/dependents/dependents_utils.py]
1 from sqlalchemy import MetaData, Table, any_, column, exists, func, literal, select, text, true, union
2 from sqlalchemy.dialects.postgresql import array
3
4 # OIDs assigned during normal database operation are constrained to be 16384 or higher.
5 USER_DEFINED_OBJECTS_MIN_OID = 16384
6 # automatic and normal dependents
7 PG_DEPENDENT_TYPES = ['a', 'n']
8 PG_CLASS_CATALOGUE_NAME = '\'pg_class\''
9 START_LEVEL = 1
10 MAX_LEVEL = 10
11
12
13 def get_dependents_graph(referenced_object_id, engine):
14 dependency_pairs = _get_typed_dependency_pairs_stmt(engine)
15 dependency_pairs_cte = dependency_pairs.cte(recursive=True, name='dependency_pairs_cte')
16
17 # anchor member which includes all dependents of a requested object
18 anchor = select(
19 dependency_pairs_cte,
20 literal(START_LEVEL).label('level'),
21 array([dependency_pairs_cte.c.refobjid]).label('dependency_chain')) \
22 .where(dependency_pairs_cte.c.refobjid == referenced_object_id) \
23 .where(dependency_pairs_cte.c.objid != referenced_object_id)
24 anchor = anchor.cte('cte')
25
26 # recursive member which includes dependents for each object of the previous level
27 recursive = select(
28 dependency_pairs_cte,
29 (anchor.c.level + 1).label('level'),
30 anchor.c.dependency_chain + array([anchor.c.objid])) \
31 .where(anchor.c.level < MAX_LEVEL) \
32 .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \
33 .where(dependency_pairs_cte.c.objid != dependency_pairs_cte.c.refobjid)
34 recursive = recursive.join(anchor, dependency_pairs_cte.c.refobjid == anchor.c.objid)
35
36 recursive_stmt = anchor.union(recursive)
37 stmt = select(recursive_stmt)
38
39 with engine.connect() as conn:
40 result = conn.execute(stmt)
41
42 return _get_structured_result(result)
43
44
45 # finding table dependents based on foreign key constraints from the referenced tables
46 def _get_table_dependents(foreign_key_dependents, pg_constraint_table):
47 # TODO: update refobjsubid with actual values when working on columns
48 pg_identify_object = _get_pg_identify_object_lateral_stmt(
49 text(f'{PG_CLASS_CATALOGUE_NAME}::regclass::oid'), pg_constraint_table.c.conrelid, 0)
50
51 pg_identify_refobject = _get_pg_identify_object_lateral_stmt(
52 foreign_key_dependents.c.refclassid, foreign_key_dependents.c.refobjid, 0)
53
54 # conrelid in this case is the oid of the table which a constraint resides in
55 return select(
56 foreign_key_dependents.c.classid,
57 pg_constraint_table.c.conrelid.label('objid'),
58 foreign_key_dependents.c.objsubid,
59 foreign_key_dependents.c.refclassid,
60 foreign_key_dependents.c.refobjid,
61 foreign_key_dependents.c.refobjsubid,
62 foreign_key_dependents.c.deptype,
63 pg_identify_object.c.name.label('objname'),
64 pg_identify_object.c.type.label('objtype'),
65 pg_identify_refobject.c.name.label('refobjname'),
66 pg_identify_refobject.c.type.label('refobjtype')) \
67 .select_from(foreign_key_dependents) \
68 .join(pg_constraint_table, pg_constraint_table.c.oid == foreign_key_dependents.c.objid) \
69 .join(pg_identify_object, true()) \
70 .join(pg_identify_refobject, true()) \
71 .where(pg_constraint_table.c.confrelid != 0) \
72 .group_by(
73 foreign_key_dependents,
74 pg_constraint_table.c.conrelid,
75 pg_identify_object.c.name,
76 pg_identify_object.c.type,
77 pg_identify_refobject.c.name,
78 pg_identify_refobject.c.type)
79
80
81 def _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair):
82 return dependency_pair.where(pg_identify_object.c.type == 'table constraint')
83
84
85 # getting a full list of dependents and identifying them
86 def _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):
87 result = select(
88 pg_depend,
89 pg_identify_object.c.name.label('objname'),
90 pg_identify_object.c.type.label('objtype'),
91 pg_identify_refobject.c.name.label('refobjname'),
92 pg_identify_refobject.c.type.label('refobjtype')) \
93 .select_from(pg_depend) \
94 .join(pg_identify_object, true()) \
95 .join(pg_identify_refobject, true()) \
96 .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) \
97 .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) \
98 .group_by(
99 pg_depend,
100 pg_identify_object.c.name,
101 pg_identify_object.c.type,
102 pg_identify_refobject.c.name,
103 pg_identify_refobject.c.type)
104
105 return result
106
107
108 def _get_pg_depend_table(engine, metadata):
109 return Table("pg_depend", metadata, autoload_with=engine)
110
111
112 def _get_pg_constraint_table(engine, metadata):
113 return Table("pg_constraint", metadata, autoload_with=engine)
114
115
116 def _get_pg_identify_object_lateral_stmt(classid, objid, objsubid):
117 return select(
118 column("name"),
119 column("type")) \
120 .select_from(func.pg_identify_object(
121 classid,
122 objid,
123 objsubid)) \
124 .lateral()
125
126
127 def _get_typed_dependency_pairs_stmt(engine):
128 metadata = MetaData()
129
130 pg_depend = _get_pg_depend_table(engine, metadata)
131 pg_identify_object = _get_pg_identify_object_lateral_stmt(
132 pg_depend.c.classid, pg_depend.c.objid, pg_depend.c.objsubid)
133 pg_identify_refobject = _get_pg_identify_object_lateral_stmt(
134 pg_depend.c.refclassid, pg_depend.c.refobjid, 0)
135 pg_constraint = _get_pg_constraint_table(engine, metadata)
136
137 # each statement filters the base statement extracting dependents of a specific type
138 # so it's easy to exclude particular types or add new
139 dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)
140 foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')
141 table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')
142
143 return union(
144 select(foreign_key_constraint_dependents),
145 select(table_dependents))
146
147
148 def has_dependencies(referenced_object_id, engine):
149 metadata = MetaData()
150
151 pg_depend = _get_pg_depend_table(engine, metadata)
152
153 stmt = select(
154 exists(
155 select().select_from(pg_depend)
156 .where(pg_depend.c.refobjid == referenced_object_id)
157 .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES)))
158 .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID)
159 )
160 )
161
162 with engine.connect() as conn:
163 result = conn.execute(stmt).scalar()
164
165 return result
166
167
168 def _get_structured_result(dependency_graph_result):
169 result = []
170 for dependency_pair in dependency_graph_result:
171 d = {}
172 d['level'] = dependency_pair.level
173 d['obj'] = {'objid': dependency_pair.objid, 'type': dependency_pair.objtype}
174 d['parent_obj'] = {'objid': dependency_pair.refobjid, 'type': dependency_pair.refobjtype}
175 result.append(d)
176
177 return result
178
[end of db/dependents/dependents_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/db/dependents/dependents_utils.py b/db/dependents/dependents_utils.py
--- a/db/dependents/dependents_utils.py
+++ b/db/dependents/dependents_utils.py
@@ -26,7 +26,7 @@
# recursive member which includes dependents for each object of the previous level
recursive = select(
dependency_pairs_cte,
- (anchor.c.level + 1).label('level'),
+ (anchor.c.level + 1),
anchor.c.dependency_chain + array([anchor.c.objid])) \
.where(anchor.c.level < MAX_LEVEL) \
.where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \
@@ -82,7 +82,7 @@
return dependency_pair.where(pg_identify_object.c.type == 'table constraint')
-# getting a full list of dependents and identifying them
+# stmt for getting a full list of dependents and identifying them
def _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):
result = select(
pg_depend,
@@ -136,8 +136,8 @@
# each statement filters the base statement extracting dependents of a specific type
# so it's easy to exclude particular types or add new
- dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)
- foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')
+ dependency_pairs = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)
+ foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pairs).cte('foreign_key_constraint_dependents')
table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')
return union(
|
{"golden_diff": "diff --git a/db/dependents/dependents_utils.py b/db/dependents/dependents_utils.py\n--- a/db/dependents/dependents_utils.py\n+++ b/db/dependents/dependents_utils.py\n@@ -26,7 +26,7 @@\n # recursive member which includes dependents for each object of the previous level\n recursive = select(\n dependency_pairs_cte,\n- (anchor.c.level + 1).label('level'),\n+ (anchor.c.level + 1),\n anchor.c.dependency_chain + array([anchor.c.objid])) \\\n .where(anchor.c.level < MAX_LEVEL) \\\n .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \\\n@@ -82,7 +82,7 @@\n return dependency_pair.where(pg_identify_object.c.type == 'table constraint')\n \n \n-# getting a full list of dependents and identifying them\n+# stmt for getting a full list of dependents and identifying them\n def _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):\n result = select(\n pg_depend,\n@@ -136,8 +136,8 @@\n \n # each statement filters the base statement extracting dependents of a specific type\n # so it's easy to exclude particular types or add new\n- dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)\n- foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')\n+ dependency_pairs = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)\n+ foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pairs).cte('foreign_key_constraint_dependents')\n table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')\n \n return union(\n", "issue": "Refactor dependents tests to use library usecase\n## Problem\r\nInitially, both DB and API tests were created using dummy data and were quite difficult to read. 
\r\n\r\n## Proposed solution\r\nRefactor them to use a common use case and simplify the cases.\r\n\n", "before_files": [{"content": "from sqlalchemy import MetaData, Table, any_, column, exists, func, literal, select, text, true, union\nfrom sqlalchemy.dialects.postgresql import array\n\n# OIDs assigned during normal database operation are constrained to be 16384 or higher.\nUSER_DEFINED_OBJECTS_MIN_OID = 16384\n# automatic and normal dependents\nPG_DEPENDENT_TYPES = ['a', 'n']\nPG_CLASS_CATALOGUE_NAME = '\\'pg_class\\''\nSTART_LEVEL = 1\nMAX_LEVEL = 10\n\n\ndef get_dependents_graph(referenced_object_id, engine):\n dependency_pairs = _get_typed_dependency_pairs_stmt(engine)\n dependency_pairs_cte = dependency_pairs.cte(recursive=True, name='dependency_pairs_cte')\n\n # anchor member which includes all dependents of a requested object\n anchor = select(\n dependency_pairs_cte,\n literal(START_LEVEL).label('level'),\n array([dependency_pairs_cte.c.refobjid]).label('dependency_chain')) \\\n .where(dependency_pairs_cte.c.refobjid == referenced_object_id) \\\n .where(dependency_pairs_cte.c.objid != referenced_object_id)\n anchor = anchor.cte('cte')\n\n # recursive member which includes dependents for each object of the previous level\n recursive = select(\n dependency_pairs_cte,\n (anchor.c.level + 1).label('level'),\n anchor.c.dependency_chain + array([anchor.c.objid])) \\\n .where(anchor.c.level < MAX_LEVEL) \\\n .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \\\n .where(dependency_pairs_cte.c.objid != dependency_pairs_cte.c.refobjid)\n recursive = recursive.join(anchor, dependency_pairs_cte.c.refobjid == anchor.c.objid)\n\n recursive_stmt = anchor.union(recursive)\n stmt = select(recursive_stmt)\n\n with engine.connect() as conn:\n result = conn.execute(stmt)\n\n return _get_structured_result(result)\n\n\n# finding table dependents based on foreign key constraints from the referenced tables\ndef _get_table_dependents(foreign_key_dependents, pg_constraint_table):\n # TODO: update refobjsubid with actual values when working on columns\n pg_identify_object = _get_pg_identify_object_lateral_stmt(\n text(f'{PG_CLASS_CATALOGUE_NAME}::regclass::oid'), pg_constraint_table.c.conrelid, 0)\n\n pg_identify_refobject = _get_pg_identify_object_lateral_stmt(\n foreign_key_dependents.c.refclassid, foreign_key_dependents.c.refobjid, 0)\n\n # conrelid in this case is the oid of the table which a constraint resides in\n return select(\n foreign_key_dependents.c.classid,\n pg_constraint_table.c.conrelid.label('objid'),\n foreign_key_dependents.c.objsubid,\n foreign_key_dependents.c.refclassid,\n foreign_key_dependents.c.refobjid,\n foreign_key_dependents.c.refobjsubid,\n foreign_key_dependents.c.deptype,\n pg_identify_object.c.name.label('objname'),\n pg_identify_object.c.type.label('objtype'),\n pg_identify_refobject.c.name.label('refobjname'),\n pg_identify_refobject.c.type.label('refobjtype')) \\\n .select_from(foreign_key_dependents) \\\n .join(pg_constraint_table, pg_constraint_table.c.oid == foreign_key_dependents.c.objid) \\\n .join(pg_identify_object, true()) \\\n .join(pg_identify_refobject, true()) \\\n .where(pg_constraint_table.c.confrelid != 0) \\\n .group_by(\n foreign_key_dependents,\n pg_constraint_table.c.conrelid,\n pg_identify_object.c.name,\n pg_identify_object.c.type,\n pg_identify_refobject.c.name,\n pg_identify_refobject.c.type)\n\n\ndef _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair):\n return 
dependency_pair.where(pg_identify_object.c.type == 'table constraint')\n\n\n# getting a full list of dependents and identifying them\ndef _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):\n result = select(\n pg_depend,\n pg_identify_object.c.name.label('objname'),\n pg_identify_object.c.type.label('objtype'),\n pg_identify_refobject.c.name.label('refobjname'),\n pg_identify_refobject.c.type.label('refobjtype')) \\\n .select_from(pg_depend) \\\n .join(pg_identify_object, true()) \\\n .join(pg_identify_refobject, true()) \\\n .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) \\\n .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) \\\n .group_by(\n pg_depend,\n pg_identify_object.c.name,\n pg_identify_object.c.type,\n pg_identify_refobject.c.name,\n pg_identify_refobject.c.type)\n\n return result\n\n\ndef _get_pg_depend_table(engine, metadata):\n return Table(\"pg_depend\", metadata, autoload_with=engine)\n\n\ndef _get_pg_constraint_table(engine, metadata):\n return Table(\"pg_constraint\", metadata, autoload_with=engine)\n\n\ndef _get_pg_identify_object_lateral_stmt(classid, objid, objsubid):\n return select(\n column(\"name\"),\n column(\"type\")) \\\n .select_from(func.pg_identify_object(\n classid,\n objid,\n objsubid)) \\\n .lateral()\n\n\ndef _get_typed_dependency_pairs_stmt(engine):\n metadata = MetaData()\n\n pg_depend = _get_pg_depend_table(engine, metadata)\n pg_identify_object = _get_pg_identify_object_lateral_stmt(\n pg_depend.c.classid, pg_depend.c.objid, pg_depend.c.objsubid)\n pg_identify_refobject = _get_pg_identify_object_lateral_stmt(\n pg_depend.c.refclassid, pg_depend.c.refobjid, 0)\n pg_constraint = _get_pg_constraint_table(engine, metadata)\n\n # each statement filters the base statement extracting dependents of a specific type\n # so it's easy to exclude particular types or add new\n dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)\n foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')\n table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')\n\n return union(\n select(foreign_key_constraint_dependents),\n select(table_dependents))\n\n\ndef has_dependencies(referenced_object_id, engine):\n metadata = MetaData()\n\n pg_depend = _get_pg_depend_table(engine, metadata)\n\n stmt = select(\n exists(\n select().select_from(pg_depend)\n .where(pg_depend.c.refobjid == referenced_object_id)\n .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES)))\n .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID)\n )\n )\n\n with engine.connect() as conn:\n result = conn.execute(stmt).scalar()\n\n return result\n\n\ndef _get_structured_result(dependency_graph_result):\n result = []\n for dependency_pair in dependency_graph_result:\n d = {}\n d['level'] = dependency_pair.level\n d['obj'] = {'objid': dependency_pair.objid, 'type': dependency_pair.objtype}\n d['parent_obj'] = {'objid': dependency_pair.refobjid, 'type': dependency_pair.refobjtype}\n result.append(d)\n\n return result\n", "path": "db/dependents/dependents_utils.py"}]}
| 2,708 | 433 |
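Editor's note on the Mathesar row that ends above: the recursive CTE in `get_dependents_graph` is easier to follow next to a plain-Python equivalent. The sketch below walks a toy dependency map level by level and emits the same `{'level', 'obj', 'parent_obj'}` records that `_get_structured_result` returns; the cycle guard is simplified to a global visited set, whereas the SQL tracks a per-path `dependency_chain`, and the toy map stands in for the real `pg_depend` catalog.

```python
MAX_LEVEL = 10

# Toy oid -> dependent-oids map; stands in for the pg_depend joins.
dependents_of = {1: [2, 3], 2: [4], 3: [], 4: []}


def get_dependents_graph(referenced_oid):
    result, frontier, level = [], [referenced_oid], 1
    seen = {referenced_oid}
    while frontier and level <= MAX_LEVEL:
        next_frontier = []
        for parent in frontier:
            for child in dependents_of.get(parent, []):
                if child in seen:  # simplified cycle guard
                    continue
                seen.add(child)
                result.append({
                    "level": level,
                    "obj": {"objid": child, "type": "table"},
                    "parent_obj": {"objid": parent, "type": "table"},
                })
                next_frontier.append(child)
        frontier, level = next_frontier, level + 1
    return result


print(get_dependents_graph(1))  # oids 2 and 3 at level 1, oid 4 at level 2
```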
gh_patches_debug_24187 | rasdani/github-patches | git_diff | vega__altair-1539 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support built-in vega themes
See [vega-themes](https://github.com/vega/vega-themes). Themes should be supported via the current theme infrastructure, maybe something like this:
```python
alt.themes.enable('vega.themes.dark')
```
We'll have to think about how to best populate the list of available themes, and how to make this work cleanly with user-specified themes from within Altair.
</issue>
<code>
[start of altair/vegalite/v3/theme.py]
1 """Tools for enabling and registering chart themes"""
2
3 from ...utils.theme import ThemeRegistry
4
5 # The entry point group that can be used by other packages to declare other
6 # renderers that will be auto-detected. Explicit registration is also
7 # allowed by the PluginRegistery API.
8 ENTRY_POINT_GROUP = 'altair.vegalite.v3.theme' # type: str
9 themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)
10
11 themes.register('default', lambda: {"config": {"view": {"width": 400, "height": 300},
12 "mark": {"tooltip": None}}})
13 themes.register('opaque', lambda: {"config": {"background": "white",
14 "view": {"width": 400, "height": 300},
15 "mark": {"tooltip": None}}})
16 themes.register('none', lambda: {})
17 themes.enable('default')
18
[end of altair/vegalite/v3/theme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/altair/vegalite/v3/theme.py b/altair/vegalite/v3/theme.py
--- a/altair/vegalite/v3/theme.py
+++ b/altair/vegalite/v3/theme.py
@@ -2,6 +2,23 @@
from ...utils.theme import ThemeRegistry
+VEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']
+
+
+class VegaTheme(object):
+ """Implementation of a builtin vega theme."""
+ def __init__(self, theme):
+ self.theme = theme
+
+ def __call__(self):
+ return {"usermeta": {"embedOptions": {"theme": self.theme}},
+ "config": {"view": {"width": 400, "height": 300},
+ "mark": {"tooltip": None}}}
+
+ def __repr__(self):
+ return "VegaTheme({!r})".format(self.theme)
+
+
# The entry point group that can be used by other packages to declare other
# renderers that will be auto-detected. Explicit registration is also
# allowed by the PluginRegistery API.
@@ -14,4 +31,8 @@
"view": {"width": 400, "height": 300},
"mark": {"tooltip": None}}})
themes.register('none', lambda: {})
+
+for theme in VEGA_THEMES:
+ themes.register(theme, VegaTheme(theme))
+
themes.enable('default')
|
{"golden_diff": "diff --git a/altair/vegalite/v3/theme.py b/altair/vegalite/v3/theme.py\n--- a/altair/vegalite/v3/theme.py\n+++ b/altair/vegalite/v3/theme.py\n@@ -2,6 +2,23 @@\n \n from ...utils.theme import ThemeRegistry\n \n+VEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']\n+\n+\n+class VegaTheme(object):\n+ \"\"\"Implementation of a builtin vega theme.\"\"\"\n+ def __init__(self, theme):\n+ self.theme = theme\n+ \n+ def __call__(self):\n+ return {\"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n+ \"config\": {\"view\": {\"width\": 400, \"height\": 300},\n+ \"mark\": {\"tooltip\": None}}}\n+\n+ def __repr__(self):\n+ return \"VegaTheme({!r})\".format(self.theme)\n+\n+\n # The entry point group that can be used by other packages to declare other\n # renderers that will be auto-detected. Explicit registration is also\n # allowed by the PluginRegistery API.\n@@ -14,4 +31,8 @@\n \"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\n themes.register('none', lambda: {})\n+ \n+for theme in VEGA_THEMES:\n+ themes.register(theme, VegaTheme(theme))\n+\n themes.enable('default')\n", "issue": "Support built-in vega themes\nSee [vega-themes](https://github.com/vega/vega-themes). Themes should be supported via the current theme infrastructure, maybe something like this:\r\n```python\r\nalt.themes.enable('vega.themes.dark')\r\n```\r\nWe'll have to think about how to best populate the list of available themes, and how to make this work cleanly with user-specified themes from within Altair.\n", "before_files": [{"content": "\"\"\"Tools for enabling and registering chart themes\"\"\"\n\nfrom ...utils.theme import ThemeRegistry\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = 'altair.vegalite.v3.theme' # type: str\nthemes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nthemes.register('default', lambda: {\"config\": {\"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('opaque', lambda: {\"config\": {\"background\": \"white\",\n \"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('none', lambda: {})\nthemes.enable('default')\n", "path": "altair/vegalite/v3/theme.py"}]}
| 855 | 351 |
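Editor's note on the Altair row that ends above: once the `VegaTheme` registration in the golden diff is in a released version (the built-in vega themes shipped with Altair 3.1+), enabling one of them is a single call. A usage sketch, assuming such a release is installed:

```python
import altair as alt

alt.themes.enable("dark")  # any of: ggplot2, quartz, vox, fivethirtyeight, dark, latimes
print(alt.themes.active)   # 'dark'

# While enabled, every chart picks up the theme dict from the registry:
# {"usermeta": {"embedOptions": {"theme": "dark"}},
#  "config": {"view": {"width": 400, "height": 300}, "mark": {"tooltip": None}}}
```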
gh_patches_debug_4783 | rasdani/github-patches | git_diff | scikit-image__scikit-image-3280 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
label2rgb index error when using average and background color
When using averaged label2rgb with bg_label the out image indexing fails with error:
```
Traceback (most recent call last):
File "file.py", line 222, in generate
overlay = color.label2rgb(domain, image=img, bg_label=0, bg_color=1.0, kind="avg")
File "[...]/python3.6/site-packages/skimage/color/colorlabel.py", line 116, in label2rgb
return _label2rgb_avg(label, image, bg_label, bg_color)
File "[...]/python3.6/site-packages/skimage/color/colorlabel.py", line 225, in _label2rgb_avg
out[bg] = bg_color
IndexError: boolean index did not match indexed array along dimension 0; dimension is 100 but corresponding boolean dimension is 2
```
</issue>
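Editor's note: the traceback above comes from indexing the image-shaped `out` array with a boolean mask built over the unique labels (length 2 here: background plus one region). A pure-NumPy reproduction that needs no skimage; the final line is just one shape-correct way to write the assignment, not necessarily the exact upstream fix (the golden diff at the end of this row is truncated).

```python
import numpy as np

label_field = np.zeros((100, 100), dtype=int)  # background label 0 ...
label_field[25:75, 25:75] = 1                  # ... plus a single labeled region
image = np.random.rand(100, 100, 3)

out = np.zeros_like(image)
labels = np.unique(label_field)  # array([0, 1]) -> length 2
bg = (labels == 0)               # boolean of length 2, not of shape (100, 100)
try:
    out[bg] = 1.0                # same pattern as `out[bg] = bg_color` in _label2rgb_avg
except IndexError as err:
    print(err)                   # dimension is 100 but corresponding boolean dimension is 2

out[label_field == 0] = 1.0      # a mask with the image's own spatial shape works
```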
<code>
[start of skimage/color/colorlabel.py]
1 import itertools
2
3 import numpy as np
4
5 from .._shared.utils import warn
6 from ..util import img_as_float
7 from . import rgb_colors
8 from .colorconv import rgb2gray, gray2rgb
9
10
11 __all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']
12
13
14 DEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',
15 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')
16
17
18 color_dict = {k: v for k, v in rgb_colors.__dict__.items()
19 if isinstance(v, tuple)}
20
21
22 def _rgb_vector(color):
23 """Return RGB color as (1, 3) array.
24
25 This RGB array gets multiplied by masked regions of an RGB image, which are
26 partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB).
27
28 Parameters
29 ----------
30 color : str or array
31 Color name in `color_dict` or RGB float values between [0, 1].
32 """
33 if isinstance(color, str):
34 color = color_dict[color]
35 # Slice to handle RGBA colors.
36 return np.array(color[:3])
37
38
39 def _match_label_with_color(label, colors, bg_label, bg_color):
40 """Return `unique_labels` and `color_cycle` for label array and color list.
41
42 Colors are cycled for normal labels, but the background color should only
43 be used for the background.
44 """
45 # Temporarily set background color; it will be removed later.
46 if bg_color is None:
47 bg_color = (0, 0, 0)
48 bg_color = _rgb_vector([bg_color])
49
50 # map labels to their ranks among all labels from small to large
51 unique_labels, mapped_labels = np.unique(label, return_inverse=True)
52
53 # get rank of bg_label
54 bg_label_rank_list = mapped_labels[label.flat == bg_label]
55
56 # The rank of each label is the index of the color it is matched to in
57 # color cycle. bg_label should always be mapped to the first color, so
58 # its rank must be 0. Other labels should be ranked from small to large
59 # from 1.
60 if len(bg_label_rank_list) > 0:
61 bg_label_rank = bg_label_rank_list[0]
62 mapped_labels[mapped_labels < bg_label_rank] += 1
63 mapped_labels[label.flat == bg_label] = 0
64 else:
65 mapped_labels += 1
66
67 # Modify labels and color cycle so background color is used only once.
68 color_cycle = itertools.cycle(colors)
69 color_cycle = itertools.chain(bg_color, color_cycle)
70
71 return mapped_labels, color_cycle
72
73
74 def label2rgb(label, image=None, colors=None, alpha=0.3,
75 bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):
76 """Return an RGB image where color-coded labels are painted over the image.
77
78 Parameters
79 ----------
80 label : array, shape (M, N)
81 Integer array of labels with the same shape as `image`.
82 image : array, shape (M, N, 3), optional
83 Image used as underlay for labels. If the input is an RGB image, it's
84 converted to grayscale before coloring.
85 colors : list, optional
86 List of colors. If the number of labels exceeds the number of colors,
87 then the colors are cycled.
88 alpha : float [0, 1], optional
89 Opacity of colorized labels. Ignored if image is `None`.
90 bg_label : int, optional
91 Label that's treated as the background.
92 bg_color : str or array, optional
93 Background color. Must be a name in `color_dict` or RGB float values
94 between [0, 1].
95 image_alpha : float [0, 1], optional
96 Opacity of the image.
97 kind : string, one of {'overlay', 'avg'}
98 The kind of color image desired. 'overlay' cycles over defined colors
99 and overlays the colored labels over the original image. 'avg' replaces
100 each labeled segment with its average color, for a stained-class or
101 pastel painting appearance.
102
103 Returns
104 -------
105 result : array of float, shape (M, N, 3)
106 The result of blending a cycling colormap (`colors`) for each distinct
107 value in `label` with the image, at a certain alpha value.
108 """
109 if kind == 'overlay':
110 return _label2rgb_overlay(label, image, colors, alpha, bg_label,
111 bg_color, image_alpha)
112 else:
113 return _label2rgb_avg(label, image, bg_label, bg_color)
114
115
116 def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,
117 bg_label=-1, bg_color=None, image_alpha=1):
118 """Return an RGB image where color-coded labels are painted over the image.
119
120 Parameters
121 ----------
122 label : array, shape (M, N)
123 Integer array of labels with the same shape as `image`.
124 image : array, shape (M, N, 3), optional
125 Image used as underlay for labels. If the input is an RGB image, it's
126 converted to grayscale before coloring.
127 colors : list, optional
128 List of colors. If the number of labels exceeds the number of colors,
129 then the colors are cycled.
130 alpha : float [0, 1], optional
131 Opacity of colorized labels. Ignored if image is `None`.
132 bg_label : int, optional
133 Label that's treated as the background.
134 bg_color : str or array, optional
135 Background color. Must be a name in `color_dict` or RGB float values
136 between [0, 1].
137 image_alpha : float [0, 1], optional
138 Opacity of the image.
139
140 Returns
141 -------
142 result : array of float, shape (M, N, 3)
143 The result of blending a cycling colormap (`colors`) for each distinct
144 value in `label` with the image, at a certain alpha value.
145 """
146 if colors is None:
147 colors = DEFAULT_COLORS
148 colors = [_rgb_vector(c) for c in colors]
149
150 if image is None:
151 image = np.zeros(label.shape + (3,), dtype=np.float64)
152 # Opacity doesn't make sense if no image exists.
153 alpha = 1
154 else:
155 if not image.shape[:2] == label.shape:
156 raise ValueError("`image` and `label` must be the same shape")
157
158 if image.min() < 0:
159 warn("Negative intensities in `image` are not supported")
160
161 image = img_as_float(rgb2gray(image))
162 image = gray2rgb(image) * image_alpha + (1 - image_alpha)
163
164 # Ensure that all labels are non-negative so we can index into
165 # `label_to_color` correctly.
166 offset = min(label.min(), bg_label)
167 if offset != 0:
168 label = label - offset # Make sure you don't modify the input array.
169 bg_label -= offset
170
171 new_type = np.min_scalar_type(int(label.max()))
172 if new_type == np.bool:
173 new_type = np.uint8
174 label = label.astype(new_type)
175
176 mapped_labels_flat, color_cycle = _match_label_with_color(label, colors,
177 bg_label, bg_color)
178
179 if len(mapped_labels_flat) == 0:
180 return image
181
182 dense_labels = range(max(mapped_labels_flat) + 1)
183
184 label_to_color = np.array([c for i, c in zip(dense_labels, color_cycle)])
185
186 mapped_labels = label
187 mapped_labels.flat = mapped_labels_flat
188 result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)
189
190 # Remove background label if its color was not specified.
191 remove_background = 0 in mapped_labels_flat and bg_color is None
192 if remove_background:
193 result[label == bg_label] = image[label == bg_label]
194
195 return result
196
197
198 def _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):
199 """Visualise each segment in `label_field` with its mean color in `image`.
200
201 Parameters
202 ----------
203 label_field : array of int
204 A segmentation of an image.
205 image : array, shape ``label_field.shape + (3,)``
206 A color image of the same spatial shape as `label_field`.
207 bg_label : int, optional
208 A value in `label_field` to be treated as background.
209 bg_color : 3-tuple of int, optional
210 The color for the background label
211
212 Returns
213 -------
214 out : array, same shape and type as `image`
215 The output visualization.
216 """
217 out = np.zeros_like(image)
218 labels = np.unique(label_field)
219 bg = (labels == bg_label)
220 if bg.any():
221 labels = labels[labels != bg_label]
222 out[bg] = bg_color
223 for label in labels:
224 mask = (label_field == label).nonzero()
225 color = image[mask].mean(axis=0)
226 out[mask] = color
227 return out
228
[end of skimage/color/colorlabel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skimage/color/colorlabel.py b/skimage/color/colorlabel.py
--- a/skimage/color/colorlabel.py
+++ b/skimage/color/colorlabel.py
@@ -219,7 +219,8 @@
bg = (labels == bg_label)
if bg.any():
labels = labels[labels != bg_label]
- out[bg] = bg_color
+ mask = (label_field == bg_label).nonzero()
+ out[mask] = bg_color
for label in labels:
mask = (label_field == label).nonzero()
color = image[mask].mean(axis=0)
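
The failure mode behind this patch is a pure NumPy shape mismatch, so a minimal, self-contained sketch (independent of scikit-image; the array sizes are illustrative) may help: `labels` comes from `np.unique(label_field)` and has one entry per distinct label, so the boolean `bg` cannot index the image-shaped `out` array, while a mask built from `label_field` itself can.

```
# Minimal reproduction of the shape mismatch fixed by the patch above.
import numpy as np

label_field = np.zeros((100, 100), dtype=int)   # toy segmentation with labels {0, 1}
label_field[:50] = 1
image = np.random.rand(100, 100, 3)
bg_label, bg_color = 0, (0, 0, 0)

out = np.zeros_like(image)
labels = np.unique(label_field)                 # array([0, 1]) -> shape (2,)
bg = (labels == bg_label)                       # shape (2,), not (100, 100)

try:
    out[bg] = bg_color                          # pre-patch line: boolean index of length 2
except IndexError as exc:                       # against a first axis of length 100
    print("IndexError:", exc)

mask = (label_field == bg_label).nonzero()      # patched version: mask from label_field
out[mask] = bg_color                            # matches out's spatial shape
```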
|
{"golden_diff": "diff --git a/skimage/color/colorlabel.py b/skimage/color/colorlabel.py\n--- a/skimage/color/colorlabel.py\n+++ b/skimage/color/colorlabel.py\n@@ -219,7 +219,8 @@\n bg = (labels == bg_label)\n if bg.any():\n labels = labels[labels != bg_label]\n- out[bg] = bg_color\n+ mask = (label_field == bg_label).nonzero()\n+ out[mask] = bg_color\n for label in labels:\n mask = (label_field == label).nonzero()\n color = image[mask].mean(axis=0)\n", "issue": "label2rgb index error when using average and background color\nWhen using averaged label2rgb with bg_label the out image indexing fails with error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"file.py\", line 222, in generate\r\n overlay = color.label2rgb(domain, image=img, bg_label=0, bg_color=1.0, kind=\"avg\")\r\n File \"[...]/python3.6/site-packages/skimage/color/colorlabel.py\", line 116, in label2rgb\r\n return _label2rgb_avg(label, image, bg_label, bg_color)\r\n File \"[...]/python3.6/site-packages/skimage/color/colorlabel.py\", line 225, in _label2rgb_avg\r\n out[bg] = bg_color\r\nIndexError: boolean index did not match indexed array along dimension 0; dimension is 100 but corresponding boolean dimension is 2\r\n```\n", "before_files": [{"content": "import itertools\n\nimport numpy as np\n\nfrom .._shared.utils import warn\nfrom ..util import img_as_float\nfrom . import rgb_colors\nfrom .colorconv import rgb2gray, gray2rgb\n\n\n__all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']\n\n\nDEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',\n 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')\n\n\ncolor_dict = {k: v for k, v in rgb_colors.__dict__.items()\n if isinstance(v, tuple)}\n\n\ndef _rgb_vector(color):\n \"\"\"Return RGB color as (1, 3) array.\n\n This RGB array gets multiplied by masked regions of an RGB image, which are\n partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB).\n\n Parameters\n ----------\n color : str or array\n Color name in `color_dict` or RGB float values between [0, 1].\n \"\"\"\n if isinstance(color, str):\n color = color_dict[color]\n # Slice to handle RGBA colors.\n return np.array(color[:3])\n\n\ndef _match_label_with_color(label, colors, bg_label, bg_color):\n \"\"\"Return `unique_labels` and `color_cycle` for label array and color list.\n\n Colors are cycled for normal labels, but the background color should only\n be used for the background.\n \"\"\"\n # Temporarily set background color; it will be removed later.\n if bg_color is None:\n bg_color = (0, 0, 0)\n bg_color = _rgb_vector([bg_color])\n\n # map labels to their ranks among all labels from small to large\n unique_labels, mapped_labels = np.unique(label, return_inverse=True)\n\n # get rank of bg_label\n bg_label_rank_list = mapped_labels[label.flat == bg_label]\n\n # The rank of each label is the index of the color it is matched to in\n # color cycle. bg_label should always be mapped to the first color, so\n # its rank must be 0. 
Other labels should be ranked from small to large\n # from 1.\n if len(bg_label_rank_list) > 0:\n bg_label_rank = bg_label_rank_list[0]\n mapped_labels[mapped_labels < bg_label_rank] += 1\n mapped_labels[label.flat == bg_label] = 0\n else:\n mapped_labels += 1\n\n # Modify labels and color cycle so background color is used only once.\n color_cycle = itertools.cycle(colors)\n color_cycle = itertools.chain(bg_color, color_cycle)\n\n return mapped_labels, color_cycle\n\n\ndef label2rgb(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n kind : string, one of {'overlay', 'avg'}\n The kind of color image desired. 'overlay' cycles over defined colors\n and overlays the colored labels over the original image. 'avg' replaces\n each labeled segment with its average color, for a stained-class or\n pastel painting appearance.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if kind == 'overlay':\n return _label2rgb_overlay(label, image, colors, alpha, bg_label,\n bg_color, image_alpha)\n else:\n return _label2rgb_avg(label, image, bg_label, bg_color)\n\n\ndef _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=None, image_alpha=1):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background.\n bg_color : str or array, optional\n Background color. 
Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if colors is None:\n colors = DEFAULT_COLORS\n colors = [_rgb_vector(c) for c in colors]\n\n if image is None:\n image = np.zeros(label.shape + (3,), dtype=np.float64)\n # Opacity doesn't make sense if no image exists.\n alpha = 1\n else:\n if not image.shape[:2] == label.shape:\n raise ValueError(\"`image` and `label` must be the same shape\")\n\n if image.min() < 0:\n warn(\"Negative intensities in `image` are not supported\")\n\n image = img_as_float(rgb2gray(image))\n image = gray2rgb(image) * image_alpha + (1 - image_alpha)\n\n # Ensure that all labels are non-negative so we can index into\n # `label_to_color` correctly.\n offset = min(label.min(), bg_label)\n if offset != 0:\n label = label - offset # Make sure you don't modify the input array.\n bg_label -= offset\n\n new_type = np.min_scalar_type(int(label.max()))\n if new_type == np.bool:\n new_type = np.uint8\n label = label.astype(new_type)\n\n mapped_labels_flat, color_cycle = _match_label_with_color(label, colors,\n bg_label, bg_color)\n\n if len(mapped_labels_flat) == 0:\n return image\n\n dense_labels = range(max(mapped_labels_flat) + 1)\n\n label_to_color = np.array([c for i, c in zip(dense_labels, color_cycle)])\n\n mapped_labels = label\n mapped_labels.flat = mapped_labels_flat\n result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)\n\n # Remove background label if its color was not specified.\n remove_background = 0 in mapped_labels_flat and bg_color is None\n if remove_background:\n result[label == bg_label] = image[label == bg_label]\n\n return result\n\n\ndef _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):\n \"\"\"Visualise each segment in `label_field` with its mean color in `image`.\n\n Parameters\n ----------\n label_field : array of int\n A segmentation of an image.\n image : array, shape ``label_field.shape + (3,)``\n A color image of the same spatial shape as `label_field`.\n bg_label : int, optional\n A value in `label_field` to be treated as background.\n bg_color : 3-tuple of int, optional\n The color for the background label\n\n Returns\n -------\n out : array, same shape and type as `image`\n The output visualization.\n \"\"\"\n out = np.zeros_like(image)\n labels = np.unique(label_field)\n bg = (labels == bg_label)\n if bg.any():\n labels = labels[labels != bg_label]\n out[bg] = bg_color\n for label in labels:\n mask = (label_field == label).nonzero()\n color = image[mask].mean(axis=0)\n out[mask] = color\n return out\n", "path": "skimage/color/colorlabel.py"}]}
| 3,335 | 140 |
gh_patches_debug_22033
|
rasdani/github-patches
|
git_diff
|
searx__searx-1689
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Google Images & DeviantArt don't work anymore
From one day to the next, Google Images and DeviantArt stopped showing me anything, even for simple searches.


They are of course activated in the engines. It has been like this for a few days, with a restart every day (local instance using Docker), without modifying the engines or any other settings. Using searx 0.15.0.
</issue>
<code>
[start of searx/engines/google_images.py]
1 """
2 Google (Images)
3
4 @website https://www.google.com
5 @provide-api yes (https://developers.google.com/custom-search/)
6
7 @using-api no
8 @results HTML chunks with JSON inside
9 @stable no
10 @parse url, title, img_src
11 """
12
13 from datetime import date, timedelta
14 from json import loads
15 from lxml import html
16 from searx.url_utils import urlencode
17
18 # engine dependent config
19 categories = ['images']
20 paging = True
21 safesearch = True
22 time_range_support = True
23 number_of_results = 100
24
25 search_url = 'https://www.google.com/search'\
26 '?{query}'\
27 '&tbm=isch'\
28 '&yv=2'\
29 '&{search_options}'
30 time_range_attr = "qdr:{range}"
31 time_range_custom_attr = "cdr:1,cd_min:{start},cd_max{end}"
32 time_range_dict = {'day': 'd',
33 'week': 'w',
34 'month': 'm'}
35
36
37 # do search-request
38 def request(query, params):
39 search_options = {
40 'ijn': params['pageno'] - 1,
41 'start': (params['pageno'] - 1) * number_of_results
42 }
43
44 if params['time_range'] in time_range_dict:
45 search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])
46 elif params['time_range'] == 'year':
47 now = date.today()
48 then = now - timedelta(days=365)
49 start = then.strftime('%m/%d/%Y')
50 end = now.strftime('%m/%d/%Y')
51 search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)
52
53 if safesearch and params['safesearch']:
54 search_options['safe'] = 'on'
55
56 params['url'] = search_url.format(query=urlencode({'q': query}),
57 search_options=urlencode(search_options))
58
59 return params
60
61
62 # get response from search-request
63 def response(resp):
64 results = []
65
66 dom = html.fromstring(resp.text)
67
68 # parse results
69 for result in dom.xpath('//div[contains(@class, "rg_meta")]/text()'):
70
71 try:
72 metadata = loads(result)
73 img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))
74 source = "{0} ({1})".format(metadata['st'], metadata['isu'])
75 results.append({'url': metadata['ru'],
76 'title': metadata['pt'],
77 'content': metadata['s'],
78 'source': source,
79 'img_format': img_format,
80 'thumbnail_src': metadata['tu'],
81 'img_src': metadata['ou'],
82 'template': 'images.html'})
83
84 except:
85 continue
86
87 return results
88
[end of searx/engines/google_images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -70,11 +70,21 @@
try:
metadata = loads(result)
- img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))
- source = "{0} ({1})".format(metadata['st'], metadata['isu'])
+
+ img_format = metadata.get('ity', '')
+ img_width = metadata.get('ow', '')
+ img_height = metadata.get('oh', '')
+ if img_width and img_height:
+ img_format += " {0}x{1}".format(img_width, img_height)
+
+ source = metadata.get('st', '')
+ source_url = metadata.get('isu', '')
+ if source_url:
+ source += " ({0})".format(source_url)
+
results.append({'url': metadata['ru'],
'title': metadata['pt'],
- 'content': metadata['s'],
+ 'content': metadata.get('s', ''),
'source': source,
'img_format': img_format,
'thumbnail_src': metadata['tu'],
|
{"golden_diff": "diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py\n--- a/searx/engines/google_images.py\n+++ b/searx/engines/google_images.py\n@@ -70,11 +70,21 @@\n \n try:\n metadata = loads(result)\n- img_format = \"{0} {1}x{2}\".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))\n- source = \"{0} ({1})\".format(metadata['st'], metadata['isu'])\n+\n+ img_format = metadata.get('ity', '')\n+ img_width = metadata.get('ow', '')\n+ img_height = metadata.get('oh', '')\n+ if img_width and img_height:\n+ img_format += \" {0}x{1}\".format(img_width, img_height)\n+\n+ source = metadata.get('st', '')\n+ source_url = metadata.get('isu', '')\n+ if source_url:\n+ source += \" ({0})\".format(source_url)\n+\n results.append({'url': metadata['ru'],\n 'title': metadata['pt'],\n- 'content': metadata['s'],\n+ 'content': metadata.get('s', ''),\n 'source': source,\n 'img_format': img_format,\n 'thumbnail_src': metadata['tu'],\n", "issue": "Google Images & DeviantArt don't work anymore\nFrom one day to another, Google Images and DeviantArt stopped to show me anything, even with simple searches.\r\n\r\n\r\nThey are of course activated in the engines. It has been a few days, with a restart every day (local instance using Docker), without modifying engines nor any other setting. Using searx 0.15.0\n", "before_files": [{"content": "\"\"\"\n Google (Images)\n\n @website https://www.google.com\n @provide-api yes (https://developers.google.com/custom-search/)\n\n @using-api no\n @results HTML chunks with JSON inside\n @stable no\n @parse url, title, img_src\n\"\"\"\n\nfrom datetime import date, timedelta\nfrom json import loads\nfrom lxml import html\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['images']\npaging = True\nsafesearch = True\ntime_range_support = True\nnumber_of_results = 100\n\nsearch_url = 'https://www.google.com/search'\\\n '?{query}'\\\n '&tbm=isch'\\\n '&yv=2'\\\n '&{search_options}'\ntime_range_attr = \"qdr:{range}\"\ntime_range_custom_attr = \"cdr:1,cd_min:{start},cd_max{end}\"\ntime_range_dict = {'day': 'd',\n 'week': 'w',\n 'month': 'm'}\n\n\n# do search-request\ndef request(query, params):\n search_options = {\n 'ijn': params['pageno'] - 1,\n 'start': (params['pageno'] - 1) * number_of_results\n }\n\n if params['time_range'] in time_range_dict:\n search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])\n elif params['time_range'] == 'year':\n now = date.today()\n then = now - timedelta(days=365)\n start = then.strftime('%m/%d/%Y')\n end = now.strftime('%m/%d/%Y')\n search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)\n\n if safesearch and params['safesearch']:\n search_options['safe'] = 'on'\n\n params['url'] = search_url.format(query=urlencode({'q': query}),\n search_options=urlencode(search_options))\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in dom.xpath('//div[contains(@class, \"rg_meta\")]/text()'):\n\n try:\n metadata = loads(result)\n img_format = \"{0} {1}x{2}\".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))\n source = \"{0} ({1})\".format(metadata['st'], metadata['isu'])\n results.append({'url': metadata['ru'],\n 'title': metadata['pt'],\n 'content': metadata['s'],\n 'source': source,\n 'img_format': img_format,\n 'thumbnail_src': metadata['tu'],\n 'img_src': metadata['ou'],\n 'template': 'images.html'})\n\n 
except:\n continue\n\n return results\n", "path": "searx/engines/google_images.py"}]}
| 1,548 | 288 |
gh_patches_debug_39059
|
rasdani/github-patches
|
git_diff
|
pypa__pip-8910
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to clean http cache with pip
<!--
If you're reporting an issue for `--use-feature=2020-resolver`, use the "Dependency resolver failures / errors" template instead.
-->
**Environment**
* pip version: pip 20.2.3
* Python version: Python 2.7 (32bit)
* OS: Windows 10 (64bit)
<!-- Feel free to add more information about your environment here -->
**Description**
<!-- A clear and concise description of what the bug is. -->
We noticed pip will cache the HTTP response for future installations. However, if the cached HTTP response contains corrupted data (because of a network issue, for example), installing the same package will result in a pip cache error as follows:
```
(venv2) H:\>pip install pyside
DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.
Collecting pyside
Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)
ERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. Otherwise, examine the package contents carefully; someone may have tampered with them.
pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:
Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908
Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427
```
The symptom of this issue has already been reported in https://github.com/pypa/warehouse/issues/8330.
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
If the error is caused by data corruption in the HTTP response cache, then pip should invalidate the cache and try to re-download as a best effort.
Currently `pip cache purge` will not clear the HTTP response cache. The only solution in this case is to remove the http directory from the file system, forcing pip to rebuild the HTTP cache.
**How to Reproduce**
<!-- Describe the steps to reproduce this bug. -->
1. Create a Python 2 virtual environment
2. Download the example corrupted file https://mega.nz/file/WsthyLTS#AWD7NmS-w9B62Q3Y8Lb4SvCalqCb1d83a5FniKPmFqY
3. Overwrite the http folder in C:\Users\<account name>\AppData\Local\pip\cache\http
4. Uninstall pyside and reinstall it with `pip install pyside`
**Output**
```
(venv2) H:\>pip install pyside
DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.
Collecting pyside
Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)
ERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. Otherwise, examine the package contents carefully; someone may have tampered with them.
pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:
Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908
Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427
```
</issue>
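
Until the cache command knows about the HTTP cache, the workaround described above can be scripted. The sketch below is an illustration only; it assumes a Python 3 environment with pip >= 20.1 (for `pip cache dir`) and the `<cache dir>/http` layout mentioned in this report:

```
# Remove only pip's HTTP response cache, forcing index responses and downloads
# to be fetched again on the next install.
import shutil
import subprocess
from pathlib import Path

cache_dir = Path(subprocess.check_output(["pip", "cache", "dir"], text=True).strip())
http_dir = cache_dir / "http"

if http_dir.is_dir():
    shutil.rmtree(http_dir)
    print("Removed", http_dir)
else:
    print("No HTTP cache found at", http_dir)
```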
<code>
[start of src/pip/_internal/commands/cache.py]
1 from __future__ import absolute_import
2
3 import logging
4 import os
5 import textwrap
6
7 import pip._internal.utils.filesystem as filesystem
8 from pip._internal.cli.base_command import Command
9 from pip._internal.cli.status_codes import ERROR, SUCCESS
10 from pip._internal.exceptions import CommandError, PipError
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from optparse import Values
15 from typing import Any, List
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class CacheCommand(Command):
22 """
23 Inspect and manage pip's wheel cache.
24
25 Subcommands:
26
27 - dir: Show the cache directory.
28 - info: Show information about the cache.
29 - list: List filenames of packages stored in the cache.
30 - remove: Remove one or more package from the cache.
31 - purge: Remove all items from the cache.
32
33 ``<pattern>`` can be a glob expression or a package name.
34 """
35
36 ignore_require_venv = True
37 usage = """
38 %prog dir
39 %prog info
40 %prog list [<pattern>] [--format=[human, abspath]]
41 %prog remove <pattern>
42 %prog purge
43 """
44
45 def add_options(self):
46 # type: () -> None
47
48 self.cmd_opts.add_option(
49 '--format',
50 action='store',
51 dest='list_format',
52 default="human",
53 choices=('human', 'abspath'),
54 help="Select the output format among: human (default) or abspath"
55 )
56
57 self.parser.insert_option_group(0, self.cmd_opts)
58
59 def run(self, options, args):
60 # type: (Values, List[Any]) -> int
61 handlers = {
62 "dir": self.get_cache_dir,
63 "info": self.get_cache_info,
64 "list": self.list_cache_items,
65 "remove": self.remove_cache_items,
66 "purge": self.purge_cache,
67 }
68
69 if not options.cache_dir:
70 logger.error("pip cache commands can not "
71 "function since cache is disabled.")
72 return ERROR
73
74 # Determine action
75 if not args or args[0] not in handlers:
76 logger.error(
77 "Need an action (%s) to perform.",
78 ", ".join(sorted(handlers)),
79 )
80 return ERROR
81
82 action = args[0]
83
84 # Error handling happens here, not in the action-handlers.
85 try:
86 handlers[action](options, args[1:])
87 except PipError as e:
88 logger.error(e.args[0])
89 return ERROR
90
91 return SUCCESS
92
93 def get_cache_dir(self, options, args):
94 # type: (Values, List[Any]) -> None
95 if args:
96 raise CommandError('Too many arguments')
97
98 logger.info(options.cache_dir)
99
100 def get_cache_info(self, options, args):
101 # type: (Values, List[Any]) -> None
102 if args:
103 raise CommandError('Too many arguments')
104
105 num_packages = len(self._find_wheels(options, '*'))
106
107 cache_location = self._wheels_cache_dir(options)
108 cache_size = filesystem.format_directory_size(cache_location)
109
110 message = textwrap.dedent("""
111 Location: {location}
112 Size: {size}
113 Number of wheels: {package_count}
114 """).format(
115 location=cache_location,
116 package_count=num_packages,
117 size=cache_size,
118 ).strip()
119
120 logger.info(message)
121
122 def list_cache_items(self, options, args):
123 # type: (Values, List[Any]) -> None
124 if len(args) > 1:
125 raise CommandError('Too many arguments')
126
127 if args:
128 pattern = args[0]
129 else:
130 pattern = '*'
131
132 files = self._find_wheels(options, pattern)
133 if options.list_format == 'human':
134 self.format_for_human(files)
135 else:
136 self.format_for_abspath(files)
137
138 def format_for_human(self, files):
139 # type: (List[str]) -> None
140 if not files:
141 logger.info('Nothing cached.')
142 return
143
144 results = []
145 for filename in files:
146 wheel = os.path.basename(filename)
147 size = filesystem.format_file_size(filename)
148 results.append(' - {} ({})'.format(wheel, size))
149 logger.info('Cache contents:\n')
150 logger.info('\n'.join(sorted(results)))
151
152 def format_for_abspath(self, files):
153 # type: (List[str]) -> None
154 if not files:
155 return
156
157 results = []
158 for filename in files:
159 results.append(filename)
160
161 logger.info('\n'.join(sorted(results)))
162
163 def remove_cache_items(self, options, args):
164 # type: (Values, List[Any]) -> None
165 if len(args) > 1:
166 raise CommandError('Too many arguments')
167
168 if not args:
169 raise CommandError('Please provide a pattern')
170
171 files = self._find_wheels(options, args[0])
172 if not files:
173 raise CommandError('No matching packages')
174
175 for filename in files:
176 os.unlink(filename)
177 logger.debug('Removed %s', filename)
178 logger.info('Files removed: %s', len(files))
179
180 def purge_cache(self, options, args):
181 # type: (Values, List[Any]) -> None
182 if args:
183 raise CommandError('Too many arguments')
184
185 return self.remove_cache_items(options, ['*'])
186
187 def _wheels_cache_dir(self, options):
188 # type: (Values) -> str
189 return os.path.join(options.cache_dir, 'wheels')
190
191 def _find_wheels(self, options, pattern):
192 # type: (Values, str) -> List[str]
193 wheel_dir = self._wheels_cache_dir(options)
194
195 # The wheel filename format, as specified in PEP 427, is:
196 # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
197 #
198 # Additionally, non-alphanumeric values in the distribution are
199 # normalized to underscores (_), meaning hyphens can never occur
200 # before `-{version}`.
201 #
202 # Given that information:
203 # - If the pattern we're given contains a hyphen (-), the user is
204 # providing at least the version. Thus, we can just append `*.whl`
205 # to match the rest of it.
206 # - If the pattern we're given doesn't contain a hyphen (-), the
207 # user is only providing the name. Thus, we append `-*.whl` to
208 # match the hyphen before the version, followed by anything else.
209 #
210 # PEP 427: https://www.python.org/dev/peps/pep-0427/
211 pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
212
213 return filesystem.find_files(wheel_dir, pattern)
214
[end of src/pip/_internal/commands/cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py
--- a/src/pip/_internal/commands/cache.py
+++ b/src/pip/_internal/commands/cache.py
@@ -102,19 +102,30 @@
if args:
raise CommandError('Too many arguments')
+ num_http_files = len(self._find_http_files(options))
num_packages = len(self._find_wheels(options, '*'))
- cache_location = self._wheels_cache_dir(options)
- cache_size = filesystem.format_directory_size(cache_location)
+ http_cache_location = self._cache_dir(options, 'http')
+ wheels_cache_location = self._cache_dir(options, 'wheels')
+ http_cache_size = filesystem.format_directory_size(http_cache_location)
+ wheels_cache_size = filesystem.format_directory_size(
+ wheels_cache_location
+ )
message = textwrap.dedent("""
- Location: {location}
- Size: {size}
+ Package index page cache location: {http_cache_location}
+ Package index page cache size: {http_cache_size}
+ Number of HTTP files: {num_http_files}
+ Wheels location: {wheels_cache_location}
+ Wheels size: {wheels_cache_size}
Number of wheels: {package_count}
""").format(
- location=cache_location,
+ http_cache_location=http_cache_location,
+ http_cache_size=http_cache_size,
+ num_http_files=num_http_files,
+ wheels_cache_location=wheels_cache_location,
package_count=num_packages,
- size=cache_size,
+ wheels_cache_size=wheels_cache_size,
).strip()
logger.info(message)
@@ -169,6 +180,11 @@
raise CommandError('Please provide a pattern')
files = self._find_wheels(options, args[0])
+
+ # Only fetch http files if no specific pattern given
+ if args[0] == '*':
+ files += self._find_http_files(options)
+
if not files:
raise CommandError('No matching packages')
@@ -184,13 +200,18 @@
return self.remove_cache_items(options, ['*'])
- def _wheels_cache_dir(self, options):
- # type: (Values) -> str
- return os.path.join(options.cache_dir, 'wheels')
+ def _cache_dir(self, options, subdir):
+ # type: (Values, str) -> str
+ return os.path.join(options.cache_dir, subdir)
+
+ def _find_http_files(self, options):
+ # type: (Values) -> List[str]
+ http_dir = self._cache_dir(options, 'http')
+ return filesystem.find_files(http_dir, '*')
def _find_wheels(self, options, pattern):
# type: (Values, str) -> List[str]
- wheel_dir = self._wheels_cache_dir(options)
+ wheel_dir = self._cache_dir(options, 'wheels')
# The wheel filename format, as specified in PEP 427, is:
# {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
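
The shape of the fix is to generalize the wheels-only helper into one that resolves any cache subdirectory, so `http` and `wheels` entries can be counted, sized and purged uniformly. A rough standalone sketch of that pattern (the helpers are simplified stand-ins for pip's `filesystem.find_files`, and the example path is just the default Linux cache location):

```
import fnmatch
import os

def cache_subdir(cache_dir, subdir):
    # analogue of the patch's _cache_dir(options, subdir) helper
    return os.path.join(cache_dir, subdir)

def find_files(directory, pattern):
    # simplified stand-in for pip's filesystem.find_files()
    matches = []
    for root, _dirs, files in os.walk(directory):
        for name in fnmatch.filter(files, pattern):
            matches.append(os.path.join(root, name))
    return matches

def cache_summary(cache_dir):
    http_files = find_files(cache_subdir(cache_dir, "http"), "*")
    wheel_files = find_files(cache_subdir(cache_dir, "wheels"), "*.whl")
    return {"http files": len(http_files), "wheels": len(wheel_files)}

print(cache_summary(os.path.expanduser("~/.cache/pip")))
```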
|
{"golden_diff": "diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py\n--- a/src/pip/_internal/commands/cache.py\n+++ b/src/pip/_internal/commands/cache.py\n@@ -102,19 +102,30 @@\n if args:\n raise CommandError('Too many arguments')\n \n+ num_http_files = len(self._find_http_files(options))\n num_packages = len(self._find_wheels(options, '*'))\n \n- cache_location = self._wheels_cache_dir(options)\n- cache_size = filesystem.format_directory_size(cache_location)\n+ http_cache_location = self._cache_dir(options, 'http')\n+ wheels_cache_location = self._cache_dir(options, 'wheels')\n+ http_cache_size = filesystem.format_directory_size(http_cache_location)\n+ wheels_cache_size = filesystem.format_directory_size(\n+ wheels_cache_location\n+ )\n \n message = textwrap.dedent(\"\"\"\n- Location: {location}\n- Size: {size}\n+ Package index page cache location: {http_cache_location}\n+ Package index page cache size: {http_cache_size}\n+ Number of HTTP files: {num_http_files}\n+ Wheels location: {wheels_cache_location}\n+ Wheels size: {wheels_cache_size}\n Number of wheels: {package_count}\n \"\"\").format(\n- location=cache_location,\n+ http_cache_location=http_cache_location,\n+ http_cache_size=http_cache_size,\n+ num_http_files=num_http_files,\n+ wheels_cache_location=wheels_cache_location,\n package_count=num_packages,\n- size=cache_size,\n+ wheels_cache_size=wheels_cache_size,\n ).strip()\n \n logger.info(message)\n@@ -169,6 +180,11 @@\n raise CommandError('Please provide a pattern')\n \n files = self._find_wheels(options, args[0])\n+\n+ # Only fetch http files if no specific pattern given\n+ if args[0] == '*':\n+ files += self._find_http_files(options)\n+\n if not files:\n raise CommandError('No matching packages')\n \n@@ -184,13 +200,18 @@\n \n return self.remove_cache_items(options, ['*'])\n \n- def _wheels_cache_dir(self, options):\n- # type: (Values) -> str\n- return os.path.join(options.cache_dir, 'wheels')\n+ def _cache_dir(self, options, subdir):\n+ # type: (Values, str) -> str\n+ return os.path.join(options.cache_dir, subdir)\n+\n+ def _find_http_files(self, options):\n+ # type: (Values) -> List[str]\n+ http_dir = self._cache_dir(options, 'http')\n+ return filesystem.find_files(http_dir, '*')\n \n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n- wheel_dir = self._wheels_cache_dir(options)\n+ wheel_dir = self._cache_dir(options, 'wheels')\n \n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n", "issue": "Unable to clean http cache with pip\n<!--\r\nIf you're reporting an issue for `--use-feature=2020-resolver`, use the \"Dependency resolver failures / errors\" template instead.\r\n-->\r\n\r\n**Environment**\r\n\r\n* pip version: pip 20.2.3\r\n* Python version: Python 2.7 (32bit)\r\n* OS: Windows 10 (64bit)\r\n\r\n<!-- Feel free to add more information about your environment here -->\r\n\r\n**Description**\r\n<!-- A clear and concise description of what the bug is. -->\r\nWe noticed pip will cache the http response for future installation. However if the cached http response includes corruped data (network issue for example), installing the same package will result in pip cache error as follow: \r\n\r\n```\r\n(venv2) H:\\>pip install pyside\r\nDEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. 
More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.\r\nCollecting pyside\r\n Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)\r\nERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. Otherwise, examine the package contents carefully; someone may have tampered with them.\r\n pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:\r\n Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908\r\n Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427\r\n```\r\n\r\n\r\nThe symption of this issue has already been reported in https://github.com/pypa/warehouse/issues/8330. \r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nIf the error is caused by the data corruption in http response cache then pip should invalidate the cache and try to re-download for best attempt. \r\n\r\nCurrently `pip cache purge` will not clear http response cache. The only solution to this case is to remove http directory from the file system forcing pip to rebuild http cache. \r\n\r\n**How to Reproduce**\r\n<!-- Describe the steps to reproduce this bug. -->\r\n\r\n1. Create a Python 2 virtual environment\r\n2. Download the example corruped file https://mega.nz/file/WsthyLTS#AWD7NmS-w9B62Q3Y8Lb4SvCalqCb1d83a5FniKPmFqY\r\n3. Overwrite http folder in C:\\Users\\<account name>\\AppData\\Local\\pip\\cache\\http\r\n4. Uninstall pyside and install pyside by `pip install pyside`\r\n\r\n\r\n**Output**\r\n\r\n```\r\n(venv2) H:\\>pip install pyside\r\nDEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.\r\nCollecting pyside\r\n Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)\r\nERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. 
Otherwise, examine the package contents carefully; someone may have tampered with them.\r\n pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:\r\n Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908\r\n Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n - dir: Show the cache directory.\n - info: Show information about the cache.\n - list: List filenames of packages stored in the cache.\n - remove: Remove one or more package from the cache.\n - purge: Remove all items from the cache.\n\n ``<pattern>`` can be a glob expression or a package name.\n \"\"\"\n\n ignore_require_venv = True\n usage = \"\"\"\n %prog dir\n %prog info\n %prog list [<pattern>] [--format=[human, abspath]]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def add_options(self):\n # type: () -> None\n\n self.cmd_opts.add_option(\n '--format',\n action='store',\n dest='list_format',\n default=\"human\",\n choices=('human', 'abspath'),\n help=\"Select the output format among: human (default) or abspath\"\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"dir\": self.get_cache_dir,\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n if not options.cache_dir:\n logger.error(\"pip cache commands can not \"\n \"function since cache is disabled.\")\n return ERROR\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\n \"Need an action (%s) to perform.\",\n \", \".join(sorted(handlers)),\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_dir(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n logger.info(options.cache_dir)\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many 
arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n if options.list_format == 'human':\n self.format_for_human(files)\n else:\n self.format_for_abspath(files)\n\n def format_for_human(self, files):\n # type: (List[str]) -> None\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def format_for_abspath(self, files):\n # type: (List[str]) -> None\n if not files:\n return\n\n results = []\n for filename in files:\n results.append(filename)\n\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}]}
| 3,953 | 712 |
gh_patches_debug_20225
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-2228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Student T tests fail on GPU [bug]
```
self = MultivariateStudentT(df: 1.5, loc: torch.Size([2]), scale_tril: torch.Size([2, 2]))
sample_shape = torch.Size([])
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()
Z = self._chi2.rsample(sample_shape)
Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)
> return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)
E RuntimeError: Expected object of device type cuda but got device type cpu for argument #2 'mat2' in call to _th_mm
pyro/distributions/multivariate_studentt.py:74: RuntimeError
```
This issue was discussed in #2226 - running `make test` on the dev branch errors out for me when running on a machine with CUDA. I am guessing this hasn't shown up in the CI because it uses a CPU-only machine.
I think this bug is pretty simple - it happens because, as we can see in the snippet above, `Y` inherits its device from `self.df`, and in the fixture `self.df` is set to a scalar value. This is not converted into a tensor by the `tensors_default_to` context manager, and so isn't sent to the GPU.
I fixed this in #2226 by changing the fixture, but @fritzo suggested that it might point to a missing coercion rather than a needed change to the fixture, so that change in the PR was reverted and I am opening this issue instead.
</issue>
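
A minimal sketch of the device mismatch being described, independent of pyro and of the test fixture: a plain Python scalar for `df` becomes a CPU tensor, anything sampled from it stays on the CPU, and mixing it with CUDA `loc`/`scale_tril` raises the RuntimeError above, while coercing `df` from `loc` (which is the direction the accompanying patch takes with `loc.new_tensor`) keeps every intermediate on one device. The script also runs on a CPU-only machine; it simply never reaches the error branch there.

```
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
df = torch.as_tensor(1.5)                   # scalar df, no device given -> CPU tensor
loc = torch.zeros(2, device=device)
scale_tril = torch.eye(2, device=device)

X = torch.empty(2, dtype=df.dtype, device=df.device).normal_()   # follows df -> CPU
try:
    loc + scale_tril.matmul(X.unsqueeze(-1)).squeeze(-1)         # CPU/CUDA mix
except RuntimeError as exc:
    print("RuntimeError:", exc)             # raised only when loc lives on CUDA

df_ok = loc.new_tensor(1.5)                 # coerce df from loc: same device and dtype
X_ok = torch.empty(2, dtype=df_ok.dtype, device=df_ok.device).normal_()
print(X_ok.device, loc.device)              # both on the same device now
```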
<code>
[start of pyro/distributions/multivariate_studentt.py]
1 import math
2
3 import torch
4 from torch.distributions import constraints
5 from torch.distributions.utils import broadcast_all, lazy_property
6
7 from pyro.distributions.torch import Chi2
8 from pyro.distributions.torch_distribution import TorchDistribution
9 from pyro.distributions.util import broadcast_shape
10
11
12 class MultivariateStudentT(TorchDistribution):
13 """
14 Creates a multivariate Student's t-distribution parameterized by degree of
15 freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale_tril`.
16
17 :param ~torch.Tensor df: degrees of freedom
18 :param ~torch.Tensor loc: mean of the distribution
19 :param ~torch.Tensor scale_tril: scale of the distribution, which is
20 a lower triangular matrix with positive diagonal entries
21 """
22 arg_constraints = {'df': constraints.positive,
23 'loc': constraints.real_vector,
24 'scale_tril': constraints.lower_cholesky}
25 support = constraints.real_vector
26 has_rsample = True
27
28 def __init__(self, df, loc, scale_tril, validate_args=None):
29 dim = loc.size(-1)
30 assert scale_tril.shape[-2:] == (dim, dim)
31 df, = broadcast_all(df)
32 batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])
33 event_shape = (dim,)
34 self.df = df.expand(batch_shape)
35 self.loc = loc
36 self.scale_tril = scale_tril
37 self._chi2 = Chi2(self.df)
38 super(MultivariateStudentT, self).__init__(batch_shape, event_shape, validate_args=validate_args)
39
40 @lazy_property
41 def covariance_matrix(self):
42 # NB: this is not covariance of this distribution;
43 # the actual covariance is df / (df - 2) * covariance_matrix
44 return torch.matmul(self.scale_tril, self.scale_tril.transpose(-1, -2))
45
46 @lazy_property
47 def precision_matrix(self):
48 identity = torch.eye(self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype)
49 scale_inv = identity.triangular_solve(self.scale_tril, upper=False).solution.transpose(-1, -2)
50 return torch.matmul(scale_inv.transpose(-1, -2), scale_inv)
51
52 def expand(self, batch_shape, _instance=None):
53 new = self._get_checked_instance(MultivariateStudentT, _instance)
54 batch_shape = torch.Size(batch_shape)
55 loc_shape = batch_shape + self.event_shape
56 scale_shape = loc_shape + self.event_shape
57 new.df = self.df.expand(batch_shape)
58 new.loc = self.loc.expand(loc_shape)
59 new.scale_tril = self.scale_tril.expand(scale_shape)
60 if 'covariance_matrix' in self.__dict__:
61 new.covariance_matrix = self.covariance_matrix.expand(scale_shape)
62 if 'precision_matrix' in self.__dict__:
63 new.precision_matrix = self.precision_matrix.expand(scale_shape)
64 new._chi2 = self._chi2.expand(batch_shape)
65 super(MultivariateStudentT, new).__init__(batch_shape, self.event_shape, validate_args=False)
66 new._validate_args = self._validate_args
67 return new
68
69 def rsample(self, sample_shape=torch.Size()):
70 shape = self._extended_shape(sample_shape)
71 X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()
72 Z = self._chi2.rsample(sample_shape)
73 Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)
74 return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)
75
76 def log_prob(self, value):
77 if self._validate_args:
78 self._validate_sample(value)
79 n = self.loc.size(-1)
80 y = (value - self.loc).unsqueeze(-1).triangular_solve(self.scale_tril, upper=False).solution.squeeze(-1)
81 Z = (self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) +
82 0.5 * n * self.df.log() +
83 0.5 * n * math.log(math.pi) +
84 torch.lgamma(0.5 * self.df) -
85 torch.lgamma(0.5 * (self.df + n)))
86 return -0.5 * (self.df + n) * torch.log1p(y.pow(2).sum(-1) / self.df) - Z
87
88 @property
89 def mean(self):
90 m = self.loc.clone()
91 m[self.df <= 1, :] = float('nan')
92 return m
93
94 @property
95 def variance(self):
96 m = self.scale_tril.pow(2).sum(-1) * (self.df / (self.df - 2)).unsqueeze(-1)
97 m[(self.df <= 2) & (self.df > 1), :] = float('inf')
98 m[self.df <= 1, :] = float('nan')
99 return m
100
[end of pyro/distributions/multivariate_studentt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyro/distributions/multivariate_studentt.py b/pyro/distributions/multivariate_studentt.py
--- a/pyro/distributions/multivariate_studentt.py
+++ b/pyro/distributions/multivariate_studentt.py
@@ -2,7 +2,7 @@
import torch
from torch.distributions import constraints
-from torch.distributions.utils import broadcast_all, lazy_property
+from torch.distributions.utils import lazy_property
from pyro.distributions.torch import Chi2
from pyro.distributions.torch_distribution import TorchDistribution
@@ -28,7 +28,8 @@
def __init__(self, df, loc, scale_tril, validate_args=None):
dim = loc.size(-1)
assert scale_tril.shape[-2:] == (dim, dim)
- df, = broadcast_all(df)
+ if not isinstance(df, torch.Tensor):
+ df = loc.new_tensor(df)
batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])
event_shape = (dim,)
self.df = df.expand(batch_shape)
|
{"golden_diff": "diff --git a/pyro/distributions/multivariate_studentt.py b/pyro/distributions/multivariate_studentt.py\n--- a/pyro/distributions/multivariate_studentt.py\n+++ b/pyro/distributions/multivariate_studentt.py\n@@ -2,7 +2,7 @@\n \n import torch\n from torch.distributions import constraints\n-from torch.distributions.utils import broadcast_all, lazy_property\n+from torch.distributions.utils import lazy_property\n \n from pyro.distributions.torch import Chi2\n from pyro.distributions.torch_distribution import TorchDistribution\n@@ -28,7 +28,8 @@\n def __init__(self, df, loc, scale_tril, validate_args=None):\n dim = loc.size(-1)\n assert scale_tril.shape[-2:] == (dim, dim)\n- df, = broadcast_all(df)\n+ if not isinstance(df, torch.Tensor):\n+ df = loc.new_tensor(df)\n batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])\n event_shape = (dim,)\n self.df = df.expand(batch_shape)\n", "issue": "Student T tests fail on GPU [bug]\n```self = MultivariateStudentT(df: 1.5, loc: torch.Size([2]), scale_tril: torch.Size([2, 2]))\r\nsample_shape = torch.Size([])\r\n\r\n def rsample(self, sample_shape=torch.Size()):\r\n shape = self._extended_shape(sample_shape)\r\n X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()\r\n Z = self._chi2.rsample(sample_shape)\r\n Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)\r\n> return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)\r\nE RuntimeError: Expected object of device type cuda but got device type cpu for argument #2 'mat2' in call to _th_mm\r\n\r\npyro/distributions/multivariate_studentt.py:74: RuntimeError\r\n```\r\n\r\nThis issue was discussed in #2226 - running `make test` on the dev branch errors out for me if running on a machine with cuda. I am guessing this hasn't shown up in the CI because it uses a cpu only machine. \r\n\r\nI think this bug is pretty simple - it happens because, as we can see in the above snippet, y inherits its device from self.df, and in the fixture, self.df is set to a scalar value. This is not converted into a tensor by the tensors_default_to context manager, and so isn't sent to the gpu. 
\r\n\r\nI fixed this in #2226 by changing the fixture, but @fritzo suggested that it might suggest a missing coercion rather than a change to the fixture, so that change in the PR was reverted and I am opening this issue instead.\r\n\n", "before_files": [{"content": "import math\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import broadcast_all, lazy_property\n\nfrom pyro.distributions.torch import Chi2\nfrom pyro.distributions.torch_distribution import TorchDistribution\nfrom pyro.distributions.util import broadcast_shape\n\n\nclass MultivariateStudentT(TorchDistribution):\n \"\"\"\n Creates a multivariate Student's t-distribution parameterized by degree of\n freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale_tril`.\n\n :param ~torch.Tensor df: degrees of freedom\n :param ~torch.Tensor loc: mean of the distribution\n :param ~torch.Tensor scale_tril: scale of the distribution, which is\n a lower triangular matrix with positive diagonal entries\n \"\"\"\n arg_constraints = {'df': constraints.positive,\n 'loc': constraints.real_vector,\n 'scale_tril': constraints.lower_cholesky}\n support = constraints.real_vector\n has_rsample = True\n\n def __init__(self, df, loc, scale_tril, validate_args=None):\n dim = loc.size(-1)\n assert scale_tril.shape[-2:] == (dim, dim)\n df, = broadcast_all(df)\n batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])\n event_shape = (dim,)\n self.df = df.expand(batch_shape)\n self.loc = loc\n self.scale_tril = scale_tril\n self._chi2 = Chi2(self.df)\n super(MultivariateStudentT, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n\n @lazy_property\n def covariance_matrix(self):\n # NB: this is not covariance of this distribution;\n # the actual covariance is df / (df - 2) * covariance_matrix\n return torch.matmul(self.scale_tril, self.scale_tril.transpose(-1, -2))\n\n @lazy_property\n def precision_matrix(self):\n identity = torch.eye(self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype)\n scale_inv = identity.triangular_solve(self.scale_tril, upper=False).solution.transpose(-1, -2)\n return torch.matmul(scale_inv.transpose(-1, -2), scale_inv)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(MultivariateStudentT, _instance)\n batch_shape = torch.Size(batch_shape)\n loc_shape = batch_shape + self.event_shape\n scale_shape = loc_shape + self.event_shape\n new.df = self.df.expand(batch_shape)\n new.loc = self.loc.expand(loc_shape)\n new.scale_tril = self.scale_tril.expand(scale_shape)\n if 'covariance_matrix' in self.__dict__:\n new.covariance_matrix = self.covariance_matrix.expand(scale_shape)\n if 'precision_matrix' in self.__dict__:\n new.precision_matrix = self.precision_matrix.expand(scale_shape)\n new._chi2 = self._chi2.expand(batch_shape)\n super(MultivariateStudentT, new).__init__(batch_shape, self.event_shape, validate_args=False)\n new._validate_args = self._validate_args\n return new\n\n def rsample(self, sample_shape=torch.Size()):\n shape = self._extended_shape(sample_shape)\n X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()\n Z = self._chi2.rsample(sample_shape)\n Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)\n return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n n = self.loc.size(-1)\n y = (value - self.loc).unsqueeze(-1).triangular_solve(self.scale_tril, 
upper=False).solution.squeeze(-1)\n Z = (self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) +\n 0.5 * n * self.df.log() +\n 0.5 * n * math.log(math.pi) +\n torch.lgamma(0.5 * self.df) -\n torch.lgamma(0.5 * (self.df + n)))\n return -0.5 * (self.df + n) * torch.log1p(y.pow(2).sum(-1) / self.df) - Z\n\n @property\n def mean(self):\n m = self.loc.clone()\n m[self.df <= 1, :] = float('nan')\n return m\n\n @property\n def variance(self):\n m = self.scale_tril.pow(2).sum(-1) * (self.df / (self.df - 2)).unsqueeze(-1)\n m[(self.df <= 2) & (self.df > 1), :] = float('inf')\n m[self.df <= 1, :] = float('nan')\n return m\n", "path": "pyro/distributions/multivariate_studentt.py"}]}
| 2,169 | 237 |
gh_patches_debug_22396
|
rasdani/github-patches
|
git_diff
|
freqtrade__freqtrade-5847
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Kucoin] {"code":"429000","msg":"Too Many Requests"}. Able to ignore this error and prevent DDOS protection?
<!--
Have you searched for similar issues before posting it?
If you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue).
If it hasn't been reported, please create a new issue.
Please do not use bug reports to request new features.
-->
## Describe your environment
* Operating system: Canonical Ubuntu
* Python Version:
* CCXT version: 1.56.86
* Freqtrade Version:
Note: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.
## Describe the problem:
{"code":"429000","msg":"Too Many Requests"} error triggering DDOS protection delay.
### Steps to reproduce:
Randomly affects pairs on Kucoin
### Observed Results:
DDOS protection triggering when it doesn't need to, which may cause unintended and financially ruinous behavior in the bot.
An admin on Telegram says we can retry the request instantly without fear of the DDOS protection triggering.

### Relevant code exceptions or logs
freqtrade | 2021-10-11 04:55:04,274 - freqtrade.exchange.common - WARNING - _async_get_candle_history() returned exception: "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?symbol=DYDX-USDT&type=5min&startAt=1633778101&endAt=1633928101 429 Too Many Requests {"code":"429000","msg":"Too Many Requests"}"
freqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - WARNING - retrying _async_get_candle_history() still for 4 times
freqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - INFO - Applying DDosProtection backoff delay: 1
</issue>
<code>
[start of freqtrade/exchange/common.py]
1 import asyncio
2 import logging
3 import time
4 from functools import wraps
5
6 from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
7
8
9 logger = logging.getLogger(__name__)
10
11
12 # Maximum default retry count.
13 # Functions are always called RETRY_COUNT + 1 times (for the original call)
14 API_RETRY_COUNT = 4
15 API_FETCH_ORDER_RETRY_COUNT = 5
16
17 BAD_EXCHANGES = {
18 "bitmex": "Various reasons.",
19 "phemex": "Does not provide history. ",
20 "poloniex": "Does not provide fetch_order endpoint to fetch both open and closed orders.",
21 }
22
23 MAP_EXCHANGE_CHILDCLASS = {
24 'binanceus': 'binance',
25 'binanceje': 'binance',
26 }
27
28
29 EXCHANGE_HAS_REQUIRED = [
30 # Required / private
31 'fetchOrder',
32 'cancelOrder',
33 'createOrder',
34 # 'createLimitOrder', 'createMarketOrder',
35 'fetchBalance',
36
37 # Public endpoints
38 'loadMarkets',
39 'fetchOHLCV',
40 ]
41
42 EXCHANGE_HAS_OPTIONAL = [
43 # Private
44 'fetchMyTrades', # Trades for order - fee detection
45 # Public
46 'fetchOrderBook', 'fetchL2OrderBook', 'fetchTicker', # OR for pricing
47 'fetchTickers', # For volumepairlist?
48 'fetchTrades', # Downloading trades data
49 ]
50
51
52 def remove_credentials(config) -> None:
53 """
54 Removes exchange keys from the configuration and specifies dry-run
55 Used for backtesting / hyperopt / edge and utils.
56 Modifies the input dict!
57 """
58 if config.get('dry_run', False):
59 config['exchange']['key'] = ''
60 config['exchange']['secret'] = ''
61 config['exchange']['password'] = ''
62 config['exchange']['uid'] = ''
63
64
65 def calculate_backoff(retrycount, max_retries):
66 """
67 Calculate backoff
68 """
69 return (max_retries - retrycount) ** 2 + 1
70
71
72 def retrier_async(f):
73 async def wrapper(*args, **kwargs):
74 count = kwargs.pop('count', API_RETRY_COUNT)
75 try:
76 return await f(*args, **kwargs)
77 except TemporaryError as ex:
78 logger.warning('%s() returned exception: "%s"', f.__name__, ex)
79 if count > 0:
80 logger.warning('retrying %s() still for %s times', f.__name__, count)
81 count -= 1
82 kwargs.update({'count': count})
83 if isinstance(ex, DDosProtection):
84 backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)
85 logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
86 await asyncio.sleep(backoff_delay)
87 return await wrapper(*args, **kwargs)
88 else:
89 logger.warning('Giving up retrying: %s()', f.__name__)
90 raise ex
91 return wrapper
92
93
94 def retrier(_func=None, retries=API_RETRY_COUNT):
95 def decorator(f):
96 @wraps(f)
97 def wrapper(*args, **kwargs):
98 count = kwargs.pop('count', retries)
99 try:
100 return f(*args, **kwargs)
101 except (TemporaryError, RetryableOrderError) as ex:
102 logger.warning('%s() returned exception: "%s"', f.__name__, ex)
103 if count > 0:
104 logger.warning('retrying %s() still for %s times', f.__name__, count)
105 count -= 1
106 kwargs.update({'count': count})
107 if isinstance(ex, (DDosProtection, RetryableOrderError)):
108 # increasing backoff
109 backoff_delay = calculate_backoff(count + 1, retries)
110 logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
111 time.sleep(backoff_delay)
112 return wrapper(*args, **kwargs)
113 else:
114 logger.warning('Giving up retrying: %s()', f.__name__)
115 raise ex
116 return wrapper
117 # Support both @retrier and @retrier(retries=2) syntax
118 if _func is None:
119 return decorator
120 else:
121 return decorator(_func)
122
[end of freqtrade/exchange/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/freqtrade/exchange/common.py b/freqtrade/exchange/common.py
--- a/freqtrade/exchange/common.py
+++ b/freqtrade/exchange/common.py
@@ -81,9 +81,16 @@
count -= 1
kwargs.update({'count': count})
if isinstance(ex, DDosProtection):
- backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)
- logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
- await asyncio.sleep(backoff_delay)
+ if "kucoin" in str(ex) and "429000" in str(ex):
+ # Temporary fix for 429000 error on kucoin
+ # see https://github.com/freqtrade/freqtrade/issues/5700 for details.
+ logger.warning(
+ f"Kucoin 429 error, avoid triggering DDosProtection backoff delay. "
+ f"{count} tries left before giving up")
+ else:
+ backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)
+ logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
+ await asyncio.sleep(backoff_delay)
return await wrapper(*args, **kwargs)
else:
logger.warning('Giving up retrying: %s()', f.__name__)
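
Note: the snippet below is a self-contained sketch of the retry behaviour this diff introduces, written only for illustration. The `DDosProtection` class and the `call_with_retry` driver are stand-ins rather than freqtrade's real implementation; only the `"kucoin"` / `"429000"` string check mirrors the patch above.

```python
import asyncio

API_RETRY_COUNT = 4


class DDosProtection(Exception):
    """Stand-in for freqtrade's DDosProtection exception (illustration only)."""


def calculate_backoff(retrycount, max_retries):
    # Same formula as calculate_backoff() in freqtrade/exchange/common.py above
    return (max_retries - retrycount) ** 2 + 1


async def call_with_retry(coro_factory, count=API_RETRY_COUNT):
    try:
        return await coro_factory()
    except DDosProtection as ex:
        if count <= 0:
            raise
        if "kucoin" in str(ex) and "429000" in str(ex):
            # Kucoin's "Too Many Requests" can be retried right away,
            # so no backoff delay is applied for this specific error.
            pass
        else:
            await asyncio.sleep(calculate_backoff(count, API_RETRY_COUNT))
        return await call_with_retry(coro_factory, count=count - 1)
```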
|
{"golden_diff": "diff --git a/freqtrade/exchange/common.py b/freqtrade/exchange/common.py\n--- a/freqtrade/exchange/common.py\n+++ b/freqtrade/exchange/common.py\n@@ -81,9 +81,16 @@\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, DDosProtection):\n- backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n- logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n- await asyncio.sleep(backoff_delay)\n+ if \"kucoin\" in str(ex) and \"429000\" in str(ex):\n+ # Temporary fix for 429000 error on kucoin\n+ # see https://github.com/freqtrade/freqtrade/issues/5700 for details.\n+ logger.warning(\n+ f\"Kucoin 429 error, avoid triggering DDosProtection backoff delay. \"\n+ f\"{count} tries left before giving up\")\n+ else:\n+ backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n+ logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n+ await asyncio.sleep(backoff_delay)\n return await wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n", "issue": "[Kucoin] {\"code\":\"429000\",\"msg\":\"Too Many Requests\"}. Able to ignore this error and prevent DDOS protection?\n<!-- \r\nHave you searched for similar issues before posting it?\r\n\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\nPlease do not use bug reports to request new features.\r\n-->\r\n\r\n## Describe your environment\r\n\r\n * Operating system: Canonical Ubuntu\r\n * Python Version: \r\n * CCXT version: 1.56.86\r\n * Freqtrade Version: \r\n \r\nNote: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.\r\n\r\n## Describe the problem:\r\n\r\n{\"code\":\"429000\",\"msg\":\"Too Many Requests\"} error triggering DDOS protection delay. \r\n\r\n### Steps to reproduce:\r\nRandomly effect pairs on Kucoin\r\n\r\n### Observed Results:\r\n\r\nDDOS protection triggering when it doesn't need to which may cause unintended and financially ruinous behavior in the bot. \r\n\r\nAdmin on telegram says we can retry the request instantly without fear of DDOS protection triggering. \r\n\r\n\r\n\r\n\r\n\r\n\r\n### Relevant code exceptions or logs\r\n\r\nfreqtrade | 2021-10-11 04:55:04,274 - freqtrade.exchange.common - WARNING - _async_get_candle_history() returned exception: \"kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?symbol=DYDX-USDT&type=5min&startAt=1633778101&endAt=1633928101 429 Too Many Requests {\"code\":\"429000\",\"msg\":\"Too Many Requests\"}\"\r\nfreqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - WARNING - retrying _async_get_candle_history() still for 4 times\r\nfreqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - INFO - Applying DDosProtection backoff delay: 1\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\nimport time\nfrom functools import wraps\n\nfrom freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Maximum default retry count.\n# Functions are always called RETRY_COUNT + 1 times (for the original call)\nAPI_RETRY_COUNT = 4\nAPI_FETCH_ORDER_RETRY_COUNT = 5\n\nBAD_EXCHANGES = {\n \"bitmex\": \"Various reasons.\",\n \"phemex\": \"Does not provide history. 
\",\n \"poloniex\": \"Does not provide fetch_order endpoint to fetch both open and closed orders.\",\n}\n\nMAP_EXCHANGE_CHILDCLASS = {\n 'binanceus': 'binance',\n 'binanceje': 'binance',\n}\n\n\nEXCHANGE_HAS_REQUIRED = [\n # Required / private\n 'fetchOrder',\n 'cancelOrder',\n 'createOrder',\n # 'createLimitOrder', 'createMarketOrder',\n 'fetchBalance',\n\n # Public endpoints\n 'loadMarkets',\n 'fetchOHLCV',\n]\n\nEXCHANGE_HAS_OPTIONAL = [\n # Private\n 'fetchMyTrades', # Trades for order - fee detection\n # Public\n 'fetchOrderBook', 'fetchL2OrderBook', 'fetchTicker', # OR for pricing\n 'fetchTickers', # For volumepairlist?\n 'fetchTrades', # Downloading trades data\n]\n\n\ndef remove_credentials(config) -> None:\n \"\"\"\n Removes exchange keys from the configuration and specifies dry-run\n Used for backtesting / hyperopt / edge and utils.\n Modifies the input dict!\n \"\"\"\n if config.get('dry_run', False):\n config['exchange']['key'] = ''\n config['exchange']['secret'] = ''\n config['exchange']['password'] = ''\n config['exchange']['uid'] = ''\n\n\ndef calculate_backoff(retrycount, max_retries):\n \"\"\"\n Calculate backoff\n \"\"\"\n return (max_retries - retrycount) ** 2 + 1\n\n\ndef retrier_async(f):\n async def wrapper(*args, **kwargs):\n count = kwargs.pop('count', API_RETRY_COUNT)\n try:\n return await f(*args, **kwargs)\n except TemporaryError as ex:\n logger.warning('%s() returned exception: \"%s\"', f.__name__, ex)\n if count > 0:\n logger.warning('retrying %s() still for %s times', f.__name__, count)\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, DDosProtection):\n backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n await asyncio.sleep(backoff_delay)\n return await wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n raise ex\n return wrapper\n\n\ndef retrier(_func=None, retries=API_RETRY_COUNT):\n def decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n count = kwargs.pop('count', retries)\n try:\n return f(*args, **kwargs)\n except (TemporaryError, RetryableOrderError) as ex:\n logger.warning('%s() returned exception: \"%s\"', f.__name__, ex)\n if count > 0:\n logger.warning('retrying %s() still for %s times', f.__name__, count)\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, (DDosProtection, RetryableOrderError)):\n # increasing backoff\n backoff_delay = calculate_backoff(count + 1, retries)\n logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n time.sleep(backoff_delay)\n return wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n raise ex\n return wrapper\n # Support both @retrier and @retrier(retries=2) syntax\n if _func is None:\n return decorator\n else:\n return decorator(_func)\n", "path": "freqtrade/exchange/common.py"}]}
| 2,271 | 308 |
gh_patches_debug_6905
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-4932
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in bookshelves.py add method when changing edition
When calling the add method in openlibrary/core/bookshelves.py (e.g. POSTing to bookshelves.json) with a specific edition_id, the result is not as expected if another edition of the same work already exists on the user's bookshelves.
### Steps to Reproduce
Submit a POST to bookshelves.json in a work context (e.g. https://openlibrary.org/works/OL460810W/bookshelves.json) with an add action and a specific edition_id: {'action':'add', 'redir':false, 'bookshelf_id':1, 'edition_id':'/books/OL7656518M', 'dont_remove':true}
If the user already has a different edition of the work on any bookshelf:
* If dont_remove is true, no change is made
* If dont_remove is false, the existing edition is removed from the bookshelf
Expected behavior:
The record for the work on the user's bookshelf should be updated to reflect the passed edition_id if dont_remove is true.
### Details
- **Logged in (Y/N)?** Y
- **Browser type/version?** N/A
- **Operating system?** N/A
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
Changing the update call at line 189 from
`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, vars=data)`
to
`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)`
should create the expected behavior without affecting other use cases.
### Stakeholders
@mheiman
@mekarpeles
</issue>
<code>
[start of openlibrary/core/bookshelves.py]
1 from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
2
3 from . import db
4
5
6 class Bookshelves(object):
7
8 PRESET_BOOKSHELVES = {
9 'Want to Read': 1,
10 'Currently Reading': 2,
11 'Already Read': 3
12 }
13
14 PRESET_BOOKSHELVES_JSON = {
15 'want_to_read': 1,
16 'currently_reading': 2,
17 'already_read': 3,
18 }
19
20 @classmethod
21 def summary(cls):
22 return {
23 'total_books_logged': {
24 'total': Bookshelves.total_books_logged(),
25 'month': Bookshelves.total_books_logged(since=DATE_ONE_MONTH_AGO),
26 'week': Bookshelves.total_books_logged(since=DATE_ONE_WEEK_AGO)
27 },
28 'total_users_logged': {
29 'total': Bookshelves.total_unique_users(),
30 'month': Bookshelves.total_unique_users(since=DATE_ONE_MONTH_AGO),
31 'week': Bookshelves.total_unique_users(since=DATE_ONE_WEEK_AGO)
32 }
33 }
34
35 @classmethod
36 def total_books_logged(cls, shelf_ids=None, since=None):
37 """Returns (int) number of books logged across all Reading Log shelves (e.g. those
38 specified in PRESET_BOOKSHELVES). One may alternatively specify a
39 `list` of `shelf_ids` to isolate or span multiple
40 shelves. `since` may be used to limit the result to those
41 books logged since a specific date. Any python datetime.date
42 type should work.
43
44 Args:
45 shelf_ids (list) - one or more bookshelf_id values, see
46 also the default values specified in PRESET_BOOKSHELVES
47 since (datetime.date) - returns all logged books after date
48
49 """
50
51 oldb = db.get_db()
52 query = "SELECT count(*) from bookshelves_books"
53 if shelf_ids:
54 query += " WHERE bookshelf_id IN ($shelf_ids)"
55 if since:
56 query += " AND created >= $since"
57 elif since:
58 query += " WHERE created >= $since"
59 results = oldb.query(query, vars={'since': since, 'shelf_ids': shelf_ids})
60 return results[0] if results else None
61
62 @classmethod
63 def total_unique_users(cls, since=None):
64 """Returns the total number of unique users who have logged a
65 book. `since` may be provided to only return the number of users after
66 a certain datetime.date.
67 """
68 oldb = db.get_db()
69 query = "select count(DISTINCT username) from bookshelves_books"
70 if since:
71 query += " WHERE created >= $since"
72 results = oldb.query(query, vars={'since': since})
73 return results[0] if results else None
74
75 @classmethod
76 def most_logged_books(cls, shelf_id, limit=10, since=False):
77 """Returns a ranked list of work OLIDs (in the form of an integer --
78 i.e. OL123W would be 123) which have been most logged by
79 users. This query is limited to a specific shelf_id (e.g. 1
80 for "Want to Read").
81 """
82 oldb = db.get_db()
83 query = 'select work_id, count(*) as cnt from bookshelves_books WHERE bookshelf_id=$shelf_id '
84 if since:
85 query += " AND created >= $since"
86 query += ' group by work_id order by cnt desc limit $limit'
87 return list(oldb.query(query, vars={'shelf_id': shelf_id, 'limit': limit, 'since': since}))
88
89 @classmethod
90 def count_total_books_logged_by_user(cls, username, bookshelf_ids=None):
91 """Counts the (int) total number of books logged by this `username`,
92 with the option of limiting the count to specific bookshelves
93 by `bookshelf_id`
94 """
95 return sum(cls.count_total_books_logged_by_user_per_shelf(
96 username, bookshelf_ids=bookshelf_ids).values())
97
98 @classmethod
99 def count_total_books_logged_by_user_per_shelf(cls, username, bookshelf_ids=None):
100 """Returns a dict mapping the specified user's bookshelves_ids to the
101 number of number of books logged per each shelf, i.e. {bookshelf_id:
102 count}. By default, we limit bookshelf_ids to those in PRESET_BOOKSHELVES
103
104 TODO: add `since` to fetch books logged after a certain
105 date. Useful for following/subscribing-to users and being
106 notified of books they log. Also add to
107 count_total_books_logged_by_user
108 """
109 oldb = db.get_db()
110 data = {'username': username}
111 _bookshelf_ids = ','.join([str(x) for x in bookshelf_ids or cls.PRESET_BOOKSHELVES.values()])
112 query = ("SELECT bookshelf_id, count(*) from bookshelves_books WHERE "
113 "bookshelf_id=ANY('{" + _bookshelf_ids + "}'::int[]) "
114 "AND username=$username GROUP BY bookshelf_id")
115 result = oldb.query(query, vars=data)
116 return dict([(i['bookshelf_id'], i['count']) for i in result]) if result else {}
117
118 @classmethod
119 def get_users_logged_books(cls, username, bookshelf_id=None, limit=100, page=1):
120 """Returns a list of Reading Log database records for books which
121 the user has logged. Records are described in core/schema.py
122 and include:
123
124 username (str) - who logged this book
125 work_id (int) - the Open Library work ID as an int (e.g. OL123W becomes 123)
126 bookshelf_id (int) - the ID of the bookshelf, see: PRESET_BOOKSHELVES.
127 If bookshelf_id is None, return books from all bookshelves.
128 edition_id (int) [optional] - the specific edition logged, if applicable
129 created (datetime) - date the book was logged
130
131 """
132 oldb = db.get_db()
133 page = int(page) if page else 1
134 data = {
135 'username': username,
136 'limit': limit,
137 'offset': limit * (page - 1),
138 'bookshelf_id': bookshelf_id
139 }
140 query = ("SELECT * from bookshelves_books WHERE "
141 "bookshelf_id=$bookshelf_id AND username=$username "
142 "LIMIT $limit OFFSET $offset")
143 if bookshelf_id is None:
144 query = ("SELECT * from bookshelves_books WHERE "
145 "username=$username")
146 # XXX Removing limit, offset, etc from data looks like a bug
147 # unrelated / not fixing in this PR.
148 data = { 'username': username }
149 return list(oldb.query(query, vars=data))
150
151 @classmethod
152 def get_users_read_status_of_work(cls, username, work_id):
153 """A user can mark a book as (1) want to read, (2) currently reading,
154 or (3) already read. Each of these states is mutually
155 exclusive. Returns the user's read state of this work, if one
156 exists.
157 """
158 oldb = db.get_db()
159 data = {
160 'username': username,
161 'work_id': int(work_id)
162 }
163 bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])
164 query = ("SELECT bookshelf_id from bookshelves_books WHERE "
165 "bookshelf_id=ANY('{" + bookshelf_ids + "}'::int[]) "
166 "AND username=$username AND work_id=$work_id")
167 result = list(oldb.query(query, vars=data))
168 return result[0].bookshelf_id if result else None
169
170 @classmethod
171 def add(cls, username, bookshelf_id, work_id, edition_id=None):
172 """Adds a book with `work_id` to user's bookshelf designated by
173 `bookshelf_id`"""
174 oldb = db.get_db()
175 work_id = int(work_id)
176 bookshelf_id = int(bookshelf_id)
177 data = {
178 'work_id': work_id,
179 'username': username,
180 }
181
182 users_status = cls.get_users_read_status_of_work(username, work_id)
183 if not users_status:
184 return oldb.insert('bookshelves_books', username=username,
185 bookshelf_id=bookshelf_id,
186 work_id=work_id, edition_id=edition_id)
187 else:
188 where = "work_id=$work_id AND username=$username"
189 return oldb.update('bookshelves_books', where=where,
190 bookshelf_id=bookshelf_id, vars=data)
191
192 @classmethod
193 def remove(cls, username, work_id, bookshelf_id=None):
194 oldb = db.get_db()
195 where = {
196 'username': username,
197 'work_id': int(work_id)
198 }
199 if bookshelf_id:
200 where['bookshelf_id'] = int(bookshelf_id)
201
202 try:
203 return oldb.delete('bookshelves_books',
204 where=('work_id=$work_id AND username=$username'), vars=where)
205 except: # we want to catch no entry exists
206 return None
207
208 @classmethod
209 def get_works_shelves(cls, work_id, lazy=False):
210 """Bookshelves this work is on"""
211 oldb = db.get_db()
212 query = "SELECT * from bookshelves_books where work_id=$work_id"
213 try:
214 result = oldb.query(query, vars={'work_id': int(work_id)})
215 return result if lazy else list(result)
216 except:
217 return None
218
219 @classmethod
220 def get_num_users_by_bookshelf_by_work_id(cls, work_id):
221 """Returns a dict mapping a work_id to the
222 number of number of users who have placed that work_id in each shelf, i.e. {bookshelf_id:
223 count}.
224 """
225 oldb = db.get_db()
226 query = ("SELECT bookshelf_id, count(DISTINCT username) as user_count from bookshelves_books where"
227 " work_id=$work_id"
228 " GROUP BY bookshelf_id")
229 result = oldb.query(query, vars={'work_id': int(work_id)})
230 return dict([(i['bookshelf_id'], i['user_count']) for i in result]) if result else {}
231
232 @classmethod
233 def user_with_most_books(cls):
234 """
235 Which super patrons have the most books logged?
236
237 SELECT username, count(*) AS counted from bookshelves_books WHERE bookshelf_id=ANY('{1,3,2}'::int[]) GROUP BY username ORDER BY counted DESC, username LIMIT 10
238 """
239 oldb = db.get_db()
240 _bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])
241 query = ("SELECT username, count(*) AS counted "
242 "FROM bookshelves_books WHERE "
243 "bookshelf_id=ANY('{" + _bookshelf_ids + "}'::int[]) "
244 "GROUP BY username "
245 "ORDER BY counted DESC, username LIMIT 100")
246 result = oldb.query(query)
247 return list(result)
248
[end of openlibrary/core/bookshelves.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openlibrary/core/bookshelves.py b/openlibrary/core/bookshelves.py
--- a/openlibrary/core/bookshelves.py
+++ b/openlibrary/core/bookshelves.py
@@ -187,7 +187,7 @@
else:
where = "work_id=$work_id AND username=$username"
return oldb.update('bookshelves_books', where=where,
- bookshelf_id=bookshelf_id, vars=data)
+ bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)
@classmethod
def remove(cls, username, work_id, bookshelf_id=None):
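
Note: a minimal in-memory illustration of the behaviour change. The `shelf_rows` dict and the `add()` helper below are made-up stand-ins for the real web.py database calls, used only to show why passing `edition_id` through to the update matters.

```python
# Hypothetical stand-in for the bookshelves_books table (illustration only).
shelf_rows = {("mheiman", 460810): {"bookshelf_id": 1, "edition_id": None}}


def add(username, work_id, bookshelf_id, edition_id=None, patched=True):
    key = (username, work_id)
    if key not in shelf_rows:
        shelf_rows[key] = {"bookshelf_id": bookshelf_id, "edition_id": edition_id}
        return
    update = {"bookshelf_id": bookshelf_id}
    if patched:
        # With the fix, re-adding a work also records the newly chosen edition.
        update["edition_id"] = edition_id
    shelf_rows[key].update(update)


add("mheiman", 460810, 1, edition_id=7656518)
print(shelf_rows[("mheiman", 460810)]["edition_id"])  # 7656518 with the fix, None without it
```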
|
{"golden_diff": "diff --git a/openlibrary/core/bookshelves.py b/openlibrary/core/bookshelves.py\n--- a/openlibrary/core/bookshelves.py\n+++ b/openlibrary/core/bookshelves.py\n@@ -187,7 +187,7 @@\n else:\n where = \"work_id=$work_id AND username=$username\"\n return oldb.update('bookshelves_books', where=where,\n- bookshelf_id=bookshelf_id, vars=data)\n+ bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)\n \n @classmethod\n def remove(cls, username, work_id, bookshelf_id=None):\n", "issue": "Bug in bookshelves.py add method when changing edition\nWhen calling the add method in openlibrary/core/bookshelves.py (e.g. POSTing to bookshelves.json) with a specific edition_id, the result is not as expected if another edition of the same work already exists on the user's bookshelves. \r\n\r\n### Steps to Reproduce\r\nSubmit a POST to bookshelves.json in a work context (e.g. https://openlibrary.org/works/OL460810W/bookshelves.json) with an add action and a specific edition_id: {'action':'add', 'redir':false, 'bookshelf_id':1, 'edition_id':'/books/OL7656518M', 'dont_remove':true}\r\n\r\nIf the user already has a different edition of the work on any bookshelf:\r\n\r\n* If dont_remove is true, no change is made\r\n* If dont_remove is false, the existing edition is removed from the bookshelf\r\n\r\nExpected behavior:\r\n\r\nThe record for the work on the user's bookshelf should be updated to reflect the passed edition_id if dont_remove is true.\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** Y\r\n- **Browser type/version?** N/A\r\n- **Operating system?** N/A\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n\r\nChanging the update call at line 189 from\r\n\r\n`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, vars=data)`\r\n\r\nto \r\n\r\n`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)`\r\n\r\nshould create the expected behavior without affecting other use cases.\r\n\r\n### Stakeholders\r\n@mheiman\r\n@mekarpeles\r\n\n", "before_files": [{"content": "from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO\n\nfrom . import db\n\n\nclass Bookshelves(object):\n\n PRESET_BOOKSHELVES = {\n 'Want to Read': 1,\n 'Currently Reading': 2,\n 'Already Read': 3\n }\n\n PRESET_BOOKSHELVES_JSON = {\n 'want_to_read': 1,\n 'currently_reading': 2,\n 'already_read': 3,\n }\n\n @classmethod\n def summary(cls):\n return {\n 'total_books_logged': {\n 'total': Bookshelves.total_books_logged(),\n 'month': Bookshelves.total_books_logged(since=DATE_ONE_MONTH_AGO),\n 'week': Bookshelves.total_books_logged(since=DATE_ONE_WEEK_AGO)\n },\n 'total_users_logged': {\n 'total': Bookshelves.total_unique_users(),\n 'month': Bookshelves.total_unique_users(since=DATE_ONE_MONTH_AGO),\n 'week': Bookshelves.total_unique_users(since=DATE_ONE_WEEK_AGO)\n }\n }\n\n @classmethod\n def total_books_logged(cls, shelf_ids=None, since=None):\n \"\"\"Returns (int) number of books logged across all Reading Log shelves (e.g. those\n specified in PRESET_BOOKSHELVES). One may alternatively specify a\n `list` of `shelf_ids` to isolate or span multiple\n shelves. `since` may be used to limit the result to those\n books logged since a specific date. 
Any python datetime.date\n type should work.\n\n Args:\n shelf_ids (list) - one or more bookshelf_id values, see\n also the default values specified in PRESET_BOOKSHELVES\n since (datetime.date) - returns all logged books after date\n\n \"\"\"\n\n oldb = db.get_db()\n query = \"SELECT count(*) from bookshelves_books\"\n if shelf_ids:\n query += \" WHERE bookshelf_id IN ($shelf_ids)\"\n if since:\n query += \" AND created >= $since\"\n elif since:\n query += \" WHERE created >= $since\"\n results = oldb.query(query, vars={'since': since, 'shelf_ids': shelf_ids})\n return results[0] if results else None\n\n @classmethod\n def total_unique_users(cls, since=None):\n \"\"\"Returns the total number of unique users who have logged a\n book. `since` may be provided to only return the number of users after\n a certain datetime.date.\n \"\"\"\n oldb = db.get_db()\n query = \"select count(DISTINCT username) from bookshelves_books\"\n if since:\n query += \" WHERE created >= $since\"\n results = oldb.query(query, vars={'since': since})\n return results[0] if results else None\n\n @classmethod\n def most_logged_books(cls, shelf_id, limit=10, since=False):\n \"\"\"Returns a ranked list of work OLIDs (in the form of an integer --\n i.e. OL123W would be 123) which have been most logged by\n users. This query is limited to a specific shelf_id (e.g. 1\n for \"Want to Read\").\n \"\"\"\n oldb = db.get_db()\n query = 'select work_id, count(*) as cnt from bookshelves_books WHERE bookshelf_id=$shelf_id '\n if since:\n query += \" AND created >= $since\"\n query += ' group by work_id order by cnt desc limit $limit'\n return list(oldb.query(query, vars={'shelf_id': shelf_id, 'limit': limit, 'since': since}))\n\n @classmethod\n def count_total_books_logged_by_user(cls, username, bookshelf_ids=None):\n \"\"\"Counts the (int) total number of books logged by this `username`,\n with the option of limiting the count to specific bookshelves\n by `bookshelf_id`\n \"\"\"\n return sum(cls.count_total_books_logged_by_user_per_shelf(\n username, bookshelf_ids=bookshelf_ids).values())\n\n @classmethod\n def count_total_books_logged_by_user_per_shelf(cls, username, bookshelf_ids=None):\n \"\"\"Returns a dict mapping the specified user's bookshelves_ids to the\n number of number of books logged per each shelf, i.e. {bookshelf_id:\n count}. By default, we limit bookshelf_ids to those in PRESET_BOOKSHELVES\n\n TODO: add `since` to fetch books logged after a certain\n date. Useful for following/subscribing-to users and being\n notified of books they log. Also add to\n count_total_books_logged_by_user\n \"\"\"\n oldb = db.get_db()\n data = {'username': username}\n _bookshelf_ids = ','.join([str(x) for x in bookshelf_ids or cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT bookshelf_id, count(*) from bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + _bookshelf_ids + \"}'::int[]) \"\n \"AND username=$username GROUP BY bookshelf_id\")\n result = oldb.query(query, vars=data)\n return dict([(i['bookshelf_id'], i['count']) for i in result]) if result else {}\n\n @classmethod\n def get_users_logged_books(cls, username, bookshelf_id=None, limit=100, page=1):\n \"\"\"Returns a list of Reading Log database records for books which\n the user has logged. Records are described in core/schema.py\n and include:\n\n username (str) - who logged this book\n work_id (int) - the Open Library work ID as an int (e.g. 
OL123W becomes 123)\n bookshelf_id (int) - the ID of the bookshelf, see: PRESET_BOOKSHELVES.\n If bookshelf_id is None, return books from all bookshelves.\n edition_id (int) [optional] - the specific edition logged, if applicable\n created (datetime) - date the book was logged\n\n \"\"\"\n oldb = db.get_db()\n page = int(page) if page else 1\n data = {\n 'username': username,\n 'limit': limit,\n 'offset': limit * (page - 1),\n 'bookshelf_id': bookshelf_id\n }\n query = (\"SELECT * from bookshelves_books WHERE \"\n \"bookshelf_id=$bookshelf_id AND username=$username \"\n \"LIMIT $limit OFFSET $offset\")\n if bookshelf_id is None:\n query = (\"SELECT * from bookshelves_books WHERE \"\n \"username=$username\")\n # XXX Removing limit, offset, etc from data looks like a bug\n # unrelated / not fixing in this PR.\n data = { 'username': username }\n return list(oldb.query(query, vars=data))\n\n @classmethod\n def get_users_read_status_of_work(cls, username, work_id):\n \"\"\"A user can mark a book as (1) want to read, (2) currently reading,\n or (3) already read. Each of these states is mutually\n exclusive. Returns the user's read state of this work, if one\n exists.\n \"\"\"\n oldb = db.get_db()\n data = {\n 'username': username,\n 'work_id': int(work_id)\n }\n bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT bookshelf_id from bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + bookshelf_ids + \"}'::int[]) \"\n \"AND username=$username AND work_id=$work_id\")\n result = list(oldb.query(query, vars=data))\n return result[0].bookshelf_id if result else None\n\n @classmethod\n def add(cls, username, bookshelf_id, work_id, edition_id=None):\n \"\"\"Adds a book with `work_id` to user's bookshelf designated by\n `bookshelf_id`\"\"\"\n oldb = db.get_db()\n work_id = int(work_id)\n bookshelf_id = int(bookshelf_id)\n data = {\n 'work_id': work_id,\n 'username': username,\n }\n\n users_status = cls.get_users_read_status_of_work(username, work_id)\n if not users_status:\n return oldb.insert('bookshelves_books', username=username,\n bookshelf_id=bookshelf_id,\n work_id=work_id, edition_id=edition_id)\n else:\n where = \"work_id=$work_id AND username=$username\"\n return oldb.update('bookshelves_books', where=where,\n bookshelf_id=bookshelf_id, vars=data)\n\n @classmethod\n def remove(cls, username, work_id, bookshelf_id=None):\n oldb = db.get_db()\n where = {\n 'username': username,\n 'work_id': int(work_id)\n }\n if bookshelf_id:\n where['bookshelf_id'] = int(bookshelf_id)\n\n try:\n return oldb.delete('bookshelves_books',\n where=('work_id=$work_id AND username=$username'), vars=where)\n except: # we want to catch no entry exists\n return None\n\n @classmethod\n def get_works_shelves(cls, work_id, lazy=False):\n \"\"\"Bookshelves this work is on\"\"\"\n oldb = db.get_db()\n query = \"SELECT * from bookshelves_books where work_id=$work_id\"\n try:\n result = oldb.query(query, vars={'work_id': int(work_id)})\n return result if lazy else list(result)\n except:\n return None\n\n @classmethod\n def get_num_users_by_bookshelf_by_work_id(cls, work_id):\n \"\"\"Returns a dict mapping a work_id to the\n number of number of users who have placed that work_id in each shelf, i.e. 
{bookshelf_id:\n count}.\n \"\"\"\n oldb = db.get_db()\n query = (\"SELECT bookshelf_id, count(DISTINCT username) as user_count from bookshelves_books where\"\n \" work_id=$work_id\"\n \" GROUP BY bookshelf_id\")\n result = oldb.query(query, vars={'work_id': int(work_id)})\n return dict([(i['bookshelf_id'], i['user_count']) for i in result]) if result else {}\n\n @classmethod\n def user_with_most_books(cls):\n \"\"\"\n Which super patrons have the most books logged?\n\n SELECT username, count(*) AS counted from bookshelves_books WHERE bookshelf_id=ANY('{1,3,2}'::int[]) GROUP BY username ORDER BY counted DESC, username LIMIT 10\n \"\"\"\n oldb = db.get_db()\n _bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT username, count(*) AS counted \"\n \"FROM bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + _bookshelf_ids + \"}'::int[]) \"\n \"GROUP BY username \"\n \"ORDER BY counted DESC, username LIMIT 100\")\n result = oldb.query(query)\n return list(result)\n", "path": "openlibrary/core/bookshelves.py"}]}
| 4,010 | 138 |
gh_patches_debug_11817
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-3139
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sparse SGD + momentum = cuda memory issue.
When using the classic SGD optimizer with momentum together with sparse embeddings, memory keeps being garbage collected / reallocated, leading to a slowdown and eventually an out-of-memory error. [Here is a minimal example to reproduce the issue](https://gist.github.com/cedias/946a380807b7e1bf92d738268b71415a)


The issue disappears when momentum is not used

or when embeddings are not sparse

I'm using the latest pytorch version on conda: `'0.2.0_4'`
</issue>
<code>
[start of torch/optim/sgd.py]
1 from .optimizer import Optimizer, required
2
3
4 class SGD(Optimizer):
5 r"""Implements stochastic gradient descent (optionally with momentum).
6
7 Nesterov momentum is based on the formula from
8 `On the importance of initialization and momentum in deep learning`__.
9
10 Args:
11 params (iterable): iterable of parameters to optimize or dicts defining
12 parameter groups
13 lr (float): learning rate
14 momentum (float, optional): momentum factor (default: 0)
15 weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
16 dampening (float, optional): dampening for momentum (default: 0)
17 nesterov (bool, optional): enables Nesterov momentum (default: False)
18
19 Example:
20 >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
21 >>> optimizer.zero_grad()
22 >>> loss_fn(model(input), target).backward()
23 >>> optimizer.step()
24
25 __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
26
27 .. note::
28 The implementation of SGD with Momentum/Nesterov subtly differs from
29 Sutskever et. al. and implementations in some other frameworks.
30
31 Considering the specific case of Momentum, the update can be written as
32
33 .. math::
34 v = \rho * v + g \\
35 p = p - lr * v
36
37 where p, g, v and :math:`\rho` denote the parameters, gradient,
38 velocity, and momentum respectively.
39
40 This is in contrast to Sutskever et. al. and
41 other frameworks which employ an update of the form
42
43 .. math::
44 v = \rho * v + lr * g \\
45 p = p - v
46
47 The Nesterov version is analogously modified.
48 """
49
50 def __init__(self, params, lr=required, momentum=0, dampening=0,
51 weight_decay=0, nesterov=False):
52 defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
53 weight_decay=weight_decay, nesterov=nesterov)
54 if nesterov and (momentum <= 0 or dampening != 0):
55 raise ValueError("Nesterov momentum requires a momentum and zero dampening")
56 super(SGD, self).__init__(params, defaults)
57
58 def __setstate__(self, state):
59 super(SGD, self).__setstate__(state)
60 for group in self.param_groups:
61 group.setdefault('nesterov', False)
62
63 def step(self, closure=None):
64 """Performs a single optimization step.
65
66 Arguments:
67 closure (callable, optional): A closure that reevaluates the model
68 and returns the loss.
69 """
70 loss = None
71 if closure is not None:
72 loss = closure()
73
74 for group in self.param_groups:
75 weight_decay = group['weight_decay']
76 momentum = group['momentum']
77 dampening = group['dampening']
78 nesterov = group['nesterov']
79
80 for p in group['params']:
81 if p.grad is None:
82 continue
83 d_p = p.grad.data
84 if weight_decay != 0:
85 d_p.add_(weight_decay, p.data)
86 if momentum != 0:
87 param_state = self.state[p]
88 if 'momentum_buffer' not in param_state:
89 buf = param_state['momentum_buffer'] = d_p.clone()
90 else:
91 buf = param_state['momentum_buffer']
92 buf.mul_(momentum).add_(1 - dampening, d_p)
93 if nesterov:
94 d_p = d_p.add(momentum, buf)
95 else:
96 d_p = buf
97
98 p.data.add_(-group['lr'], d_p)
99
100 return loss
101
[end of torch/optim/sgd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -86,7 +86,8 @@
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
- buf = param_state['momentum_buffer'] = d_p.clone()
+ buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()
+ buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
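
Note: a hedged sketch of the difference between the old and the patched momentum-buffer initialisation for a sparse embedding gradient. The tensor shapes are invented for illustration, `new_zeros` stands in for the `new().resize_as_().zero_()` call in the diff, and the comments summarise the issue report above rather than asserting exact allocator behaviour.

```python
import torch

p = torch.zeros(10, 3)  # dense parameter, e.g. an embedding weight matrix
d_p = torch.sparse_coo_tensor(
    torch.tensor([[1, 4]]), torch.randn(2, 3), size=(10, 3)
)  # sparse gradient, like the ones produced by nn.Embedding(..., sparse=True)

# Old behaviour: the buffer starts as a sparse clone of the first gradient, so the
# repeated mul_/add_ updates keep growing and reallocating sparse storage.
buf_old = d_p.clone()

# Patched behaviour: start from a dense zero buffer shaped like the parameter
# and accumulate the (sparse) gradient into it.
buf_new = p.new_zeros(p.shape)
buf_new.mul_(0.9).add_(d_p)
print(buf_old.is_sparse, buf_new.is_sparse)  # True False
```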
|
{"golden_diff": "diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py\n--- a/torch/optim/sgd.py\n+++ b/torch/optim/sgd.py\n@@ -86,7 +86,8 @@\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n- buf = param_state['momentum_buffer'] = d_p.clone()\n+ buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()\n+ buf.mul_(momentum).add_(d_p)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n", "issue": "Sparse SGD + momentum = cuda memory issue.\nWhen using classic SGD optimizer with momentum with sparse embeddings the memory keeps garbage collecting / allocating leading to slow down and out of memory error eventually. [Here is a minimal exemple to reproduce the issue](https://gist.github.com/cedias/946a380807b7e1bf92d738268b71415a)\r\n\r\n\r\n\r\n\r\n\r\nThe issue dissapears when momentum is not used\r\n\r\n\r\nor when embeddings are not sparse\r\n\r\n\r\n\r\nI'm using the last pytorch version on conda: `'0.2.0_4'`\r\n\n", "before_files": [{"content": "from .optimizer import Optimizer, required\n\n\nclass SGD(Optimizer):\n r\"\"\"Implements stochastic gradient descent (optionally with momentum).\n\n Nesterov momentum is based on the formula from\n `On the importance of initialization and momentum in deep learning`__.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\n\n .. note::\n The implementation of SGD with Momentum/Nesterov subtly differs from\n Sutskever et. al. and implementations in some other frameworks.\n\n Considering the specific case of Momentum, the update can be written as\n\n .. math::\n v = \\rho * v + g \\\\\n p = p - lr * v\n\n where p, g, v and :math:`\\rho` denote the parameters, gradient,\n velocity, and momentum respectively.\n\n This is in contrast to Sutskever et. al. and\n other frameworks which employ an update of the form\n\n .. 
math::\n v = \\rho * v + lr * g \\\\\n p = p - v\n\n The Nesterov version is analogously modified.\n \"\"\"\n\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGD, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGD, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = d_p.clone()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n p.data.add_(-group['lr'], d_p)\n\n return loss\n", "path": "torch/optim/sgd.py"}]}
| 1,947 | 166 |
gh_patches_debug_19947
|
rasdani/github-patches
|
git_diff
|
cowrie__cowrie-1463
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CEF spaces between each character
I followed the 7 steps https://cowrie.readthedocs.io/en/latest/INSTALL.html#step-1-install-dependencies
I set up CEF in the config file:
```
[output_localsyslog]
enabled = true
facility = USER
format = cef
```
But the output I'm getting has a bunch of spaces; it seems to have a space between each character, as shown below:
```
Nov 26 04:42:45 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . s e s s i o n . c o n n e c t | c o w r i e . s e s s i o n . c o n n e c t | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = N e w c o n n e c t i o n : 1 9 2 . 1 6 8 . 2 . 5 7 : 3 3 6 2 6 ( 1 9 2 . 1 6 8 . 2 . 6 4 : 2 2 2 2 ) [ s e s s i o n : 8 a 9 0 7 9 8 c 8 9 f d ] s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p s p t = 3 3 6 2 6 d p t = 2 2 2 2 d s t = 1 9 2 . 1 6 8 . 2 . 6 4
Nov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . v e r s i o n | c o w r i e . c l i e n t . v e r s i o n | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = R e m o t e S S H v e r s i o n : b ' S S H - 2 . 0 - O p e n S S H _ 8 . 2 p 1 U b u n t u - 4 u b u n t u 0 . 1 ' s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p
Nov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . k e x | c o w r i e . c l i e n t . k e x | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = S S H c l i e n t h a s s h f i n g e r p r i n t : a e 8 b d 7 d d 0 9 9 7 0 5 5 5 a a 4 c 6 e d 2 2 a d b b f 5 6 s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p
```
</issue>
<code>
[start of src/cowrie/output/localsyslog.py]
1 # Copyright (c) 2015 Michel Oosterhof <[email protected]>
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions
6 # are met:
7 #
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
13 # 3. The names of the author(s) may not be used to endorse or promote
14 # products derived from this software without specific prior written
15 # permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
18 # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 # SUCH DAMAGE.
28
29 from __future__ import absolute_import, division
30
31 import syslog
32
33 import twisted.python.syslog
34
35 import cowrie.core.cef
36 import cowrie.core.output
37 from cowrie.core.config import CowrieConfig
38
39
40 class Output(cowrie.core.output.Output):
41 """
42 localsyslog output
43 """
44
45 def start(self):
46 self.format = CowrieConfig().get('output_localsyslog', 'format')
47 facilityString = CowrieConfig().get('output_localsyslog', 'facility')
48 self.facility = vars(syslog)['LOG_' + facilityString]
49 self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)
50
51 def stop(self):
52 pass
53
54 def write(self, logentry):
55 if 'isError' not in logentry:
56 logentry['isError'] = False
57
58 if self.format == 'cef':
59 self.syslog.emit({
60 'message': cowrie.core.cef.formatCef(logentry),
61 'isError': False,
62 'system': 'cowrie'
63 })
64 else:
65 # message appears with additional spaces if message key is defined
66 logentry['message'] = [logentry['message']]
67 self.syslog.emit(logentry)
68
[end of src/cowrie/output/localsyslog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cowrie/output/localsyslog.py b/src/cowrie/output/localsyslog.py
--- a/src/cowrie/output/localsyslog.py
+++ b/src/cowrie/output/localsyslog.py
@@ -45,6 +45,7 @@
def start(self):
self.format = CowrieConfig().get('output_localsyslog', 'format')
facilityString = CowrieConfig().get('output_localsyslog', 'facility')
+ levelString = CowrieConfig().get('output_localsyslog', 'level')
self.facility = vars(syslog)['LOG_' + facilityString]
self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)
@@ -57,7 +58,7 @@
if self.format == 'cef':
self.syslog.emit({
- 'message': cowrie.core.cef.formatCef(logentry),
+ 'message': [cowrie.core.cef.formatCef(logentry)],
'isError': False,
'system': 'cowrie'
})
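
Note: as far as I can tell, the spacing appears because Twisted's syslog observer builds the final text by joining `eventDict['message']` with spaces, so a bare string is iterated character by character; wrapping it in a one-element list (as the diff does) keeps it intact. A minimal pure-Python illustration, independent of Twisted:

```python
message = "CEF:0|Cowrie|Cowrie|1.0|cowrie.session.connect"

# A bare string handed to " ".join(...) is iterated character by character:
print(" ".join(message))    # C E F : 0 | C o w r i e | ...

# Wrapped in a one-element list, as in the patch, it stays intact:
print(" ".join([message]))  # CEF:0|Cowrie|Cowrie|1.0|cowrie.session.connect
```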
|
{"golden_diff": "diff --git a/src/cowrie/output/localsyslog.py b/src/cowrie/output/localsyslog.py\n--- a/src/cowrie/output/localsyslog.py\n+++ b/src/cowrie/output/localsyslog.py\n@@ -45,6 +45,7 @@\n def start(self):\n self.format = CowrieConfig().get('output_localsyslog', 'format')\n facilityString = CowrieConfig().get('output_localsyslog', 'facility')\n+ levelString = CowrieConfig().get('output_localsyslog', 'level')\n self.facility = vars(syslog)['LOG_' + facilityString]\n self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)\n \n@@ -57,7 +58,7 @@\n \n if self.format == 'cef':\n self.syslog.emit({\n- 'message': cowrie.core.cef.formatCef(logentry),\n+ 'message': [cowrie.core.cef.formatCef(logentry)],\n 'isError': False,\n 'system': 'cowrie'\n })\n", "issue": "CEF spaces between each character\nI followed the 7 steps https://cowrie.readthedocs.io/en/latest/INSTALL.html#step-1-install-dependencies\r\n\r\nI set up cef in the config file \r\n`\r\n[output_localsyslog]\r\nenabled = true\r\nfacility = USER\r\nformat = cef`\r\n\r\nBut the output I'm getting has a bunch of spaces it seems to have a space between each character below \r\n\r\n`\r\nNov 26 04:42:45 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . s e s s i o n . c o n n e c t | c o w r i e . s e s s i o n . c o n n e c t | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = N e w c o n n e c t i o n : 1 9 2 . 1 6 8 . 2 . 5 7 : 3 3 6 2 6 ( 1 9 2 . 1 6 8 . 2 . 6 4 : 2 2 2 2 ) [ s e s s i o n : 8 a 9 0 7 9 8 c 8 9 f d ] s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p s p t = 3 3 6 2 6 d p t = 2 2 2 2 d s t = 1 9 2 . 1 6 8 . 2 . 6 4\r\n\r\nNov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . v e r s i o n | c o w r i e . c l i e n t . v e r s i o n | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = R e m o t e S S H v e r s i o n : b ' S S H - 2 . 0 - O p e n S S H _ 8 . 2 p 1 U b u n t u - 4 u b u n t u 0 . 1 ' s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p\r\n\r\nNov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . k e x | c o w r i e . c l i e n t . k e x | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = S S H c l i e n t h a s s h f i n g e r p r i n t : a e 8 b d 7 d d 0 9 9 7 0 5 5 5 a a 4 c 6 e d 2 2 a d b b f 5 6 s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p\r\n`\n", "before_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. 
The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\nfrom __future__ import absolute_import, division\n\nimport syslog\n\nimport twisted.python.syslog\n\nimport cowrie.core.cef\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n localsyslog output\n \"\"\"\n\n def start(self):\n self.format = CowrieConfig().get('output_localsyslog', 'format')\n facilityString = CowrieConfig().get('output_localsyslog', 'facility')\n self.facility = vars(syslog)['LOG_' + facilityString]\n self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)\n\n def stop(self):\n pass\n\n def write(self, logentry):\n if 'isError' not in logentry:\n logentry['isError'] = False\n\n if self.format == 'cef':\n self.syslog.emit({\n 'message': cowrie.core.cef.formatCef(logentry),\n 'isError': False,\n 'system': 'cowrie'\n })\n else:\n # message appears with additional spaces if message key is defined\n logentry['message'] = [logentry['message']]\n self.syslog.emit(logentry)\n", "path": "src/cowrie/output/localsyslog.py"}]}
| 2,254 | 239 |
gh_patches_debug_942
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-1578
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
prompt.read_user_dict() is broken due to click upgrade from 7.1.2 to 8.0.0
* Cookiecutter version: 1.7.3
* Template project url: -
* Python version: 3.9.5
* Operating System: macOS Catalina 10.15.7
### Description:
Apparently, there is a breaking change in `click==8.0.0` affecting dictionary values in cookiecutter.json
cookiecutter.json example:
```json
{
"project_name": "",
"project_policy": {"project_policy_example": "yes"}
}
```
```
% python -m cookiecutter ../Projects/project-configs
devplatform_project_name [infra-dev]:
project_name []: t
project_policy [default]:
Error: Unable to decode to JSON.
```
Looking closer at cookiecutter.prompt, I can see that in `read_user_dict()`, click passes `user_value='default'` to `process_json()`, instead of passing an actual default value from the cookiecutter.json as it did in `click 7.1.2`. 
Link to the `process_json()` code: https://github.com/cookiecutter/cookiecutter/blob/master/cookiecutter/prompt.py#L81

As far as I can suppose, that issue could have been introduced by this PR https://github.com/pallets/click/pull/1517/
### Quick local fix
Install click first and specify version older than 8.0.0
```
pip install click==7.1.2
pip install cookiecutter
```
### Quick fix for cookiecutter library
in `setup.py` replace 'click>=7.0' with `'click>=7,<8.0.0'`
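For concreteness, the pinned dependency block in `setup.py` would then read roughly as follows (this mirrors the requirements list reproduced later in this record; the inline comment is editorial):

```python
requirements = [
    'binaryornot>=0.4.4',
    'Jinja2>=2.7,<4.0.0',
    'click>=7.0,<8.0.0',  # stay below click 8 until the prompt code handles the new default passing
    'poyo>=0.5.0',
    'jinja2-time>=0.2.0',
    'python-slugify>=4.0.0',
    'requests>=2.23.0',
    'six>=1.10',
]
```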
### What I've run:
```shell
% python3.9 -m venv test39
% source test39/bin/activate
% python -V
Python 3.9.5
% python -m pip install click==7.1.2
Collecting click==7.1.2
Using cached click-7.1.2-py2.py3-none-any.whl (82 kB)
Installing collected packages: click
Successfully installed click-7.1.2
(test39) ro.solyanik@macbook-ro Environments % python -m pip install cookiecutter
Collecting cookiecutter
Using cached cookiecutter-1.7.3-py2.py3-none-any.whl (34 kB)
Collecting six>=1.10
................................................
Installing collected packages: six, python-dateutil, MarkupSafe, urllib3, text-unidecode, Jinja2, idna, chardet, certifi, arrow, requests, python-slugify, poyo, jinja2-time, binaryornot, cookiecutter
Successfully installed Jinja2-3.0.1 MarkupSafe-2.0.1 arrow-1.1.0 binaryornot-0.4.4 certifi-2020.12.5 chardet-4.0.0 cookiecutter-1.7.3 idna-2.10 jinja2-time-0.2.0 poyo-0.5.0 python-dateutil-2.8.1 python-slugify-5.0.2 requests-2.25.1 six-1.16.0 text-unidecode-1.3 urllib3-1.26.4
% python -m cookiecutter ../Projects/project-configs
project_name []: t
project_policy [default]:
% ls t
Makefile README.md t tests
% rm -rf t
% python -m pip install click==8.0.0
Collecting click==8.0.0
Using cached click-8.0.0-py3-none-any.whl (96 kB)
Installing collected packages: click
Attempting uninstall: click
Found existing installation: click 7.1.2
Uninstalling click-7.1.2:
Successfully uninstalled click-7.1.2
Successfully installed click-8.0.0
% python -m cookiecutter ../Projects/project-configs
devplatform_project_name [infra-dev]:
project_name []: t
project_policy [default]:
Error: Unable to decode to JSON.
project_policy [default]:
Error: Unable to decode to JSON.
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """cookiecutter distutils configuration."""
5
6 import os
7 import io
8 import sys
9
10 from setuptools import setup
11
12 version = "1.7.3"
13
14 if sys.argv[-1] == 'publish':
15 os.system('python setup.py sdist upload')
16 os.system('python setup.py bdist_wheel upload')
17 sys.exit()
18
19 if sys.argv[-1] == 'tag':
20 os.system("git tag -a %s -m 'version %s'" % (version, version))
21 os.system("git push --tags")
22 sys.exit()
23
24 with io.open('README.md', 'r', encoding='utf-8') as readme_file:
25 readme = readme_file.read()
26
27 requirements = [
28 'binaryornot>=0.4.4',
29 'Jinja2>=2.7,<4.0.0',
30 'click>=7.0',
31 'poyo>=0.5.0',
32 'jinja2-time>=0.2.0',
33 'python-slugify>=4.0.0',
34 'requests>=2.23.0',
35 'six>=1.10',
36 ]
37
38 if sys.argv[-1] == 'readme':
39 print(readme)
40 sys.exit()
41
42
43 setup(
44 name='cookiecutter',
45 version=version,
46 description=(
47 'A command-line utility that creates projects from project '
48 'templates, e.g. creating a Python package project from a '
49 'Python package project template.'
50 ),
51 long_description=readme,
52 long_description_content_type='text/markdown',
53 author='Audrey Roy',
54 author_email='[email protected]',
55 url='https://github.com/cookiecutter/cookiecutter',
56 packages=['cookiecutter'],
57 package_dir={'cookiecutter': 'cookiecutter'},
58 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
59 include_package_data=True,
60 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
61 install_requires=requirements,
62 extras_require={':python_version<"3.3"': ['whichcraft>=0.4.0']},
63 license='BSD',
64 zip_safe=False,
65 classifiers=[
66 "Development Status :: 5 - Production/Stable",
67 "Environment :: Console",
68 "Intended Audience :: Developers",
69 "Natural Language :: English",
70 "License :: OSI Approved :: BSD License",
71 "Programming Language :: Python",
72 "Programming Language :: Python :: 2",
73 "Programming Language :: Python :: 2.7",
74 "Programming Language :: Python :: 3",
75 "Programming Language :: Python :: 3.5",
76 "Programming Language :: Python :: 3.6",
77 "Programming Language :: Python :: 3.7",
78 "Programming Language :: Python :: 3.8",
79 "Programming Language :: Python :: Implementation :: CPython",
80 "Programming Language :: Python :: Implementation :: PyPy",
81 "Topic :: Software Development",
82 ],
83 keywords=(
84 'cookiecutter, Python, projects, project templates, Jinja2, '
85 'skeleton, scaffolding, project directory, setup.py, package, '
86 'packaging'
87 ),
88 )
89
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
requirements = [
'binaryornot>=0.4.4',
'Jinja2>=2.7,<4.0.0',
- 'click>=7.0',
+ 'click>=7.0,<8.0.0',
'poyo>=0.5.0',
'jinja2-time>=0.2.0',
'python-slugify>=4.0.0',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,7 @@\n requirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n- 'click>=7.0',\n+ 'click>=7.0,<8.0.0',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n", "issue": "prompt.read_user_dict() is broken due to click upgrade from 7.1.2 to 8.0.0\n* Cookiecutter version: 1.7.3\r\n* Template project url: -\r\n* Python version: 3.9.5\r\n* Operating System: macOS Catalina 10.15.7\r\n\r\n### Description:\r\n\r\nApparently, there is a breaking change in `click==8.0.0` affecting dictionary values in cookiecutter.json\r\ncookiecutter.json example:\r\n```json\r\n{\r\n \"project_name\": \"\",\r\n \"project_policy\": {\"project_policy_example\": \"yes\"}\r\n}\r\n```\r\n \r\n```\r\n% python -m cookiecutter ../Projects/project-configs\r\ndevplatform_project_name [infra-dev]: \r\nproject_name []: t\r\nproject_policy [default]: \r\nError: Unable to decode to JSON.\r\n```\r\n\r\nLooking closer at the cookiecutter.promt, I can see that in `read_user_dict()`, click passes `user_value='default'` to `process_json()`, instead of passing an actual default value from the cookiecutter.json as it was in `click 7.1.2`. \r\nLink to the `process_json()` code: https://github.com/cookiecutter/cookiecutter/blob/master/cookiecutter/prompt.py#L81\r\n\r\n\r\nAs far as I can suppose, that issue could have been introduced by this PR https://github.com/pallets/click/pull/1517/\r\n\r\n### Quick local fix\r\nInstall click first and specify version older than 8.0.0\r\n```\r\npip install click==7.1.2\r\npip install cookiecutter\r\n```\r\n\r\n### Quick fix for cookiecutter library\r\nin `setup.py` replace 'click>=7.0' with `'click>=7,<8.0.0'`\r\n\r\n### What I've run:\r\n\r\n```shell\r\n% python3.9 -m venv test39 \r\n \r\n% source test39/bin/activate\r\n\r\n% python -V\r\nPython 3.9.5\r\n\r\n\r\n% python -m pip install click==7.1.2\r\nCollecting click==7.1.2\r\n Using cached click-7.1.2-py2.py3-none-any.whl (82 kB)\r\nInstalling collected packages: click\r\nSuccessfully installed click-7.1.2\r\n(test39) ro.solyanik@macbook-ro Environments % python -m pip install cookiecutter\r\nCollecting cookiecutter\r\n Using cached cookiecutter-1.7.3-py2.py3-none-any.whl (34 kB)\r\nCollecting six>=1.10\r\n................................................\r\nInstalling collected packages: six, python-dateutil, MarkupSafe, urllib3, text-unidecode, Jinja2, idna, chardet, certifi, arrow, requests, python-slugify, poyo, jinja2-time, binaryornot, cookiecutter\r\nSuccessfully installed Jinja2-3.0.1 MarkupSafe-2.0.1 arrow-1.1.0 binaryornot-0.4.4 certifi-2020.12.5 chardet-4.0.0 cookiecutter-1.7.3 idna-2.10 jinja2-time-0.2.0 poyo-0.5.0 python-dateutil-2.8.1 python-slugify-5.0.2 requests-2.25.1 six-1.16.0 text-unidecode-1.3 urllib3-1.26.4\r\n\r\n% python -m cookiecutter ../Projects/project-configs\r\nproject_name []: t\r\nproject_policy [default]: \r\n\r\n% ls t \r\nMakefile README.md t tests\r\n\r\n% rm -rf t\r\n\r\n% python -m pip install click==8.0.0 \r\nCollecting click==8.0.0\r\n Using cached click-8.0.0-py3-none-any.whl (96 kB)\r\nInstalling collected packages: click\r\n Attempting uninstall: click\r\n Found existing installation: click 7.1.2\r\n Uninstalling click-7.1.2:\r\n Successfully uninstalled click-7.1.2\r\nSuccessfully installed click-8.0.0\r\n\r\n% python -m cookiecutter ../Projects/project-configs\r\ndevplatform_project_name [infra-dev]: \r\nproject_name []: t\r\nproject_policy [default]: \r\nError: Unable to decode to 
JSON.\r\nproject_policy [default]: \r\nError: Unable to decode to JSON.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration.\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.3\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={':python_version<\"3.3\"': ['whichcraft>=0.4.0']},\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]}
| 2,457 | 124 |
gh_patches_debug_14833
|
rasdani/github-patches
|
git_diff
|
conda__conda-build-734
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CPU_COUNT reliability in MacOSX
Hello!
After a talk with @ccordoba12 in [conda-recipes/pr427](https://github.com/conda/conda-recipes/pull/427), it seems that CPU_COUNT is not reliable on MacOSX, and he suggests using "sysctl -n hw.ncpu".
Have you encountered this bug ?
I see that in python3.5, multiprocessing.cpu_count uses os.cpu_count and this builtin command uses also sysctl : posixmodule.c, line 11220:
``` c
#elif defined(__DragonFly__) || \
defined(__OpenBSD__) || \
defined(__FreeBSD__) || \
defined(__NetBSD__) || \
defined(__APPLE__)
int mib[2];
size_t len = sizeof(ncpu);
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)
ncpu = 0;
```
To fix it, I can create a patch that add an OSX case in in [conda-build/environ.py:109](https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L109) with something like:
``` python
import subprocess
import sys
if sys.platform == 'darwin':
out, err = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE).communicate()
d['CPU_COUNT'] = str(out).strip()
```
Do you agree on this approach ?
Thank you very much
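One practical detail worth adding (an editorial aside, consistent with the fix that was eventually applied in this record's patch): `Popen(...).communicate()` returns bytes, so `str(out)` would produce something like `"b'8\n'"`; the output needs to be decoded before stripping. A minimal sketch, where `d` is the environment dictionary built in `environ.py`:

```python
import subprocess
import sys

if sys.platform == 'darwin':
    # sysctl output is bytes; decode it before stripping the trailing newline
    out, err = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True,
                                stdout=subprocess.PIPE).communicate()
    d['CPU_COUNT'] = out.decode('utf-8').strip()
```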
</issue>
<code>
[start of conda_build/environ.py]
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import sys
5 from os.path import join
6 import subprocess
7 import multiprocessing
8
9 import conda.config as cc
10
11 from conda_build.config import config
12
13 from conda_build import source
14 from conda_build.scripts import prepend_bin_path
15
16
17 def get_perl_ver():
18 return str(config.CONDA_PERL)
19
20 def get_py_ver():
21 return '.'.join(str(config.CONDA_PY))
22
23 def get_npy_ver():
24 if config.CONDA_NPY:
25 # Convert int -> string, e.g.
26 # 17 -> '1.7'
27 # 110 -> '1.10'
28 conda_npy = str(config.CONDA_NPY)
29 return conda_npy[0] + '.' + conda_npy[1:]
30 return ''
31
32 def get_stdlib_dir():
33 return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else
34 'lib/python%s' % get_py_ver())
35
36 def get_sp_dir():
37 return join(get_stdlib_dir(), 'site-packages')
38
39 def get_git_build_info(src_dir):
40 env = os.environ.copy()
41 d = {}
42 git_dir = join(src_dir, '.git')
43 if os.path.exists(git_dir):
44 env['GIT_DIR'] = git_dir
45 else:
46 return d
47
48 # grab information from describe
49 key_name = lambda a: "GIT_DESCRIBE_{}".format(a)
50 keys = [key_name("TAG"), key_name("NUMBER"), key_name("HASH")]
51 env = {str(key): str(value) for key, value in env.items()}
52 process = subprocess.Popen(["git", "describe", "--tags", "--long", "HEAD"],
53 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
54 env=env)
55 output = process.communicate()[0].strip()
56 output = output.decode('utf-8')
57 parts = output.rsplit('-', 2)
58 parts_length = len(parts)
59 if parts_length == 3:
60 d.update(dict(zip(keys, parts)))
61 # get the _full_ hash of the current HEAD
62 process = subprocess.Popen(["git", "rev-parse", "HEAD"],
63 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
64 env=env)
65 output = process.communicate()[0].strip()
66 output = output.decode('utf-8')
67 d['GIT_FULL_HASH'] = output
68 # set up the build string
69 if key_name('NUMBER') in d and key_name('HASH') in d:
70 d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],
71 d[key_name('HASH')])
72
73 return d
74
75 def get_dict(m=None, prefix=None):
76 if not prefix:
77 prefix = config.build_prefix
78
79 python = config.build_python
80 d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}
81 d['CONDA_DEFAULT_ENV'] = config.build_prefix
82 d['ARCH'] = str(cc.bits)
83 d['PREFIX'] = prefix
84 d['PYTHON'] = python
85 d['PY3K'] = str(config.PY3K)
86 d['STDLIB_DIR'] = get_stdlib_dir()
87 d['SP_DIR'] = get_sp_dir()
88 d['SYS_PREFIX'] = sys.prefix
89 d['SYS_PYTHON'] = sys.executable
90 d['PERL_VER'] = get_perl_ver()
91 d['PY_VER'] = get_py_ver()
92 if get_npy_ver():
93 d['NPY_VER'] = get_npy_ver()
94 d['SRC_DIR'] = source.get_dir()
95 if "LANG" in os.environ:
96 d['LANG'] = os.environ['LANG']
97 if "HTTPS_PROXY" in os.environ:
98 d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']
99 if "HTTP_PROXY" in os.environ:
100 d['HTTP_PROXY'] = os.environ['HTTP_PROXY']
101
102 if m:
103 for var_name in m.get_value('build/script_env', []):
104 value = os.getenv(var_name)
105 if value is None:
106 value = '<UNDEFINED>'
107 d[var_name] = value
108
109 try:
110 d['CPU_COUNT'] = str(multiprocessing.cpu_count())
111 except NotImplementedError:
112 d['CPU_COUNT'] = "1"
113
114 d.update(**get_git_build_info(d['SRC_DIR']))
115 d['PATH'] = dict(os.environ)['PATH']
116 d = prepend_bin_path(d, prefix)
117
118 if sys.platform == 'win32': # -------- Windows
119 d['SCRIPTS'] = join(prefix, 'Scripts')
120 d['LIBRARY_PREFIX'] = join(prefix, 'Library')
121 d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')
122 d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')
123 d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')
124 # This probably should be done more generally
125 d['CYGWIN_PREFIX'] = prefix.replace('\\', '/').replace('C:', '/cygdrive/c')
126
127 d['R'] = join(prefix, 'Scripts', 'R.exe')
128 else: # -------- Unix
129 d['HOME'] = os.getenv('HOME', 'UNKNOWN')
130 d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')
131 d['R'] = join(prefix, 'bin', 'R')
132
133 if sys.platform == 'darwin': # -------- OSX
134 d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'
135 d['CFLAGS'] = '-arch %(OSX_ARCH)s' % d
136 d['CXXFLAGS'] = d['CFLAGS']
137 d['LDFLAGS'] = d['CFLAGS']
138 d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
139
140 elif sys.platform.startswith('linux'): # -------- Linux
141 d['LD_RUN_PATH'] = prefix + '/lib'
142
143 if m:
144 d['PKG_NAME'] = m.name()
145 d['PKG_VERSION'] = m.version()
146 d['PKG_BUILDNUM'] = str(m.build_number())
147 d['PKG_BUILD_STRING'] = str(m.build_id())
148 d['RECIPE_DIR'] = m.path
149
150 return d
151
152
153 if __name__ == '__main__':
154 e = get_dict()
155 for k in sorted(e):
156 assert isinstance(e[k], str), k
157 print('%s=%s' % (k, e[k]))
158
[end of conda_build/environ.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda_build/environ.py b/conda_build/environ.py
--- a/conda_build/environ.py
+++ b/conda_build/environ.py
@@ -106,10 +106,16 @@
value = '<UNDEFINED>'
d[var_name] = value
- try:
- d['CPU_COUNT'] = str(multiprocessing.cpu_count())
- except NotImplementedError:
- d['CPU_COUNT'] = "1"
+ if sys.platform == "darwin":
+ # multiprocessing.cpu_count() is not reliable on OSX
+ # See issue #645 on github.com/conda/conda-build
+ out, err = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True, stdout=subprocess.PIPE).communicate()
+ d['CPU_COUNT'] = out.decode('utf-8').strip()
+ else:
+ try:
+ d['CPU_COUNT'] = str(multiprocessing.cpu_count())
+ except NotImplementedError:
+ d['CPU_COUNT'] = "1"
d.update(**get_git_build_info(d['SRC_DIR']))
d['PATH'] = dict(os.environ)['PATH']
|
{"golden_diff": "diff --git a/conda_build/environ.py b/conda_build/environ.py\n--- a/conda_build/environ.py\n+++ b/conda_build/environ.py\n@@ -106,10 +106,16 @@\n value = '<UNDEFINED>'\n d[var_name] = value\n \n- try:\n- d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n- except NotImplementedError:\n- d['CPU_COUNT'] = \"1\"\n+ if sys.platform == \"darwin\":\n+ # multiprocessing.cpu_count() is not reliable on OSX\n+ # See issue #645 on github.com/conda/conda-build\n+ out, err = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True, stdout=subprocess.PIPE).communicate()\n+ d['CPU_COUNT'] = out.decode('utf-8').strip()\n+ else:\n+ try:\n+ d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n+ except NotImplementedError:\n+ d['CPU_COUNT'] = \"1\"\n \n d.update(**get_git_build_info(d['SRC_DIR']))\n d['PATH'] = dict(os.environ)['PATH']\n", "issue": "CPU_COUNT reliability in MacOSX\nHello!\nAfter a talk with @ccordoba12 in [conda-recipes/pr427](https://github.com/conda/conda-recipes/pull/427), it seems that CPU_COUNT is not reliable in MacOSX and he suggests to use \"sysctl -n hw.ncpu\".\n\nHave you encountered this bug ?\nI see that in python3.5, multiprocessing.cpu_count uses os.cpu_count and this builtin command uses also sysctl : posixmodule.c, line 11220:\n\n``` c\n#elif defined(__DragonFly__) || \\\n defined(__OpenBSD__) || \\\n defined(__FreeBSD__) || \\\n defined(__NetBSD__) || \\\n defined(__APPLE__)\n int mib[2];\n size_t len = sizeof(ncpu);\n mib[0] = CTL_HW;\n mib[1] = HW_NCPU;\n if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)\n ncpu = 0;\n```\n\nTo fix it, I can create a patch that add an OSX case in in [conda-build/environ.py:109](https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L109) with something like:\n\n``` python\nimport subprocess\nimport sys\n\nif sys.platform == 'darwin':\n out, err = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE).communicate()\n d['CPU_COUNT'] = str(out).strip()\n```\n\nDo you agree on this approach ?\nThank you very much\n\nCPU_COUNT reliability in MacOSX\nHello!\nAfter a talk with @ccordoba12 in [conda-recipes/pr427](https://github.com/conda/conda-recipes/pull/427), it seems that CPU_COUNT is not reliable in MacOSX and he suggests to use \"sysctl -n hw.ncpu\".\n\nHave you encountered this bug ?\nI see that in python3.5, multiprocessing.cpu_count uses os.cpu_count and this builtin command uses also sysctl : posixmodule.c, line 11220:\n\n``` c\n#elif defined(__DragonFly__) || \\\n defined(__OpenBSD__) || \\\n defined(__FreeBSD__) || \\\n defined(__NetBSD__) || \\\n defined(__APPLE__)\n int mib[2];\n size_t len = sizeof(ncpu);\n mib[0] = CTL_HW;\n mib[1] = HW_NCPU;\n if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)\n ncpu = 0;\n```\n\nTo fix it, I can create a patch that add an OSX case in in [conda-build/environ.py:109](https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L109) with something like:\n\n``` python\nimport subprocess\nimport sys\n\nif sys.platform == 'darwin':\n out, err = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE).communicate()\n d['CPU_COUNT'] = str(out).strip()\n```\n\nDo you agree on this approach ?\nThank you very much\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import join\nimport subprocess\nimport multiprocessing\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\nfrom conda_build import source\nfrom conda_build.scripts 
import prepend_bin_path\n\n\ndef get_perl_ver():\n return str(config.CONDA_PERL)\n\ndef get_py_ver():\n return '.'.join(str(config.CONDA_PY))\n\ndef get_npy_ver():\n if config.CONDA_NPY:\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n conda_npy = str(config.CONDA_NPY)\n return conda_npy[0] + '.' + conda_npy[1:]\n return ''\n\ndef get_stdlib_dir():\n return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else\n 'lib/python%s' % get_py_ver())\n\ndef get_sp_dir():\n return join(get_stdlib_dir(), 'site-packages')\n\ndef get_git_build_info(src_dir):\n env = os.environ.copy()\n d = {}\n git_dir = join(src_dir, '.git')\n if os.path.exists(git_dir):\n env['GIT_DIR'] = git_dir\n else:\n return d\n\n # grab information from describe\n key_name = lambda a: \"GIT_DESCRIBE_{}\".format(a)\n keys = [key_name(\"TAG\"), key_name(\"NUMBER\"), key_name(\"HASH\")]\n env = {str(key): str(value) for key, value in env.items()}\n process = subprocess.Popen([\"git\", \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n parts_length = len(parts)\n if parts_length == 3:\n d.update(dict(zip(keys, parts)))\n # get the _full_ hash of the current HEAD\n process = subprocess.Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n d['GIT_FULL_HASH'] = output\n # set up the build string\n if key_name('NUMBER') in d and key_name('HASH') in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],\n d[key_name('HASH')])\n\n return d\n\ndef get_dict(m=None, prefix=None):\n if not prefix:\n prefix = config.build_prefix\n\n python = config.build_python\n d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}\n d['CONDA_DEFAULT_ENV'] = config.build_prefix\n d['ARCH'] = str(cc.bits)\n d['PREFIX'] = prefix\n d['PYTHON'] = python\n d['PY3K'] = str(config.PY3K)\n d['STDLIB_DIR'] = get_stdlib_dir()\n d['SP_DIR'] = get_sp_dir()\n d['SYS_PREFIX'] = sys.prefix\n d['SYS_PYTHON'] = sys.executable\n d['PERL_VER'] = get_perl_ver()\n d['PY_VER'] = get_py_ver()\n if get_npy_ver():\n d['NPY_VER'] = get_npy_ver()\n d['SRC_DIR'] = source.get_dir()\n if \"LANG\" in os.environ:\n d['LANG'] = os.environ['LANG']\n if \"HTTPS_PROXY\" in os.environ:\n d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']\n if \"HTTP_PROXY\" in os.environ:\n d['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n\n if m:\n for var_name in m.get_value('build/script_env', []):\n value = os.getenv(var_name)\n if value is None:\n value = '<UNDEFINED>'\n d[var_name] = value\n\n try:\n d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n except NotImplementedError:\n d['CPU_COUNT'] = \"1\"\n\n d.update(**get_git_build_info(d['SRC_DIR']))\n d['PATH'] = dict(os.environ)['PATH']\n d = prepend_bin_path(d, prefix)\n\n if sys.platform == 'win32': # -------- Windows\n d['SCRIPTS'] = join(prefix, 'Scripts')\n d['LIBRARY_PREFIX'] = join(prefix, 'Library')\n d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')\n d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')\n d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')\n # This probably should be done more generally\n d['CYGWIN_PREFIX'] = prefix.replace('\\\\', '/').replace('C:', '/cygdrive/c')\n\n d['R'] = join(prefix, 'Scripts', 'R.exe')\n else: # -------- Unix\n d['HOME'] = os.getenv('HOME', 'UNKNOWN')\n d['PKG_CONFIG_PATH'] = 
join(prefix, 'lib', 'pkgconfig')\n d['R'] = join(prefix, 'bin', 'R')\n\n if sys.platform == 'darwin': # -------- OSX\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = '-arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = d['CFLAGS']\n d['LDFLAGS'] = d['CFLAGS']\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n\n elif sys.platform.startswith('linux'): # -------- Linux\n d['LD_RUN_PATH'] = prefix + '/lib'\n\n if m:\n d['PKG_NAME'] = m.name()\n d['PKG_VERSION'] = m.version()\n d['PKG_BUILDNUM'] = str(m.build_number())\n d['PKG_BUILD_STRING'] = str(m.build_id())\n d['RECIPE_DIR'] = m.path\n\n return d\n\n\nif __name__ == '__main__':\n e = get_dict()\n for k in sorted(e):\n assert isinstance(e[k], str), k\n print('%s=%s' % (k, e[k]))\n", "path": "conda_build/environ.py"}]}
| 3,032 | 251 |
gh_patches_debug_30442
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-3324
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the logging of SSH tokens
If a user has many different SSH keys assigned on one machine for different ssh-users on this machine, this line gets logged for each SSH key that is not used:
https://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L84
We should change this to "debug".
On the other hand we should add a log here
https://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L73
like
~~~~python
log.info(u"Using SSH key {0!s} for user {1!s}".format(tokclass.token.serial, options.get("user")))
~~~~
</issue>
<code>
[start of privacyidea/lib/applications/ssh.py]
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA
4 # Jul 18, 2014 Cornelius Kölbel
5 # License: AGPLv3
6 # contact: http://www.privacyidea.org
7 #
8 # This code is free software; you can redistribute it and/or
9 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
10 # License as published by the Free Software Foundation; either
11 # version 3 of the License, or any later version.
12 #
13 # This code is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public
19 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 """
22 This file is tested in tests/test_lib_machinetokens.py
23 """
24 from privacyidea.lib.applications import MachineApplicationBase
25 import logging
26 from privacyidea.lib.token import get_tokens
27 log = logging.getLogger(__name__)
28
29
30 class MachineApplication(MachineApplicationBase):
31 """
32 This is the application for SSH.
33
34 Possible options:
35 user
36
37 """
38 application_name = "ssh"
39 '''as the authentication item is no sensitive information,
40 we can set bulk_call to True. Thus the admin can call
41 all public keys to distribute them via salt.
42 FIXME: This is only true for SSH pub keys.
43 If we would support OTP with SSH, this might be sensitive information!
44 '''
45 allow_bulk_call = True
46
47 @staticmethod
48 def get_authentication_item(token_type,
49 serial,
50 challenge=None, options=None,
51 filter_param=None):
52 """
53 :param token_type: the type of the token. At the moment
54 we support the tokenype "sshkey"
55 :param serial: the serial number of the token.
56 :return auth_item: Return the SSH pub keys.
57 """
58 options = options or {}
59 ret = {}
60 filter_param = filter_param or {}
61 user_filter = filter_param.get("user")
62 if token_type.lower() == "sshkey":
63 toks = get_tokens(serial=serial, active=True)
64 if len(toks) == 1:
65 # We return this entry, either if no user_filter is requested
66 # or if the user_filter matches the user
67 if (user_filter and user_filter == options.get("user")) or \
68 not user_filter:
69 # tokenclass is a SSHkeyTokenClass
70 tokclass = toks[0]
71 # We just return the ssh public key, so that
72 # it can be included into authorized keys.
73 ret["sshkey"] = tokclass.get_sshkey()
74 # We return the username if the token is assigned to a
75 # user, so that this username could be used to save
76 # the ssh key accordingly
77 user_object = toks[0].user
78 if user_object:
79 uInfo = user_object.info
80 if "username" in uInfo:
81 ret["username"] = uInfo.get("username")
82 # ret["info"] = uInfo
83 else:
84 log.info("The requested user %s does not match the user "
85 "option (%s) of the SSH application." % (
86 user_filter, options.get("user")))
87 else:
88 log.info("Token %r, type %r is not supported by "
89 "SSH application module" % (serial, token_type))
90
91 return ret
92
93 @staticmethod
94 def get_options():
95 """
96 returns a dictionary with a list of required and optional options
97 """
98 return {'required': [],
99 'optional': ['user']}
100
[end of privacyidea/lib/applications/ssh.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/privacyidea/lib/applications/ssh.py b/privacyidea/lib/applications/ssh.py
--- a/privacyidea/lib/applications/ssh.py
+++ b/privacyidea/lib/applications/ssh.py
@@ -70,6 +70,8 @@
tokclass = toks[0]
# We just return the ssh public key, so that
# it can be included into authorized keys.
+ log.info(u"Using SSH key {0!s} for SSH user {1!s}".format(tokclass.token.serial,
+ options.get("user")))
ret["sshkey"] = tokclass.get_sshkey()
# We return the username if the token is assigned to a
# user, so that this username could be used to save
@@ -79,14 +81,13 @@
uInfo = user_object.info
if "username" in uInfo:
ret["username"] = uInfo.get("username")
- # ret["info"] = uInfo
else:
- log.info("The requested user %s does not match the user "
- "option (%s) of the SSH application." % (
+ log.debug(u"The requested user {0!s} does not match the user "
+ "option ({0!s}) of the SSH application.".format(
user_filter, options.get("user")))
else:
- log.info("Token %r, type %r is not supported by "
- "SSH application module" % (serial, token_type))
+ log.info(u"Token {0!r}, type {0!r} is not supported by "
+ "SSH application module".format(serial, token_type))
return ret
|
{"golden_diff": "diff --git a/privacyidea/lib/applications/ssh.py b/privacyidea/lib/applications/ssh.py\n--- a/privacyidea/lib/applications/ssh.py\n+++ b/privacyidea/lib/applications/ssh.py\n@@ -70,6 +70,8 @@\n tokclass = toks[0]\n # We just return the ssh public key, so that\n # it can be included into authorized keys.\n+ log.info(u\"Using SSH key {0!s} for SSH user {1!s}\".format(tokclass.token.serial,\n+ options.get(\"user\")))\n ret[\"sshkey\"] = tokclass.get_sshkey()\n # We return the username if the token is assigned to a\n # user, so that this username could be used to save\n@@ -79,14 +81,13 @@\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"username\"] = uInfo.get(\"username\")\n- # ret[\"info\"] = uInfo\n else:\n- log.info(\"The requested user %s does not match the user \"\n- \"option (%s) of the SSH application.\" % (\n+ log.debug(u\"The requested user {0!s} does not match the user \"\n+ \"option ({0!s}) of the SSH application.\".format(\n user_filter, options.get(\"user\")))\n else:\n- log.info(\"Token %r, type %r is not supported by \"\n- \"SSH application module\" % (serial, token_type))\n+ log.info(u\"Token {0!r}, type {0!r} is not supported by \"\n+ \"SSH application module\".format(serial, token_type))\n \n return ret\n", "issue": "Improve the logging of SSH tokens\nIf a user has many different SSH keys assigned on one machine for different ssh-users on this machine, this line gets logged for each SSH key, that is not used:\r\n\r\nhttps://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L84\r\n\r\nWe should change this to \"debug\".\r\n\r\nOn the other hand we should add a log here \r\nhttps://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L73\r\nlike\r\n\r\n~~~~python\r\nlog.info(u\"Using SSH key {0!s} for user {1!s}\".format(tokclass.token.serial, options.get(\"user\")))\r\n~~~~\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Jul 18, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file is tested in tests/test_lib_machinetokens.py\n\"\"\"\nfrom privacyidea.lib.applications import MachineApplicationBase\nimport logging\nfrom privacyidea.lib.token import get_tokens\nlog = logging.getLogger(__name__)\n\n\nclass MachineApplication(MachineApplicationBase):\n \"\"\"\n This is the application for SSH.\n\n Possible options:\n user\n\n \"\"\"\n application_name = \"ssh\"\n '''as the authentication item is no sensitive information,\n we can set bulk_call to True. 
Thus the admin can call\n all public keys to distribute them via salt.\n FIXME: This is only true for SSH pub keys.\n If we would support OTP with SSH, this might be sensitive information!\n '''\n allow_bulk_call = True\n\n @staticmethod\n def get_authentication_item(token_type,\n serial,\n challenge=None, options=None,\n filter_param=None):\n \"\"\"\n :param token_type: the type of the token. At the moment\n we support the tokenype \"sshkey\"\n :param serial: the serial number of the token.\n :return auth_item: Return the SSH pub keys.\n \"\"\"\n options = options or {}\n ret = {}\n filter_param = filter_param or {}\n user_filter = filter_param.get(\"user\")\n if token_type.lower() == \"sshkey\":\n toks = get_tokens(serial=serial, active=True)\n if len(toks) == 1:\n # We return this entry, either if no user_filter is requested\n # or if the user_filter matches the user\n if (user_filter and user_filter == options.get(\"user\")) or \\\n not user_filter:\n # tokenclass is a SSHkeyTokenClass\n tokclass = toks[0]\n # We just return the ssh public key, so that\n # it can be included into authorized keys.\n ret[\"sshkey\"] = tokclass.get_sshkey()\n # We return the username if the token is assigned to a\n # user, so that this username could be used to save\n # the ssh key accordingly\n user_object = toks[0].user\n if user_object:\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"username\"] = uInfo.get(\"username\")\n # ret[\"info\"] = uInfo\n else:\n log.info(\"The requested user %s does not match the user \"\n \"option (%s) of the SSH application.\" % (\n user_filter, options.get(\"user\")))\n else:\n log.info(\"Token %r, type %r is not supported by \"\n \"SSH application module\" % (serial, token_type))\n\n return ret\n\n @staticmethod\n def get_options():\n \"\"\"\n returns a dictionary with a list of required and optional options\n \"\"\"\n return {'required': [],\n 'optional': ['user']}\n", "path": "privacyidea/lib/applications/ssh.py"}]}
| 1,777 | 374 |
gh_patches_debug_20826
|
rasdani/github-patches
|
git_diff
|
dask__dask-1231
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add tests to package
In `setup.py`, make sure `tests` subdirectories are included in the package (otherwise, it is not possible for a user with an installed version of dask to verify its integrity).
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from os.path import exists
4 from setuptools import setup
5 import dask
6
7 extras_require = {
8 'array': ['numpy', 'toolz >= 0.7.2'],
9 'bag': ['cloudpickle >= 0.2.1', 'toolz >= 0.7.2', 'partd >= 0.3.3'],
10 'dataframe': ['numpy', 'pandas >= 0.18.0', 'toolz >= 0.7.2',
11 'partd >= 0.3.3', 'cloudpickle >= 0.2.1'],
12 'distributed': ['distributed >= 1.9'],
13 'imperative': ['toolz >= 0.7.2'],
14 }
15 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
16
17 setup(name='dask',
18 version=dask.__version__,
19 description='Minimal task scheduling abstraction',
20 url='http://github.com/dask/dask/',
21 maintainer='Matthew Rocklin',
22 maintainer_email='[email protected]',
23 license='BSD',
24 keywords='task-scheduling parallelism',
25 packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
26 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],
27 long_description=(open('README.rst').read() if exists('README.rst')
28 else ''),
29 extras_require=extras_require,
30 zip_safe=False)
31
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,12 @@
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
+packages = ['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
+ 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics']
+
+tests = [p + '.tests' for p in packages]
+
+
setup(name='dask',
version=dask.__version__,
description='Minimal task scheduling abstraction',
@@ -22,8 +28,7 @@
maintainer_email='[email protected]',
license='BSD',
keywords='task-scheduling parallelism',
- packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
- 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],
+ packages=packages + tests,
long_description=(open('README.rst').read() if exists('README.rst')
else ''),
extras_require=extras_require,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,12 @@\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n \n+packages = ['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n+ 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics']\n+\n+tests = [p + '.tests' for p in packages]\n+\n+\n setup(name='dask',\n version=dask.__version__,\n description='Minimal task scheduling abstraction',\n@@ -22,8 +28,7 @@\n maintainer_email='[email protected]',\n license='BSD',\n keywords='task-scheduling parallelism',\n- packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n- 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],\n+ packages=packages + tests,\n long_description=(open('README.rst').read() if exists('README.rst')\n else ''),\n extras_require=extras_require,\n", "issue": "Add tests to package\nIn `setup.py`, make sure `tests` subdirectories are included in the package (otherwise, it is not possible for a user with an installed version of dask to verify its integrity).\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom os.path import exists\nfrom setuptools import setup\nimport dask\n\nextras_require = {\n 'array': ['numpy', 'toolz >= 0.7.2'],\n 'bag': ['cloudpickle >= 0.2.1', 'toolz >= 0.7.2', 'partd >= 0.3.3'],\n 'dataframe': ['numpy', 'pandas >= 0.18.0', 'toolz >= 0.7.2',\n 'partd >= 0.3.3', 'cloudpickle >= 0.2.1'],\n 'distributed': ['distributed >= 1.9'],\n 'imperative': ['toolz >= 0.7.2'],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(name='dask',\n version=dask.__version__,\n description='Minimal task scheduling abstraction',\n url='http://github.com/dask/dask/',\n maintainer='Matthew Rocklin',\n maintainer_email='[email protected]',\n license='BSD',\n keywords='task-scheduling parallelism',\n packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],\n long_description=(open('README.rst').read() if exists('README.rst')\n else ''),\n extras_require=extras_require,\n zip_safe=False)\n", "path": "setup.py"}]}
| 958 | 263 |
gh_patches_debug_33617
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-7910
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Catch additional pymacaroons.Macaroon.deserialize exceptions
It appears that someone has [enumerated the various exceptions `pymacaroons.Macaroon.deserialize` might raise](https://github.com/ecordell/pymacaroons/issues/50). It'd be great if that were resolved, but we might want to further harden the work from #7424 to handle these other cases?
_Originally posted by @ewdurbin in https://github.com/pypa/warehouse/issues/7298#issuecomment-589957864_
---
**Good First Issue**: This issue is good for first time contributors. If you've already contributed to Warehouse, work on [another issue without this label](https://github.com/pypa/warehouse/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3A%22good+first+issue%22) instead. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). If you are working on this issue and have questions, feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).
</issue>
<code>
[start of warehouse/macaroons/services.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import binascii
14 import datetime
15 import json
16 import uuid
17
18 import pymacaroons
19
20 from pymacaroons.exceptions import MacaroonDeserializationException
21 from sqlalchemy.orm import joinedload
22 from sqlalchemy.orm.exc import NoResultFound
23 from zope.interface import implementer
24
25 from warehouse.accounts.models import User
26 from warehouse.macaroons.caveats import InvalidMacaroon, Verifier
27 from warehouse.macaroons.interfaces import IMacaroonService
28 from warehouse.macaroons.models import Macaroon
29
30
31 @implementer(IMacaroonService)
32 class DatabaseMacaroonService:
33 def __init__(self, db_session):
34 self.db = db_session
35
36 def _extract_raw_macaroon(self, prefixed_macaroon):
37 """
38 Returns the base64-encoded macaroon component of a PyPI macaroon,
39 dropping the prefix.
40
41 Returns None if the macaroon is None, has no prefix, or has the
42 wrong prefix.
43 """
44 if prefixed_macaroon is None:
45 return None
46
47 prefix, _, raw_macaroon = prefixed_macaroon.partition("-")
48 if prefix != "pypi" or not raw_macaroon:
49 return None
50
51 return raw_macaroon
52
53 def find_macaroon(self, macaroon_id):
54 """
55 Returns a macaroon model from the DB by its identifier.
56 Returns None if no macaroon has the given ID.
57 """
58 try:
59 dm = (
60 self.db.query(Macaroon)
61 .options(joinedload("user"))
62 .filter(Macaroon.id == uuid.UUID(macaroon_id))
63 .one()
64 )
65 except NoResultFound:
66 return None
67
68 return dm
69
70 def find_userid(self, raw_macaroon):
71 """
72 Returns the id of the user associated with the given raw (serialized)
73 macaroon.
74 """
75 raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
76 if raw_macaroon is None:
77 return None
78
79 try:
80 m = pymacaroons.Macaroon.deserialize(raw_macaroon)
81 except binascii.Error:
82 return None
83 except MacaroonDeserializationException:
84 return None
85
86 dm = self.find_macaroon(m.identifier.decode())
87
88 if dm is None:
89 return None
90
91 return dm.user.id
92
93 def verify(self, raw_macaroon, context, principals, permission):
94 """
95 Returns True if the given raw (serialized) macaroon is
96 valid for the context, principals, and requested permission.
97
98 Raises InvalidMacaroon if the macaroon is not valid.
99 """
100 raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
101 if raw_macaroon is None:
102 raise InvalidMacaroon("malformed or nonexistent macaroon")
103
104 try:
105 m = pymacaroons.Macaroon.deserialize(raw_macaroon)
106 except MacaroonDeserializationException:
107 raise InvalidMacaroon("malformed macaroon")
108
109 dm = self.find_macaroon(m.identifier.decode())
110
111 if dm is None:
112 raise InvalidMacaroon("deleted or nonexistent macaroon")
113
114 verifier = Verifier(m, context, principals, permission)
115 if verifier.verify(dm.key):
116 dm.last_used = datetime.datetime.now()
117 return True
118
119 raise InvalidMacaroon("invalid macaroon")
120
121 def create_macaroon(self, location, user_id, description, caveats):
122 """
123 Returns a tuple of a new raw (serialized) macaroon and its DB model.
124 The description provided is not embedded into the macaroon, only stored
125 in the DB model.
126 """
127 user = self.db.query(User).filter(User.id == user_id).one()
128
129 dm = Macaroon(user=user, description=description, caveats=caveats)
130 self.db.add(dm)
131 self.db.flush()
132
133 m = pymacaroons.Macaroon(
134 location=location,
135 identifier=str(dm.id),
136 key=dm.key,
137 version=pymacaroons.MACAROON_V2,
138 )
139 m.add_first_party_caveat(json.dumps(caveats))
140 serialized_macaroon = f"pypi-{m.serialize()}"
141 return serialized_macaroon, dm
142
143 def delete_macaroon(self, macaroon_id):
144 """
145 Deletes a macaroon from the DB by its identifier.
146 """
147 dm = self.find_macaroon(macaroon_id)
148 self.db.delete(dm)
149 self.db.flush()
150
151 def get_macaroon_by_description(self, user_id, description):
152 """
153 Returns a macaroon model from the DB with the given description,
154 if one exists for the given user.
155
156 Returns None if the user doesn't have a macaroon with this description.
157 """
158 try:
159 dm = (
160 self.db.query(Macaroon)
161 .options(joinedload("user"))
162 .filter(Macaroon.description == description)
163 .filter(Macaroon.user_id == user_id)
164 .one()
165 )
166 except NoResultFound:
167 return None
168
169 return dm
170
171
172 def database_macaroon_factory(context, request):
173 return DatabaseMacaroonService(request.db)
174
[end of warehouse/macaroons/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py
--- a/warehouse/macaroons/services.py
+++ b/warehouse/macaroons/services.py
@@ -13,6 +13,7 @@
import binascii
import datetime
import json
+import struct
import uuid
import pymacaroons
@@ -67,20 +68,32 @@
return dm
+ def _deserialize_raw_macaroon(self, raw_macaroon):
+ raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
+
+ if raw_macaroon is None:
+ raise InvalidMacaroon("malformed or nonexistent macaroon")
+
+ try:
+ return pymacaroons.Macaroon.deserialize(raw_macaroon)
+ except (
+ IndexError,
+ TypeError,
+ ValueError,
+ binascii.Error,
+ struct.error,
+ MacaroonDeserializationException,
+ ):
+ raise InvalidMacaroon("malformed macaroon")
+
def find_userid(self, raw_macaroon):
"""
Returns the id of the user associated with the given raw (serialized)
macaroon.
"""
- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
- if raw_macaroon is None:
- return None
-
try:
- m = pymacaroons.Macaroon.deserialize(raw_macaroon)
- except binascii.Error:
- return None
- except MacaroonDeserializationException:
+ m = self._deserialize_raw_macaroon(raw_macaroon)
+ except InvalidMacaroon:
return None
dm = self.find_macaroon(m.identifier.decode())
@@ -97,15 +110,7 @@
Raises InvalidMacaroon if the macaroon is not valid.
"""
- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
- if raw_macaroon is None:
- raise InvalidMacaroon("malformed or nonexistent macaroon")
-
- try:
- m = pymacaroons.Macaroon.deserialize(raw_macaroon)
- except MacaroonDeserializationException:
- raise InvalidMacaroon("malformed macaroon")
-
+ m = self._deserialize_raw_macaroon(raw_macaroon)
dm = self.find_macaroon(m.identifier.decode())
if dm is None:
|
{"golden_diff": "diff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py\n--- a/warehouse/macaroons/services.py\n+++ b/warehouse/macaroons/services.py\n@@ -13,6 +13,7 @@\n import binascii\n import datetime\n import json\n+import struct\n import uuid\n \n import pymacaroons\n@@ -67,20 +68,32 @@\n \n return dm\n \n+ def _deserialize_raw_macaroon(self, raw_macaroon):\n+ raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n+\n+ if raw_macaroon is None:\n+ raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n+\n+ try:\n+ return pymacaroons.Macaroon.deserialize(raw_macaroon)\n+ except (\n+ IndexError,\n+ TypeError,\n+ ValueError,\n+ binascii.Error,\n+ struct.error,\n+ MacaroonDeserializationException,\n+ ):\n+ raise InvalidMacaroon(\"malformed macaroon\")\n+\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n- if raw_macaroon is None:\n- return None\n-\n try:\n- m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n- except binascii.Error:\n- return None\n- except MacaroonDeserializationException:\n+ m = self._deserialize_raw_macaroon(raw_macaroon)\n+ except InvalidMacaroon:\n return None\n \n dm = self.find_macaroon(m.identifier.decode())\n@@ -97,15 +110,7 @@\n \n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n- if raw_macaroon is None:\n- raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n-\n- try:\n- m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n- except MacaroonDeserializationException:\n- raise InvalidMacaroon(\"malformed macaroon\")\n-\n+ m = self._deserialize_raw_macaroon(raw_macaroon)\n dm = self.find_macaroon(m.identifier.decode())\n \n if dm is None:\n", "issue": "Catch additional pymacaroons.Macaroon.deserialize exceptions\nIt appears that someone has [enumerated the various exceptions `pymacaroons.Macaroon.deserialize` might raise](https://github.com/ecordell/pymacaroons/issues/50). It'd be great if that were resolved, but we might want to further harden the work from #7424 to handle these other cases?\r\n\r\n_Originally posted by @ewdurbin in https://github.com/pypa/warehouse/issues/7298#issuecomment-589957864_\r\n\r\n---\r\n\r\n**Good First Issue**: This issue is good for first time contributors. If you've already contributed to Warehouse, work on [another issue without this label](https://github.com/pypa/warehouse/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3A%22good+first+issue%22) instead. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). 
If you are working on this issue and have questions, feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport datetime\nimport json\nimport uuid\n\nimport pymacaroons\n\nfrom pymacaroons.exceptions import MacaroonDeserializationException\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zope.interface import implementer\n\nfrom warehouse.accounts.models import User\nfrom warehouse.macaroons.caveats import InvalidMacaroon, Verifier\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.models import Macaroon\n\n\n@implementer(IMacaroonService)\nclass DatabaseMacaroonService:\n def __init__(self, db_session):\n self.db = db_session\n\n def _extract_raw_macaroon(self, prefixed_macaroon):\n \"\"\"\n Returns the base64-encoded macaroon component of a PyPI macaroon,\n dropping the prefix.\n\n Returns None if the macaroon is None, has no prefix, or has the\n wrong prefix.\n \"\"\"\n if prefixed_macaroon is None:\n return None\n\n prefix, _, raw_macaroon = prefixed_macaroon.partition(\"-\")\n if prefix != \"pypi\" or not raw_macaroon:\n return None\n\n return raw_macaroon\n\n def find_macaroon(self, macaroon_id):\n \"\"\"\n Returns a macaroon model from the DB by its identifier.\n Returns None if no macaroon has the given ID.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.id == uuid.UUID(macaroon_id))\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n return None\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except binascii.Error:\n return None\n except MacaroonDeserializationException:\n return None\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n return None\n\n return dm.user.id\n\n def verify(self, raw_macaroon, context, principals, permission):\n \"\"\"\n Returns True if the given raw (serialized) macaroon is\n valid for the context, principals, and requested permission.\n\n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except MacaroonDeserializationException:\n raise InvalidMacaroon(\"malformed macaroon\")\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n raise InvalidMacaroon(\"deleted or nonexistent macaroon\")\n\n verifier = Verifier(m, context, principals, permission)\n if verifier.verify(dm.key):\n 
dm.last_used = datetime.datetime.now()\n return True\n\n raise InvalidMacaroon(\"invalid macaroon\")\n\n def create_macaroon(self, location, user_id, description, caveats):\n \"\"\"\n Returns a tuple of a new raw (serialized) macaroon and its DB model.\n The description provided is not embedded into the macaroon, only stored\n in the DB model.\n \"\"\"\n user = self.db.query(User).filter(User.id == user_id).one()\n\n dm = Macaroon(user=user, description=description, caveats=caveats)\n self.db.add(dm)\n self.db.flush()\n\n m = pymacaroons.Macaroon(\n location=location,\n identifier=str(dm.id),\n key=dm.key,\n version=pymacaroons.MACAROON_V2,\n )\n m.add_first_party_caveat(json.dumps(caveats))\n serialized_macaroon = f\"pypi-{m.serialize()}\"\n return serialized_macaroon, dm\n\n def delete_macaroon(self, macaroon_id):\n \"\"\"\n Deletes a macaroon from the DB by its identifier.\n \"\"\"\n dm = self.find_macaroon(macaroon_id)\n self.db.delete(dm)\n self.db.flush()\n\n def get_macaroon_by_description(self, user_id, description):\n \"\"\"\n Returns a macaroon model from the DB with the given description,\n if one exists for the given user.\n\n Returns None if the user doesn't have a macaroon with this description.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.description == description)\n .filter(Macaroon.user_id == user_id)\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n\ndef database_macaroon_factory(context, request):\n return DatabaseMacaroonService(request.db)\n", "path": "warehouse/macaroons/services.py"}]}
| 2,554 | 545 |
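
The golden diff for this record centralizes macaroon deserialization behind a single `_deserialize_raw_macaroon` helper that converts every low-level parsing failure into the domain-level `InvalidMacaroon`. The standalone sketch below illustrates the same normalization pattern; the token format, the names, and the `InvalidToken` class are invented for illustration and are not warehouse code (the `struct.error` entry is kept only to mirror the patch's exception list).

```python
import base64
import binascii
import json
import struct


class InvalidToken(Exception):
    """Single domain-level error raised for any malformed token."""


def deserialize_token(raw):
    """Funnel every low-level parsing failure into one exception type,
    so callers handle only InvalidToken instead of library-specific errors."""
    try:
        payload = base64.b64decode(raw, validate=True)
        return json.loads(payload)
    except (IndexError, TypeError, ValueError, binascii.Error, struct.error):
        raise InvalidToken("malformed token")


try:
    deserialize_token("not-base64!!")
except InvalidToken as exc:
    print(exc)  # -> malformed token
```
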
gh_patches_debug_14884
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-1205
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: ` chars are not escaped when parsing !source

When responding to faulty `!source` commands, the backticks aren't escaped and a formatting issue occurs.
This _might_ lead to being able to ping roles/users, should Discord ever decide to change the embed ping behavior.
</issue>
<code>
[start of bot/exts/info/source.py]
1 import inspect
2 from pathlib import Path
3 from typing import Optional, Tuple, Union
4
5 from discord import Embed
6 from discord.ext import commands
7
8 from bot.bot import Bot
9 from bot.constants import URLs
10
11 SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]
12
13
14 class SourceConverter(commands.Converter):
15 """Convert an argument into a help command, tag, command, or cog."""
16
17 async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
18 """Convert argument into source object."""
19 if argument.lower().startswith("help"):
20 return ctx.bot.help_command
21
22 cog = ctx.bot.get_cog(argument)
23 if cog:
24 return cog
25
26 cmd = ctx.bot.get_command(argument)
27 if cmd:
28 return cmd
29
30 tags_cog = ctx.bot.get_cog("Tags")
31 show_tag = True
32
33 if not tags_cog:
34 show_tag = False
35 elif argument.lower() in tags_cog._cache:
36 return argument.lower()
37
38 raise commands.BadArgument(
39 f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
40 )
41
42
43 class BotSource(commands.Cog):
44 """Displays information about the bot's source code."""
45
46 def __init__(self, bot: Bot):
47 self.bot = bot
48
49 @commands.command(name="source", aliases=("src",))
50 async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
51 """Display information and a GitHub link to the source code of a command, tag, or cog."""
52 if not source_item:
53 embed = Embed(title="Bot's GitHub Repository")
54 embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
55 embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
56 await ctx.send(embed=embed)
57 return
58
59 embed = await self.build_embed(source_item)
60 await ctx.send(embed=embed)
61
62 def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
63 """
64 Build GitHub link of source item, return this link, file location and first line number.
65
66 Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
67 """
68 if isinstance(source_item, commands.Command):
69 src = source_item.callback.__code__
70 filename = src.co_filename
71 elif isinstance(source_item, str):
72 tags_cog = self.bot.get_cog("Tags")
73 filename = tags_cog._cache[source_item]["location"]
74 else:
75 src = type(source_item)
76 try:
77 filename = inspect.getsourcefile(src)
78 except TypeError:
79 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
80
81 if not isinstance(source_item, str):
82 try:
83 lines, first_line_no = inspect.getsourcelines(src)
84 except OSError:
85 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
86
87 lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
88 else:
89 first_line_no = None
90 lines_extension = ""
91
92 # Handle tag file location differently than others to avoid errors in some cases
93 if not first_line_no:
94 file_location = Path(filename).relative_to("/bot/")
95 else:
96 file_location = Path(filename).relative_to(Path.cwd()).as_posix()
97
98 url = f"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}"
99
100 return url, file_location, first_line_no or None
101
102 async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
103 """Build embed based on source object."""
104 url, location, first_line = self.get_source_link(source_object)
105
106 if isinstance(source_object, commands.HelpCommand):
107 title = "Help Command"
108 description = source_object.__doc__.splitlines()[1]
109 elif isinstance(source_object, commands.Command):
110 description = source_object.short_doc
111 title = f"Command: {source_object.qualified_name}"
112 elif isinstance(source_object, str):
113 title = f"Tag: {source_object}"
114 description = ""
115 else:
116 title = f"Cog: {source_object.qualified_name}"
117 description = source_object.description.splitlines()[0]
118
119 embed = Embed(title=title, description=description)
120 embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
121 line_text = f":{first_line}" if first_line else ""
122 embed.set_footer(text=f"{location}{line_text}")
123
124 return embed
125
126
127 def setup(bot: Bot) -> None:
128 """Load the BotSource cog."""
129 bot.add_cog(BotSource(bot))
130
[end of bot/exts/info/source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py
--- a/bot/exts/info/source.py
+++ b/bot/exts/info/source.py
@@ -2,7 +2,7 @@
from pathlib import Path
from typing import Optional, Tuple, Union
-from discord import Embed
+from discord import Embed, utils
from discord.ext import commands
from bot.bot import Bot
@@ -35,8 +35,10 @@
elif argument.lower() in tags_cog._cache:
return argument.lower()
+ escaped_arg = utils.escape_markdown(argument)
+
raise commands.BadArgument(
- f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
+ f"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog."
)
|
{"golden_diff": "diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py\n--- a/bot/exts/info/source.py\n+++ b/bot/exts/info/source.py\n@@ -2,7 +2,7 @@\n from pathlib import Path\n from typing import Optional, Tuple, Union\n \n-from discord import Embed\n+from discord import Embed, utils\n from discord.ext import commands\n \n from bot.bot import Bot\n@@ -35,8 +35,10 @@\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n \n+ escaped_arg = utils.escape_markdown(argument)\n+\n raise commands.BadArgument(\n- f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n+ f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n", "issue": "Bug: ` chars are not escaped when parsing !source\n\r\n\r\nWhen responding to faulty `!source` commands, the backticks aren't escaped and a formatting issue occurs.\r\n\r\nThis _might_ lead to being able to ping roles/users, should Discord ever decide to change the embed ping behavior.\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n raise commands.BadArgument(\n f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. 
via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n src = source_item.callback.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}]}
| 1,931 | 195 |
gh_patches_debug_41439
|
rasdani/github-patches
|
git_diff
|
coqui-ai__TTS-3336
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Punctuation restoration works incorrect
### Describe the bug
Punctuation restoration works incorrect.
### To Reproduce
```
from TTS.tts.utils.text.punctuation import Punctuation, _PUNC_IDX, PuncPosition
# original text "...i think i understand."
punctuator = Punctuation()
text = ['', 'i think i understand']
punctuation = [_PUNC_IDX(punc='...', position=PuncPosition.BEGIN), _PUNC_IDX(punc='.', position=PuncPosition.END)]
punctuator.restore(text, punctuation)
# result ["....", "i think i understand"]
```
### Expected behavior
result: `["...i think i understand."]`
### Logs
_No response_
### Environment
```shell
{
"CUDA": {
"GPU": [
"NVIDIA RTX A6000"
],
"available": true,
"version": "11.8"
},
"Packages": {
"PyTorch_debug": false,
"PyTorch_version": "2.0.0+cu118",
"TTS": "0.16.6",
"numpy": "1.22.0"
},
"System": {
"OS": "Linux",
"architecture": [
"64bit",
""
],
"processor": "x86_64",
"python": "3.10.12",
"version": "#170-Ubuntu SMP Fri Jun 16 13:43:31 UTC 2023"
}
}
```
### Additional context
_No response_
</issue>
<code>
[start of TTS/tts/utils/text/punctuation.py]
1 import collections
2 import re
3 from enum import Enum
4
5 import six
6
7 _DEF_PUNCS = ';:,.!?¡¿—…"«»“”'
8
9 _PUNC_IDX = collections.namedtuple("_punc_index", ["punc", "position"])
10
11
12 class PuncPosition(Enum):
13 """Enum for the punctuations positions"""
14
15 BEGIN = 0
16 END = 1
17 MIDDLE = 2
18 ALONE = 3
19
20
21 class Punctuation:
22 """Handle punctuations in text.
23
24 Just strip punctuations from text or strip and restore them later.
25
26 Args:
27 puncs (str): The punctuations to be processed. Defaults to `_DEF_PUNCS`.
28
29 Example:
30 >>> punc = Punctuation()
31 >>> punc.strip("This is. example !")
32 'This is example'
33
34 >>> text_striped, punc_map = punc.strip_to_restore("This is. example !")
35 >>> ' '.join(text_striped)
36 'This is example'
37
38 >>> text_restored = punc.restore(text_striped, punc_map)
39 >>> text_restored[0]
40 'This is. example !'
41 """
42
43 def __init__(self, puncs: str = _DEF_PUNCS):
44 self.puncs = puncs
45
46 @staticmethod
47 def default_puncs():
48 """Return default set of punctuations."""
49 return _DEF_PUNCS
50
51 @property
52 def puncs(self):
53 return self._puncs
54
55 @puncs.setter
56 def puncs(self, value):
57 if not isinstance(value, six.string_types):
58 raise ValueError("[!] Punctuations must be of type str.")
59 self._puncs = "".join(list(dict.fromkeys(list(value)))) # remove duplicates without changing the oreder
60 self.puncs_regular_exp = re.compile(rf"(\s*[{re.escape(self._puncs)}]+\s*)+")
61
62 def strip(self, text):
63 """Remove all the punctuations by replacing with `space`.
64
65 Args:
66 text (str): The text to be processed.
67
68 Example::
69
70 "This is. example !" -> "This is example "
71 """
72 return re.sub(self.puncs_regular_exp, " ", text).rstrip().lstrip()
73
74 def strip_to_restore(self, text):
75 """Remove punctuations from text to restore them later.
76
77 Args:
78 text (str): The text to be processed.
79
80 Examples ::
81
82 "This is. example !" -> [["This is", "example"], [".", "!"]]
83
84 """
85 text, puncs = self._strip_to_restore(text)
86 return text, puncs
87
88 def _strip_to_restore(self, text):
89 """Auxiliary method for Punctuation.preserve()"""
90 matches = list(re.finditer(self.puncs_regular_exp, text))
91 if not matches:
92 return [text], []
93 # the text is only punctuations
94 if len(matches) == 1 and matches[0].group() == text:
95 return [], [_PUNC_IDX(text, PuncPosition.ALONE)]
96 # build a punctuation map to be used later to restore punctuations
97 puncs = []
98 for match in matches:
99 position = PuncPosition.MIDDLE
100 if match == matches[0] and text.startswith(match.group()):
101 position = PuncPosition.BEGIN
102 elif match == matches[-1] and text.endswith(match.group()):
103 position = PuncPosition.END
104 puncs.append(_PUNC_IDX(match.group(), position))
105 # convert str text to a List[str], each item is separated by a punctuation
106 splitted_text = []
107 for idx, punc in enumerate(puncs):
108 split = text.split(punc.punc)
109 prefix, suffix = split[0], punc.punc.join(split[1:])
110 splitted_text.append(prefix)
111 # if the text does not end with a punctuation, add it to the last item
112 if idx == len(puncs) - 1 and len(suffix) > 0:
113 splitted_text.append(suffix)
114 text = suffix
115 return splitted_text, puncs
116
117 @classmethod
118 def restore(cls, text, puncs):
119 """Restore punctuation in a text.
120
121 Args:
122 text (str): The text to be processed.
123 puncs (List[str]): The list of punctuations map to be used for restoring.
124
125 Examples ::
126
127 ['This is', 'example'], ['.', '!'] -> "This is. example!"
128
129 """
130 return cls._restore(text, puncs, 0)
131
132 @classmethod
133 def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements
134 """Auxiliary method for Punctuation.restore()"""
135 if not puncs:
136 return text
137
138 # nothing have been phonemized, returns the puncs alone
139 if not text:
140 return ["".join(m.punc for m in puncs)]
141
142 current = puncs[0]
143
144 if current.position == PuncPosition.BEGIN:
145 return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)
146
147 if current.position == PuncPosition.END:
148 return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)
149
150 if current.position == PuncPosition.ALONE:
151 return [current.mark] + cls._restore(text, puncs[1:], num + 1)
152
153 # POSITION == MIDDLE
154 if len(text) == 1: # pragma: nocover
155 # a corner case where the final part of an intermediate
156 # mark (I) has not been phonemized
157 return cls._restore([text[0] + current.punc], puncs[1:], num)
158
159 return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)
160
161
162 # if __name__ == "__main__":
163 # punc = Punctuation()
164 # text = "This is. This is, example!"
165
166 # print(punc.strip(text))
167
168 # split_text, puncs = punc.strip_to_restore(text)
169 # print(split_text, " ---- ", puncs)
170
171 # restored_text = punc.restore(split_text, puncs)
172 # print(restored_text)
173
[end of TTS/tts/utils/text/punctuation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/TTS/tts/utils/text/punctuation.py b/TTS/tts/utils/text/punctuation.py
--- a/TTS/tts/utils/text/punctuation.py
+++ b/TTS/tts/utils/text/punctuation.py
@@ -15,7 +15,6 @@
BEGIN = 0
END = 1
MIDDLE = 2
- ALONE = 3
class Punctuation:
@@ -92,7 +91,7 @@
return [text], []
# the text is only punctuations
if len(matches) == 1 and matches[0].group() == text:
- return [], [_PUNC_IDX(text, PuncPosition.ALONE)]
+ return [], [_PUNC_IDX(text, PuncPosition.BEGIN)]
# build a punctuation map to be used later to restore punctuations
puncs = []
for match in matches:
@@ -107,11 +106,14 @@
for idx, punc in enumerate(puncs):
split = text.split(punc.punc)
prefix, suffix = split[0], punc.punc.join(split[1:])
+ text = suffix
+ if prefix == "":
+ # We don't want to insert an empty string in case of initial punctuation
+ continue
splitted_text.append(prefix)
# if the text does not end with a punctuation, add it to the last item
if idx == len(puncs) - 1 and len(suffix) > 0:
splitted_text.append(suffix)
- text = suffix
return splitted_text, puncs
@classmethod
@@ -127,10 +129,10 @@
['This is', 'example'], ['.', '!'] -> "This is. example!"
"""
- return cls._restore(text, puncs, 0)
+ return cls._restore(text, puncs)
@classmethod
- def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements
+ def _restore(cls, text, puncs): # pylint: disable=too-many-return-statements
"""Auxiliary method for Punctuation.restore()"""
if not puncs:
return text
@@ -142,21 +144,18 @@
current = puncs[0]
if current.position == PuncPosition.BEGIN:
- return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)
+ return cls._restore([current.punc + text[0]] + text[1:], puncs[1:])
if current.position == PuncPosition.END:
- return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)
-
- if current.position == PuncPosition.ALONE:
- return [current.mark] + cls._restore(text, puncs[1:], num + 1)
+ return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:])
# POSITION == MIDDLE
if len(text) == 1: # pragma: nocover
# a corner case where the final part of an intermediate
# mark (I) has not been phonemized
- return cls._restore([text[0] + current.punc], puncs[1:], num)
+ return cls._restore([text[0] + current.punc], puncs[1:])
- return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)
+ return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:])
# if __name__ == "__main__":
|
{"golden_diff": "diff --git a/TTS/tts/utils/text/punctuation.py b/TTS/tts/utils/text/punctuation.py\n--- a/TTS/tts/utils/text/punctuation.py\n+++ b/TTS/tts/utils/text/punctuation.py\n@@ -15,7 +15,6 @@\n BEGIN = 0\n END = 1\n MIDDLE = 2\n- ALONE = 3\n \n \n class Punctuation:\n@@ -92,7 +91,7 @@\n return [text], []\n # the text is only punctuations\n if len(matches) == 1 and matches[0].group() == text:\n- return [], [_PUNC_IDX(text, PuncPosition.ALONE)]\n+ return [], [_PUNC_IDX(text, PuncPosition.BEGIN)]\n # build a punctuation map to be used later to restore punctuations\n puncs = []\n for match in matches:\n@@ -107,11 +106,14 @@\n for idx, punc in enumerate(puncs):\n split = text.split(punc.punc)\n prefix, suffix = split[0], punc.punc.join(split[1:])\n+ text = suffix\n+ if prefix == \"\":\n+ # We don't want to insert an empty string in case of initial punctuation\n+ continue\n splitted_text.append(prefix)\n # if the text does not end with a punctuation, add it to the last item\n if idx == len(puncs) - 1 and len(suffix) > 0:\n splitted_text.append(suffix)\n- text = suffix\n return splitted_text, puncs\n \n @classmethod\n@@ -127,10 +129,10 @@\n ['This is', 'example'], ['.', '!'] -> \"This is. example!\"\n \n \"\"\"\n- return cls._restore(text, puncs, 0)\n+ return cls._restore(text, puncs)\n \n @classmethod\n- def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements\n+ def _restore(cls, text, puncs): # pylint: disable=too-many-return-statements\n \"\"\"Auxiliary method for Punctuation.restore()\"\"\"\n if not puncs:\n return text\n@@ -142,21 +144,18 @@\n current = puncs[0]\n \n if current.position == PuncPosition.BEGIN:\n- return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)\n+ return cls._restore([current.punc + text[0]] + text[1:], puncs[1:])\n \n if current.position == PuncPosition.END:\n- return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)\n-\n- if current.position == PuncPosition.ALONE:\n- return [current.mark] + cls._restore(text, puncs[1:], num + 1)\n+ return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:])\n \n # POSITION == MIDDLE\n if len(text) == 1: # pragma: nocover\n # a corner case where the final part of an intermediate\n # mark (I) has not been phonemized\n- return cls._restore([text[0] + current.punc], puncs[1:], num)\n+ return cls._restore([text[0] + current.punc], puncs[1:])\n \n- return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)\n+ return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:])\n \n \n # if __name__ == \"__main__\":\n", "issue": "[Bug] Punctuation restoration works incorrect\n### Describe the bug\n\nPunctuation restoration works incorrect.\n\n### To Reproduce\n\n```\r\nfrom TTS.tts.utils.text.punctuation import Punctuation, _PUNC_IDX, PuncPosition\r\n\r\n# original text \"...i think i understand.\"\r\npunctuator = Punctuation()\r\ntext = ['', 'i think i understand']\r\npunctuation = [_PUNC_IDX(punc='...', position=PuncPosition.BEGIN), _PUNC_IDX(punc='.', position=PuncPosition.END)]\r\npunctuator.restore(text, punctuation)\r\n\r\n# result [\"....\", \"i think i understand\"]\r\n```\n\n### Expected behavior\n\nresult: `[\"...i think i understand.\"]`\n\n### Logs\n\n_No response_\n\n### Environment\n\n```shell\n{\r\n \"CUDA\": {\r\n \"GPU\": [\r\n \"NVIDIA RTX A6000\"\r\n ],\r\n \"available\": true,\r\n \"version\": \"11.8\"\r\n },\r\n \"Packages\": {\r\n \"PyTorch_debug\": false,\r\n \"PyTorch_version\": \"2.0.0+cu118\",\r\n 
\"TTS\": \"0.16.6\",\r\n \"numpy\": \"1.22.0\"\r\n },\r\n \"System\": {\r\n \"OS\": \"Linux\",\r\n \"architecture\": [\r\n \"64bit\",\r\n \"\"\r\n ],\r\n \"processor\": \"x86_64\",\r\n \"python\": \"3.10.12\",\r\n \"version\": \"#170-Ubuntu SMP Fri Jun 16 13:43:31 UTC 2023\"\r\n }\r\n}\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import collections\nimport re\nfrom enum import Enum\n\nimport six\n\n_DEF_PUNCS = ';:,.!?\u00a1\u00bf\u2014\u2026\"\u00ab\u00bb\u201c\u201d'\n\n_PUNC_IDX = collections.namedtuple(\"_punc_index\", [\"punc\", \"position\"])\n\n\nclass PuncPosition(Enum):\n \"\"\"Enum for the punctuations positions\"\"\"\n\n BEGIN = 0\n END = 1\n MIDDLE = 2\n ALONE = 3\n\n\nclass Punctuation:\n \"\"\"Handle punctuations in text.\n\n Just strip punctuations from text or strip and restore them later.\n\n Args:\n puncs (str): The punctuations to be processed. Defaults to `_DEF_PUNCS`.\n\n Example:\n >>> punc = Punctuation()\n >>> punc.strip(\"This is. example !\")\n 'This is example'\n\n >>> text_striped, punc_map = punc.strip_to_restore(\"This is. example !\")\n >>> ' '.join(text_striped)\n 'This is example'\n\n >>> text_restored = punc.restore(text_striped, punc_map)\n >>> text_restored[0]\n 'This is. example !'\n \"\"\"\n\n def __init__(self, puncs: str = _DEF_PUNCS):\n self.puncs = puncs\n\n @staticmethod\n def default_puncs():\n \"\"\"Return default set of punctuations.\"\"\"\n return _DEF_PUNCS\n\n @property\n def puncs(self):\n return self._puncs\n\n @puncs.setter\n def puncs(self, value):\n if not isinstance(value, six.string_types):\n raise ValueError(\"[!] Punctuations must be of type str.\")\n self._puncs = \"\".join(list(dict.fromkeys(list(value)))) # remove duplicates without changing the oreder\n self.puncs_regular_exp = re.compile(rf\"(\\s*[{re.escape(self._puncs)}]+\\s*)+\")\n\n def strip(self, text):\n \"\"\"Remove all the punctuations by replacing with `space`.\n\n Args:\n text (str): The text to be processed.\n\n Example::\n\n \"This is. example !\" -> \"This is example \"\n \"\"\"\n return re.sub(self.puncs_regular_exp, \" \", text).rstrip().lstrip()\n\n def strip_to_restore(self, text):\n \"\"\"Remove punctuations from text to restore them later.\n\n Args:\n text (str): The text to be processed.\n\n Examples ::\n\n \"This is. 
example !\" -> [[\"This is\", \"example\"], [\".\", \"!\"]]\n\n \"\"\"\n text, puncs = self._strip_to_restore(text)\n return text, puncs\n\n def _strip_to_restore(self, text):\n \"\"\"Auxiliary method for Punctuation.preserve()\"\"\"\n matches = list(re.finditer(self.puncs_regular_exp, text))\n if not matches:\n return [text], []\n # the text is only punctuations\n if len(matches) == 1 and matches[0].group() == text:\n return [], [_PUNC_IDX(text, PuncPosition.ALONE)]\n # build a punctuation map to be used later to restore punctuations\n puncs = []\n for match in matches:\n position = PuncPosition.MIDDLE\n if match == matches[0] and text.startswith(match.group()):\n position = PuncPosition.BEGIN\n elif match == matches[-1] and text.endswith(match.group()):\n position = PuncPosition.END\n puncs.append(_PUNC_IDX(match.group(), position))\n # convert str text to a List[str], each item is separated by a punctuation\n splitted_text = []\n for idx, punc in enumerate(puncs):\n split = text.split(punc.punc)\n prefix, suffix = split[0], punc.punc.join(split[1:])\n splitted_text.append(prefix)\n # if the text does not end with a punctuation, add it to the last item\n if idx == len(puncs) - 1 and len(suffix) > 0:\n splitted_text.append(suffix)\n text = suffix\n return splitted_text, puncs\n\n @classmethod\n def restore(cls, text, puncs):\n \"\"\"Restore punctuation in a text.\n\n Args:\n text (str): The text to be processed.\n puncs (List[str]): The list of punctuations map to be used for restoring.\n\n Examples ::\n\n ['This is', 'example'], ['.', '!'] -> \"This is. example!\"\n\n \"\"\"\n return cls._restore(text, puncs, 0)\n\n @classmethod\n def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements\n \"\"\"Auxiliary method for Punctuation.restore()\"\"\"\n if not puncs:\n return text\n\n # nothing have been phonemized, returns the puncs alone\n if not text:\n return [\"\".join(m.punc for m in puncs)]\n\n current = puncs[0]\n\n if current.position == PuncPosition.BEGIN:\n return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)\n\n if current.position == PuncPosition.END:\n return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)\n\n if current.position == PuncPosition.ALONE:\n return [current.mark] + cls._restore(text, puncs[1:], num + 1)\n\n # POSITION == MIDDLE\n if len(text) == 1: # pragma: nocover\n # a corner case where the final part of an intermediate\n # mark (I) has not been phonemized\n return cls._restore([text[0] + current.punc], puncs[1:], num)\n\n return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)\n\n\n# if __name__ == \"__main__\":\n# punc = Punctuation()\n# text = \"This is. This is, example!\"\n\n# print(punc.strip(text))\n\n# split_text, puncs = punc.strip_to_restore(text)\n# print(split_text, \" ---- \", puncs)\n\n# restored_text = punc.restore(split_text, puncs)\n# print(restored_text)\n", "path": "TTS/tts/utils/text/punctuation.py"}]}
| 2,737 | 869 |
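
The punctuation patch above skips the empty chunk produced when a string begins with punctuation, which is what lets `restore` rebuild the exact original text. The sketch below demonstrates that round-trip invariant with a simpler interleaving scheme; it is an illustration of the property, not the TTS implementation.

```python
import re


def strip_to_restore(text, puncs=".!?…"):
    """Split text into non-punctuation chunks plus the punctuation runs,
    keeping their order so the original string can be rebuilt exactly."""
    pattern = re.compile(rf"[{re.escape(puncs)}]+")
    return pattern.split(text), pattern.findall(text)


def restore(chunks, marks):
    """Re-interleave chunks and punctuation runs (chunks always lead)."""
    out = []
    for i, chunk in enumerate(chunks):
        out.append(chunk)
        if i < len(marks):
            out.append(marks[i])
    return "".join(out)


chunks, marks = strip_to_restore("...i think i understand.")
print(restore(chunks, marks))  # ...i think i understand.
```
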
gh_patches_debug_2552
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-6839
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`segmentation.watershed` returns wrong data type
### Description:
The documentation of `segmentation.watershed` says that:
> ### Returns
> **out**: ndarray
> A labeled matrix of the same type and shape as markers
[[0.18.x]](https://scikit-image.org/docs/0.18.x/api/skimage.segmentation.html#skimage.segmentation.watershed) [[0.19.x]](https://scikit-image.org/docs/0.19.x/api/skimage.segmentation.html#skimage.segmentation.watershed)
I have tested this with 0.18.1:
```python
import skimage.segmentation
import numpy as np
print(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)
```
Gives `int32` but `uint16` is expected.
### Way to reproduce:
```Python
import skimage.segmentation
import numpy as np
print(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)
```
### Traceback or output:
```Shell
int32
```
### Version information:
```Shell
3.8.5 (default, Sep 4 2020, 07:30:14)
[GCC 7.3.0]
Linux-5.8.0-36-generic-x86_64-with-glibc2.10
scikit-image version: 0.18.1
numpy version: 1.20.3
```
</issue>
<code>
[start of skimage/segmentation/_watershed.py]
1 """watershed.py - watershed algorithm
2
3 This module implements a watershed algorithm that apportions pixels into
4 marked basins. The algorithm uses a priority queue to hold the pixels
5 with the metric for the priority queue being pixel value, then the time
6 of entry into the queue - this settles ties in favor of the closest marker.
7
8 Some ideas taken from
9 Soille, "Automated Basin Delineation from Digital Elevation Models Using
10 Mathematical Morphology", Signal Processing 20 (1990) 171-182.
11
12 The most important insight in the paper is that entry time onto the queue
13 solves two problems: a pixel should be assigned to the neighbor with the
14 largest gradient or, if there is no gradient, pixels on a plateau should
15 be split between markers on opposite sides.
16 """
17
18 import numpy as np
19 from scipy import ndimage as ndi
20
21 from . import _watershed_cy
22 from ..morphology.extrema import local_minima
23 from ..morphology._util import (_validate_connectivity,
24 _offsets_to_raveled_neighbors)
25 from ..util import crop, regular_seeds
26
27
28 def _validate_inputs(image, markers, mask, connectivity):
29 """Ensure that all inputs to watershed have matching shapes and types.
30
31 Parameters
32 ----------
33 image : array
34 The input image.
35 markers : int or array of int
36 The marker image.
37 mask : array, or None
38 A boolean mask, True where we want to compute the watershed.
39 connectivity : int in {1, ..., image.ndim}
40 The connectivity of the neighborhood of a pixel.
41
42 Returns
43 -------
44 image, markers, mask : arrays
45 The validated and formatted arrays. Image will have dtype float64,
46 markers int32, and mask int8. If ``None`` was given for the mask,
47 it is a volume of all 1s.
48
49 Raises
50 ------
51 ValueError
52 If the shapes of the given arrays don't match.
53 """
54 n_pixels = image.size
55 if mask is None:
56 # Use a complete `True` mask if none is provided
57 mask = np.ones(image.shape, bool)
58 else:
59 mask = np.asanyarray(mask, dtype=bool)
60 n_pixels = np.sum(mask)
61 if mask.shape != image.shape:
62 message = (f'`mask` (shape {mask.shape}) must have same shape '
63 f'as `image` (shape {image.shape})')
64 raise ValueError(message)
65 if markers is None:
66 markers_bool = local_minima(image, connectivity=connectivity) * mask
67 footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)
68 markers = ndi.label(markers_bool, structure=footprint)[0]
69 elif not isinstance(markers, (np.ndarray, list, tuple)):
70 # not array-like, assume int
71 # given int, assume that number of markers *within mask*.
72 markers = regular_seeds(image.shape,
73 int(markers / (n_pixels / image.size)))
74 markers *= mask
75 else:
76 markers = np.asanyarray(markers) * mask
77 if markers.shape != image.shape:
78 message = (f'`markers` (shape {markers.shape}) must have same '
79 f'shape as `image` (shape {image.shape})')
80 raise ValueError(message)
81 return (image.astype(np.float64),
82 markers.astype(np.int32),
83 mask.astype(np.int8))
84
85
86 def watershed(image, markers=None, connectivity=1, offset=None, mask=None,
87 compactness=0, watershed_line=False):
88 """Find watershed basins in `image` flooded from given `markers`.
89
90 Parameters
91 ----------
92 image : ndarray (2-D, 3-D, ...)
93 Data array where the lowest value points are labeled first.
94 markers : int, or ndarray of int, same shape as `image`, optional
95 The desired number of markers, or an array marking the basins with the
96 values to be assigned in the label matrix. Zero means not a marker. If
97 ``None`` (no markers given), the local minima of the image are used as
98 markers.
99 connectivity : ndarray, optional
100 An array with the same number of dimensions as `image` whose
101 non-zero elements indicate neighbors for connection.
102 Following the scipy convention, default is a one-connected array of
103 the dimension of the image.
104 offset : array_like of shape image.ndim, optional
105 offset of the connectivity (one offset per dimension)
106 mask : ndarray of bools or 0s and 1s, optional
107 Array of same shape as `image`. Only points at which mask == True
108 will be labeled.
109 compactness : float, optional
110 Use compact watershed [3]_ with given compactness parameter.
111 Higher values result in more regularly-shaped watershed basins.
112 watershed_line : bool, optional
113 If watershed_line is True, a one-pixel wide line separates the regions
114 obtained by the watershed algorithm. The line has the label 0.
115 Note that the method used for adding this line expects that
116 marker regions are not adjacent; the watershed line may not catch
117 borders between adjacent marker regions.
118
119 Returns
120 -------
121 out : ndarray
122 A labeled matrix of the same type and shape as markers
123
124 See Also
125 --------
126 skimage.segmentation.random_walker : random walker segmentation
127 A segmentation algorithm based on anisotropic diffusion, usually
128 slower than the watershed but with good results on noisy data and
129 boundaries with holes.
130
131 Notes
132 -----
133 This function implements a watershed algorithm [1]_ [2]_ that apportions
134 pixels into marked basins. The algorithm uses a priority queue to hold
135 the pixels with the metric for the priority queue being pixel value, then
136 the time of entry into the queue - this settles ties in favor of the
137 closest marker.
138
139 Some ideas taken from
140 Soille, "Automated Basin Delineation from Digital Elevation Models Using
141 Mathematical Morphology", Signal Processing 20 (1990) 171-182
142
143 The most important insight in the paper is that entry time onto the queue
144 solves two problems: a pixel should be assigned to the neighbor with the
145 largest gradient or, if there is no gradient, pixels on a plateau should
146 be split between markers on opposite sides.
147
148 This implementation converts all arguments to specific, lowest common
149 denominator types, then passes these to a C algorithm.
150
151 Markers can be determined manually, or automatically using for example
152 the local minima of the gradient of the image, or the local maxima of the
153 distance function to the background for separating overlapping objects
154 (see example).
155
156 References
157 ----------
158 .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29
159
160 .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
161
162 .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and
163 Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation
164 Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`
165 https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf
166
167 Examples
168 --------
169 The watershed algorithm is useful to separate overlapping objects.
170
171 We first generate an initial image with two overlapping circles:
172
173 >>> x, y = np.indices((80, 80))
174 >>> x1, y1, x2, y2 = 28, 28, 44, 52
175 >>> r1, r2 = 16, 20
176 >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
177 >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
178 >>> image = np.logical_or(mask_circle1, mask_circle2)
179
180 Next, we want to separate the two circles. We generate markers at the
181 maxima of the distance to the background:
182
183 >>> from scipy import ndimage as ndi
184 >>> distance = ndi.distance_transform_edt(image)
185 >>> from skimage.feature import peak_local_max
186 >>> max_coords = peak_local_max(distance, labels=image,
187 ... footprint=np.ones((3, 3)))
188 >>> local_maxima = np.zeros_like(image, dtype=bool)
189 >>> local_maxima[tuple(max_coords.T)] = True
190 >>> markers = ndi.label(local_maxima)[0]
191
192 Finally, we run the watershed on the image and markers:
193
194 >>> labels = watershed(-distance, markers, mask=image)
195
196 The algorithm works also for 3-D images, and can be used for example to
197 separate overlapping spheres.
198 """
199 image, markers, mask = _validate_inputs(image, markers, mask, connectivity)
200 connectivity, offset = _validate_connectivity(image.ndim, connectivity,
201 offset)
202
203 # pad the image, markers, and mask so that we can use the mask to
204 # keep from running off the edges
205 pad_width = [(p, p) for p in offset]
206 image = np.pad(image, pad_width, mode='constant')
207 mask = np.pad(mask, pad_width, mode='constant').ravel()
208 output = np.pad(markers, pad_width, mode='constant')
209
210 flat_neighborhood = _offsets_to_raveled_neighbors(
211 image.shape, connectivity, center=offset)
212 marker_locations = np.flatnonzero(output)
213 image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize
214
215 _watershed_cy.watershed_raveled(image.ravel(),
216 marker_locations, flat_neighborhood,
217 mask, image_strides, compactness,
218 output.ravel(),
219 watershed_line)
220
221 output = crop(output, pad_width, copy=True)
222
223 return output
224
[end of skimage/segmentation/_watershed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py
--- a/skimage/segmentation/_watershed.py
+++ b/skimage/segmentation/_watershed.py
@@ -79,7 +79,7 @@
f'shape as `image` (shape {image.shape})')
raise ValueError(message)
return (image.astype(np.float64),
- markers.astype(np.int32),
+ markers,
mask.astype(np.int8))
|
{"golden_diff": "diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py\n--- a/skimage/segmentation/_watershed.py\n+++ b/skimage/segmentation/_watershed.py\n@@ -79,7 +79,7 @@\n f'shape as `image` (shape {image.shape})')\n raise ValueError(message)\n return (image.astype(np.float64),\n- markers.astype(np.int32),\n+ markers,\n mask.astype(np.int8))\n", "issue": "`segmentation.watershed` returns wrong data type\n### Description:\n\nThe documentation of `segmentation.watershed` says that:\r\n\r\n> ### Returns\r\n> **out**: ndarray\r\n> A labeled matrix of the same type and shape as markers\r\n\r\n[[0.18.x]](https://scikit-image.org/docs/0.18.x/api/skimage.segmentation.html#skimage.segmentation.watershed) [[0.19.x]](https://scikit-image.org/docs/0.19.x/api/skimage.segmentation.html#skimage.segmentation.watershed)\r\n\r\nI have tested this with 0.18.1:\r\n\r\n```python\r\nimport skimage.segmentation\r\nimport numpy as np\r\nprint(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)\r\n```\r\n\r\nGives `int32` but `uint16` is expected.\n\n### Way to reproduce:\n\n```Python\nimport skimage.segmentation\r\nimport numpy as np\r\nprint(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)\n```\n\n\n### Traceback or output:\n\n```Shell\nint32\n```\n\n\n### Version information:\n\n```Shell\n3.8.5 (default, Sep 4 2020, 07:30:14) \r\n[GCC 7.3.0]\r\nLinux-5.8.0-36-generic-x86_64-with-glibc2.10\r\nscikit-image version: 0.18.1\r\nnumpy version: 1.20.3\n```\n\n", "before_files": [{"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . import _watershed_cy\nfrom ..morphology.extrema import local_minima\nfrom ..morphology._util import (_validate_connectivity,\n _offsets_to_raveled_neighbors)\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask, connectivity):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n connectivity : int in {1, ..., image.ndim}\n The connectivity of the neighborhood of a pixel.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. 
If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n n_pixels = image.size\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n else:\n mask = np.asanyarray(mask, dtype=bool)\n n_pixels = np.sum(mask)\n if mask.shape != image.shape:\n message = (f'`mask` (shape {mask.shape}) must have same shape '\n f'as `image` (shape {image.shape})')\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)\n markers = ndi.label(markers_bool, structure=footprint)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n markers = regular_seeds(image.shape,\n int(markers / (n_pixels / image.size)))\n markers *= mask\n else:\n markers = np.asanyarray(markers) * mask\n if markers.shape != image.shape:\n message = (f'`markers` (shape {markers.shape}) must have same '\n f'shape as `image` (shape {image.shape})')\n raise ValueError(message)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef watershed(image, markers=None, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image : ndarray (2-D, 3-D, ...)\n Data array where the lowest value points are labeled first.\n markers : int, or ndarray of int, same shape as `image`, optional\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker. If\n ``None`` (no markers given), the local minima of the image are used as\n markers.\n connectivity : ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset : array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask : ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. The line has the label 0.\n Note that the method used for adding this line expects that\n marker regions are not adjacent; the watershed line may not catch\n borders between adjacent marker regions.\n\n Returns\n -------\n out : ndarray\n A labeled matrix of the same type and shape as markers\n\n See Also\n --------\n skimage.segmentation.random_walker : random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. 
The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`\n https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> max_coords = peak_local_max(distance, labels=image,\n ... 
footprint=np.ones((3, 3)))\n >>> local_maxima = np.zeros_like(image, dtype=bool)\n >>> local_maxima[tuple(max_coords.T)] = True\n >>> markers = ndi.label(local_maxima)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask, connectivity)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _offsets_to_raveled_neighbors(\n image.shape, connectivity, center=offset)\n marker_locations = np.flatnonzero(output)\n image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize\n\n _watershed_cy.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n", "path": "skimage/segmentation/_watershed.py"}]}
| 3,689 | 114 |
gh_patches_debug_1365 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2255 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"checkov --add-check" failing due to missing templates directory in setup.py
When running `checkov --add-check`, you get an error due to the templates not being installed properly
```
gitpod /workspace/checkov $ checkov --add-check
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.0.744
What action would you like to take? (add) [add]:
Enter the title of your new check (without a .py) [MyNewTest]:
Select a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]:
Describe what this check does [Ensure that X does Y...]:
What kind of check would you like to add? (terraform) [terraform]:
Select the cloud provider this will run on (azure, aws, gcp) [aws]:
Select a terraform object for this check (data, provider, resource) [resource]:
Enter the terraform object type [aws_iam_policy]:
Please ensure you are at the root of the Checkov repository before completing this prompt
Traceback (most recent call last):
File "/home/gitpod/.pyenv/versions/3.8.12/bin/checkov", line 9, in <module>
sys.exit(run())
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/main.py", line 77, in run
check.action()
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 184, in action
getattr(self, self.chosen_action)()
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 189, in add
self.populate_templates()
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 196, in populate_templates
tf_unit_test_template = self.template_env().get_template("unittest-terraform.jinja2")
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 167, in template_env
print("jinja2.list_templates: %s" % jinja2.list_templates())
AttributeError: module 'jinja2' has no attribute 'list_templates'
gitpod /workspace/checkov $
```
The problem occurs on Mac, regardless of whether checkov is installed using `pip3 install checkov` or `brew install checkov`. I think it will probably occur in other environments as well.
The fix inside checkov's gitpod environment seems to be fairly simple - just copy the template files from the repo into the installed package:
```
gitpod /workspace/checkov $ ls /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/
__init__.py __pycache__
gitpod /workspace/checkov $ cp checkov/common/util/templates/*.jinja2 /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/
gitpod /workspace/checkov $ checkov --add-check
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.0.744
What action would you like to take? (add) [add]:
Enter the title of your new check (without a .py) [MyNewTest]:
Select a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]:
Describe what this check does [Ensure that X does Y...]:
What kind of check would you like to add? (terraform) [terraform]:
Select the cloud provider this will run on (azure, aws, gcp) [aws]:
Select a terraform object for this check (data, provider, resource) [resource]:
Enter the terraform object type [aws_iam_policy]:
Please ensure you are at the root of the Checkov repository before completing this prompt
Creating Check MyNewTest.py in /workspace/checkov/checkov/terraform/checks/resource/aws
Successfully created /workspace/checkov/checkov/terraform/checks/resource/aws/MyNewTest.py
Creating Unit Test Stubs for MyNewTest in /workspace/checkov/tests/terraform/checks/resource/aws
Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/example_MyNewTest/MyNewTest.tf
Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/test_MyNewTest.py
Next steps:
1) Edit your new check located in the checks/ directory listed above
2) Add both a PASS and FAIL unit test to the newly created unit test under the tests/ directory to show others how to fix failures
gitpod /workspace/checkov $
```
I _think_ the problem is simply due to the template files being inadvertently left out of checkov's `setup.py`, and that adding them into the `package_dir` section as below should probably fix things:
```
package_dir={
"checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
"checkov.common.util.templates": "checkov.common.util.templates"
},
```
However, there are a number of directories under `checkov.common` that maybe should also be added to checkov's `setup.py`, and I'm not familiar enough with all of checkov's use cases to know which ones are important.
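For illustration, here is a minimal sketch of the kind of `setup()` change being described, expressed as a `package_data` entry. Whether `package_data` or a `package_dir` mapping is the mechanism the maintainers prefer, and which other `checkov.common` directories need the same treatment, is left open:
```
# Sketch only: ship the jinja2 templates as package data. The
# "checkov.common.util.templates" key and "*.jinja2" glob come from the
# directory layout shown above; other checkov.common directories may need
# similar entries.
from setuptools import find_packages, setup

setup(
    name="checkov",
    packages=find_packages(exclude=["tests*", "integration_tests*"]),
    include_package_data=True,
    package_data={
        "checkov.terraform.checks.graph_checks": ["aws/*.yaml", "gcp/*.yaml", "azure/*.yaml"],
        "checkov.common.util.templates": ["*.jinja2"],
    },
)
```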
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2>=0.3.24",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "argcomplete",
57 "detect-secrets",
58 "policyuniverse",
59 "typing-extensions",
60 "cachetools",
61 "cyclonedx-python-lib>=0.11.0,<1.0.0",
62 "click>=8.0.0",
63 "aiohttp",
64 "aiodns",
65 "aiomultiprocess",
66 "jsonpath_ng",
67 "jsonschema~=3.0"
68 ],
69 license="Apache License 2.0",
70 name="checkov",
71 version=version,
72 python_requires=">=3.7",
73 description="Infrastructure as code static analysis",
74 author="bridgecrew",
75 author_email="[email protected]",
76 url="https://github.com/bridgecrewio/checkov",
77 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
78 include_package_data=True,
79 package_dir={
80 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks"
81 },
82 package_data={
83 "checkov.terraform.checks.graph_checks": [
84 "aws/*.yaml",
85 "gcp/*.yaml",
86 "azure/*.yaml",
87 ]
88 },
89 scripts=["bin/checkov", "bin/checkov.cmd"],
90 long_description=long_description,
91 long_description_content_type="text/markdown",
92 classifiers=[
93 "Environment :: Console",
94 "Intended Audience :: Developers",
95 "Intended Audience :: System Administrators",
96 "Programming Language :: Python :: 3.7",
97 "Programming Language :: Python :: 3.8",
98 "Programming Language :: Python :: 3.9",
99 "Programming Language :: Python :: 3.10",
100 "Topic :: Security",
101 "Topic :: Software Development :: Build Tools",
102 ],
103 )
104
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -84,6 +84,9 @@
"aws/*.yaml",
"gcp/*.yaml",
"azure/*.yaml",
+ ],
+ "checkov.common.util.templates": [
+ "*.jinja2"
]
},
scripts=["bin/checkov", "bin/checkov.cmd"],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -84,6 +84,9 @@\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n+ ],\n+ \"checkov.common.util.templates\": [\n+ \"*.jinja2\"\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n", "issue": "\"checkov --add-check\" failing due to missing templates directory in setup.py\nWhen running `checkov --add-check`, you get an error due to the templates not being installed properly\r\n\r\n```\r\ngitpod /workspace/checkov $ checkov --add-check\r\n\r\n _ _ \r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V / \r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/ \r\n \r\nBy bridgecrew.io | version: 2.0.744 \r\nWhat action would you like to take? (add) [add]: \r\n\r\nEnter the title of your new check (without a .py) [MyNewTest]: \r\n\r\nSelect a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]: \r\n\r\nDescribe what this check does [Ensure that X does Y...]: \r\n\r\nWhat kind of check would you like to add? (terraform) [terraform]: \r\n\r\nSelect the cloud provider this will run on (azure, aws, gcp) [aws]: \r\n\r\nSelect a terraform object for this check (data, provider, resource) [resource]: \r\n\r\nEnter the terraform object type [aws_iam_policy]: \r\n\r\nPlease ensure you are at the root of the Checkov repository before completing this prompt\r\nTraceback (most recent call last):\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/bin/checkov\", line 9, in <module>\r\n sys.exit(run())\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/main.py\", line 77, in run\r\n check.action()\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 184, in action\r\n getattr(self, self.chosen_action)()\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 189, in add\r\n self.populate_templates()\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 196, in populate_templates\r\n tf_unit_test_template = self.template_env().get_template(\"unittest-terraform.jinja2\")\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 167, in template_env\r\n print(\"jinja2.list_templates: %s\" % jinja2.list_templates())\r\nAttributeError: module 'jinja2' has no attribute 'list_templates'\r\ngitpod /workspace/checkov $ \r\n```\r\nThe problem occurs on Mac, regardless of whether checkov is installed using `pip3 install checkov` or `brew install checkov`. 
I think it will probably occur in other environments as well.\r\n\r\nThe fix inside checkov's gitpod environment seems to be fairly simple - just copy the template files from the repo into the installed package:\r\n```\r\ngitpod /workspace/checkov $ ls /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/\r\n__init__.py __pycache__\r\ngitpod /workspace/checkov $ cp checkov/common/util/templates/*.jinja2 /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/\r\ngitpod /workspace/checkov $ checkov --add-check\r\n _ _ \r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V / \r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/ \r\n \r\nBy bridgecrew.io | version: 2.0.744 \r\nWhat action would you like to take? (add) [add]: \r\n\r\nEnter the title of your new check (without a .py) [MyNewTest]: \r\n\r\nSelect a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]: \r\n\r\nDescribe what this check does [Ensure that X does Y...]: \r\n\r\nWhat kind of check would you like to add? (terraform) [terraform]: \r\n\r\nSelect the cloud provider this will run on (azure, aws, gcp) [aws]: \r\n\r\nSelect a terraform object for this check (data, provider, resource) [resource]: \r\n\r\nEnter the terraform object type [aws_iam_policy]: \r\n\r\nPlease ensure you are at the root of the Checkov repository before completing this prompt\r\nCreating Check MyNewTest.py in /workspace/checkov/checkov/terraform/checks/resource/aws\r\n Successfully created /workspace/checkov/checkov/terraform/checks/resource/aws/MyNewTest.py\r\nCreating Unit Test Stubs for MyNewTest in /workspace/checkov/tests/terraform/checks/resource/aws\r\n Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/example_MyNewTest/MyNewTest.tf\r\n Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/test_MyNewTest.py\r\n\r\nNext steps:\r\n 1) Edit your new check located in the checks/ directory listed above\r\n 2) Add both a PASS and FAIL unit test to the newly created unit test under the tests/ directory to show others how to fix failures\r\n\r\ngitpod /workspace/checkov $ \r\n```\r\n\r\nI _think_ the problem is simply due to the template files being inadvertently left out of checkov's `setup.py`, and that adding them into the `package_dir` section as below should probably fix things:\r\n```\r\n package_dir={\r\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\r\n \"checkov.common.util.templates\": \"checkov.common.util.templates\"\r\n },\r\n```\r\n\r\nHowever there's a number of directories under `checkov.common` that maybe should also be added to checkov's `setup.py`, and I'm not familiar enough with all of checkov's use cases to know which ones are important\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = 
util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib>=0.11.0,<1.0.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py"}]}
| 2,876 | 88 |
gh_patches_debug_25534 | rasdani/github-patches | git_diff | getredash__redash-3078 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
URL query runner: URL base path doesn't need to be a required field
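A sketch of one way to express this, building on the `BaseHTTPQueryRunner` class shown in the code below; the `requires_url` flag name is illustrative, not an existing attribute:
```
# Sketch: let HTTP-based runners declare whether the base URL is mandatory,
# so the generic Url runner can drop 'url' from the schema's required fields.
class BaseHTTPQueryRunner(BaseQueryRunner):
    requires_url = True
    requires_authentication = False

    @classmethod
    def configuration_schema(cls):
        schema = {
            'type': 'object',
            'properties': {
                'url': {'type': 'string', 'title': cls.url_title},
                'username': {'type': 'string', 'title': cls.username_title},
                'password': {'type': 'string', 'title': cls.password_title},
            },
            'secret': ['password'],
        }
        required = []
        if cls.requires_url:
            required.append('url')
        if cls.requires_authentication:
            required += ['username', 'password']
        if required:
            schema['required'] = required
        return schema


class Url(BaseHTTPQueryRunner):
    requires_url = False  # a relative-URL runner can work without a base path
```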
</issue>
<code>
[start of redash/query_runner/url.py]
1 from redash.query_runner import BaseHTTPQueryRunner, register
2
3
4 class Url(BaseHTTPQueryRunner):
5
6 @classmethod
7 def annotate_query(cls):
8 return False
9
10 def test_connection(self):
11 pass
12
13 def run_query(self, query, user):
14 base_url = self.configuration.get("url", None)
15
16 try:
17 query = query.strip()
18
19 if base_url is not None and base_url != "":
20 if query.find("://") > -1:
21 return None, "Accepting only relative URLs to '%s'" % base_url
22
23 if base_url is None:
24 base_url = ""
25
26 url = base_url + query
27
28 response, error = self.get_response(url)
29 if error is not None:
30 return None, error
31
32 json_data = response.content.strip()
33
34 if json_data:
35 return json_data, None
36 else:
37 return None, "Got empty response from '{}'.".format(url)
38 except KeyboardInterrupt:
39 return None, "Query cancelled by user."
40
41
42 register(Url)
43
[end of redash/query_runner/url.py]
[start of redash/query_runner/__init__.py]
1 import logging
2 import requests
3
4 from redash import settings
5 from redash.utils import json_loads
6
7 logger = logging.getLogger(__name__)
8
9 __all__ = [
10 'BaseQueryRunner',
11 'BaseHTTPQueryRunner',
12 'InterruptException',
13 'BaseSQLQueryRunner',
14 'TYPE_DATETIME',
15 'TYPE_BOOLEAN',
16 'TYPE_INTEGER',
17 'TYPE_STRING',
18 'TYPE_DATE',
19 'TYPE_FLOAT',
20 'SUPPORTED_COLUMN_TYPES',
21 'register',
22 'get_query_runner',
23 'import_query_runners'
24 ]
25
26 # Valid types of columns returned in results:
27 TYPE_INTEGER = 'integer'
28 TYPE_FLOAT = 'float'
29 TYPE_BOOLEAN = 'boolean'
30 TYPE_STRING = 'string'
31 TYPE_DATETIME = 'datetime'
32 TYPE_DATE = 'date'
33
34 SUPPORTED_COLUMN_TYPES = set([
35 TYPE_INTEGER,
36 TYPE_FLOAT,
37 TYPE_BOOLEAN,
38 TYPE_STRING,
39 TYPE_DATETIME,
40 TYPE_DATE
41 ])
42
43
44 class InterruptException(Exception):
45 pass
46
47
48 class NotSupported(Exception):
49 pass
50
51
52 class BaseQueryRunner(object):
53 noop_query = None
54
55 def __init__(self, configuration):
56 self.syntax = 'sql'
57 self.configuration = configuration
58
59 @classmethod
60 def name(cls):
61 return cls.__name__
62
63 @classmethod
64 def type(cls):
65 return cls.__name__.lower()
66
67 @classmethod
68 def enabled(cls):
69 return True
70
71 @classmethod
72 def annotate_query(cls):
73 return True
74
75 @classmethod
76 def configuration_schema(cls):
77 return {}
78
79 def test_connection(self):
80 if self.noop_query is None:
81 raise NotImplementedError()
82 data, error = self.run_query(self.noop_query, None)
83
84 if error is not None:
85 raise Exception(error)
86
87 def run_query(self, query, user):
88 raise NotImplementedError()
89
90 def fetch_columns(self, columns):
91 column_names = []
92 duplicates_counter = 1
93 new_columns = []
94
95 for col in columns:
96 column_name = col[0]
97 if column_name in column_names:
98 column_name = "{}{}".format(column_name, duplicates_counter)
99 duplicates_counter += 1
100
101 column_names.append(column_name)
102 new_columns.append({'name': column_name,
103 'friendly_name': column_name,
104 'type': col[1]})
105
106 return new_columns
107
108 def get_schema(self, get_stats=False):
109 raise NotSupported()
110
111 def _run_query_internal(self, query):
112 results, error = self.run_query(query, None)
113
114 if error is not None:
115 raise Exception("Failed running query [%s]." % query)
116 return json_loads(results)['rows']
117
118 @classmethod
119 def to_dict(cls):
120 return {
121 'name': cls.name(),
122 'type': cls.type(),
123 'configuration_schema': cls.configuration_schema()
124 }
125
126
127 class BaseSQLQueryRunner(BaseQueryRunner):
128
129 def get_schema(self, get_stats=False):
130 schema_dict = {}
131 self._get_tables(schema_dict)
132 if settings.SCHEMA_RUN_TABLE_SIZE_CALCULATIONS and get_stats:
133 self._get_tables_stats(schema_dict)
134 return schema_dict.values()
135
136 def _get_tables(self, schema_dict):
137 return []
138
139 def _get_tables_stats(self, tables_dict):
140 for t in tables_dict.keys():
141 if type(tables_dict[t]) == dict:
142 res = self._run_query_internal('select count(*) as cnt from %s' % t)
143 tables_dict[t]['size'] = res[0]['cnt']
144
145
146 class BaseHTTPQueryRunner(BaseQueryRunner):
147 response_error = "Endpoint returned unexpected status code"
148 requires_authentication = False
149 url_title = 'URL base path'
150 username_title = 'HTTP Basic Auth Username'
151 password_title = 'HTTP Basic Auth Password'
152
153 @classmethod
154 def configuration_schema(cls):
155 schema = {
156 'type': 'object',
157 'properties': {
158 'url': {
159 'type': 'string',
160 'title': cls.url_title,
161 },
162 'username': {
163 'type': 'string',
164 'title': cls.username_title,
165 },
166 'password': {
167 'type': 'string',
168 'title': cls.password_title,
169 },
170 },
171 'required': ['url'],
172 'secret': ['password']
173 }
174 if cls.requires_authentication:
175 schema['required'] += ['username', 'password']
176 return schema
177
178 def get_auth(self):
179 username = self.configuration.get('username')
180 password = self.configuration.get('password')
181 if username and password:
182 return (username, password)
183 if self.requires_authentication:
184 raise ValueError("Username and Password required")
185 else:
186 return None
187
188 def get_response(self, url, auth=None, **kwargs):
189 # Get authentication values if not given
190 if auth is None:
191 auth = self.get_auth()
192
193 # Then call requests to get the response from the given endpoint
194 # URL optionally, with the additional requests parameters.
195 error = None
196 response = None
197 try:
198 response = requests.get(url, auth=auth, **kwargs)
199 # Raise a requests HTTP exception with the appropriate reason
200 # for 4xx and 5xx response status codes which is later caught
201 # and passed back.
202 response.raise_for_status()
203
204 # Any other responses (e.g. 2xx and 3xx):
205 if response.status_code != 200:
206 error = '{} ({}).'.format(
207 self.response_error,
208 response.status_code,
209 )
210
211 except requests.HTTPError as exc:
212 logger.exception(exc)
213 error = (
214 "Failed to execute query. "
215 "Return Code: {} Reason: {}".format(
216 response.status_code,
217 response.text
218 )
219 )
220 except requests.RequestException as exc:
221 # Catch all other requests exceptions and return the error.
222 logger.exception(exc)
223 error = str(exc)
224
225 # Return response and error.
226 return response, error
227
228
229 query_runners = {}
230
231
232 def register(query_runner_class):
233 global query_runners
234 if query_runner_class.enabled():
235 logger.debug("Registering %s (%s) query runner.", query_runner_class.name(), query_runner_class.type())
236 query_runners[query_runner_class.type()] = query_runner_class
237 else:
238 logger.debug("%s query runner enabled but not supported, not registering. Either disable or install missing "
239 "dependencies.", query_runner_class.name())
240
241
242 def get_query_runner(query_runner_type, configuration):
243 query_runner_class = query_runners.get(query_runner_type, None)
244 if query_runner_class is None:
245 return None
246
247 return query_runner_class(configuration)
248
249
250 def get_configuration_schema_for_query_runner_type(query_runner_type):
251 query_runner_class = query_runners.get(query_runner_type, None)
252 if query_runner_class is None:
253 return None
254
255 return query_runner_class.configuration_schema()
256
257
258 def import_query_runners(query_runner_imports):
259 for runner_import in query_runner_imports:
260 __import__(runner_import)
261
[end of redash/query_runner/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py
--- a/redash/query_runner/__init__.py
+++ b/redash/query_runner/__init__.py
@@ -146,6 +146,7 @@
class BaseHTTPQueryRunner(BaseQueryRunner):
response_error = "Endpoint returned unexpected status code"
requires_authentication = False
+ requires_url = True
url_title = 'URL base path'
username_title = 'HTTP Basic Auth Username'
password_title = 'HTTP Basic Auth Password'
@@ -168,9 +169,15 @@
'title': cls.password_title,
},
},
- 'required': ['url'],
'secret': ['password']
}
+
+ if cls.requires_url or cls.requires_authentication:
+ schema['required'] = []
+
+ if cls.requires_url:
+ schema['required'] += ['url']
+
if cls.requires_authentication:
schema['required'] += ['username', 'password']
return schema
diff --git a/redash/query_runner/url.py b/redash/query_runner/url.py
--- a/redash/query_runner/url.py
+++ b/redash/query_runner/url.py
@@ -2,6 +2,7 @@
class Url(BaseHTTPQueryRunner):
+ requires_url = False
@classmethod
def annotate_query(cls):
|
{"golden_diff": "diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py\n--- a/redash/query_runner/__init__.py\n+++ b/redash/query_runner/__init__.py\n@@ -146,6 +146,7 @@\n class BaseHTTPQueryRunner(BaseQueryRunner):\n response_error = \"Endpoint returned unexpected status code\"\n requires_authentication = False\n+ requires_url = True\n url_title = 'URL base path'\n username_title = 'HTTP Basic Auth Username'\n password_title = 'HTTP Basic Auth Password'\n@@ -168,9 +169,15 @@\n 'title': cls.password_title,\n },\n },\n- 'required': ['url'],\n 'secret': ['password']\n }\n+\n+ if cls.requires_url or cls.requires_authentication:\n+ schema['required'] = []\n+\n+ if cls.requires_url:\n+ schema['required'] += ['url']\n+\n if cls.requires_authentication:\n schema['required'] += ['username', 'password']\n return schema\ndiff --git a/redash/query_runner/url.py b/redash/query_runner/url.py\n--- a/redash/query_runner/url.py\n+++ b/redash/query_runner/url.py\n@@ -2,6 +2,7 @@\n \n \n class Url(BaseHTTPQueryRunner):\n+ requires_url = False\n \n @classmethod\n def annotate_query(cls):\n", "issue": "URL query runner: URL base path doesn't need to be a required field\n\n", "before_files": [{"content": "from redash.query_runner import BaseHTTPQueryRunner, register\n\n\nclass Url(BaseHTTPQueryRunner):\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def test_connection(self):\n pass\n\n def run_query(self, query, user):\n base_url = self.configuration.get(\"url\", None)\n\n try:\n query = query.strip()\n\n if base_url is not None and base_url != \"\":\n if query.find(\"://\") > -1:\n return None, \"Accepting only relative URLs to '%s'\" % base_url\n\n if base_url is None:\n base_url = \"\"\n\n url = base_url + query\n\n response, error = self.get_response(url)\n if error is not None:\n return None, error\n\n json_data = response.content.strip()\n\n if json_data:\n return json_data, None\n else:\n return None, \"Got empty response from '{}'.\".format(url)\n except KeyboardInterrupt:\n return None, \"Query cancelled by user.\"\n\n\nregister(Url)\n", "path": "redash/query_runner/url.py"}, {"content": "import logging\nimport requests\n\nfrom redash import settings\nfrom redash.utils import json_loads\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\n 'BaseQueryRunner',\n 'BaseHTTPQueryRunner',\n 'InterruptException',\n 'BaseSQLQueryRunner',\n 'TYPE_DATETIME',\n 'TYPE_BOOLEAN',\n 'TYPE_INTEGER',\n 'TYPE_STRING',\n 'TYPE_DATE',\n 'TYPE_FLOAT',\n 'SUPPORTED_COLUMN_TYPES',\n 'register',\n 'get_query_runner',\n 'import_query_runners'\n]\n\n# Valid types of columns returned in results:\nTYPE_INTEGER = 'integer'\nTYPE_FLOAT = 'float'\nTYPE_BOOLEAN = 'boolean'\nTYPE_STRING = 'string'\nTYPE_DATETIME = 'datetime'\nTYPE_DATE = 'date'\n\nSUPPORTED_COLUMN_TYPES = set([\n TYPE_INTEGER,\n TYPE_FLOAT,\n TYPE_BOOLEAN,\n TYPE_STRING,\n TYPE_DATETIME,\n TYPE_DATE\n])\n\n\nclass InterruptException(Exception):\n pass\n\n\nclass NotSupported(Exception):\n pass\n\n\nclass BaseQueryRunner(object):\n noop_query = None\n\n def __init__(self, configuration):\n self.syntax = 'sql'\n self.configuration = configuration\n\n @classmethod\n def name(cls):\n return cls.__name__\n\n @classmethod\n def type(cls):\n return cls.__name__.lower()\n\n @classmethod\n def enabled(cls):\n return True\n\n @classmethod\n def annotate_query(cls):\n return True\n\n @classmethod\n def configuration_schema(cls):\n return {}\n\n def test_connection(self):\n if self.noop_query is None:\n raise NotImplementedError()\n 
data, error = self.run_query(self.noop_query, None)\n\n if error is not None:\n raise Exception(error)\n\n def run_query(self, query, user):\n raise NotImplementedError()\n\n def fetch_columns(self, columns):\n column_names = []\n duplicates_counter = 1\n new_columns = []\n\n for col in columns:\n column_name = col[0]\n if column_name in column_names:\n column_name = \"{}{}\".format(column_name, duplicates_counter)\n duplicates_counter += 1\n\n column_names.append(column_name)\n new_columns.append({'name': column_name,\n 'friendly_name': column_name,\n 'type': col[1]})\n\n return new_columns\n\n def get_schema(self, get_stats=False):\n raise NotSupported()\n\n def _run_query_internal(self, query):\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed running query [%s].\" % query)\n return json_loads(results)['rows']\n\n @classmethod\n def to_dict(cls):\n return {\n 'name': cls.name(),\n 'type': cls.type(),\n 'configuration_schema': cls.configuration_schema()\n }\n\n\nclass BaseSQLQueryRunner(BaseQueryRunner):\n\n def get_schema(self, get_stats=False):\n schema_dict = {}\n self._get_tables(schema_dict)\n if settings.SCHEMA_RUN_TABLE_SIZE_CALCULATIONS and get_stats:\n self._get_tables_stats(schema_dict)\n return schema_dict.values()\n\n def _get_tables(self, schema_dict):\n return []\n\n def _get_tables_stats(self, tables_dict):\n for t in tables_dict.keys():\n if type(tables_dict[t]) == dict:\n res = self._run_query_internal('select count(*) as cnt from %s' % t)\n tables_dict[t]['size'] = res[0]['cnt']\n\n\nclass BaseHTTPQueryRunner(BaseQueryRunner):\n response_error = \"Endpoint returned unexpected status code\"\n requires_authentication = False\n url_title = 'URL base path'\n username_title = 'HTTP Basic Auth Username'\n password_title = 'HTTP Basic Auth Password'\n\n @classmethod\n def configuration_schema(cls):\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {\n 'type': 'string',\n 'title': cls.url_title,\n },\n 'username': {\n 'type': 'string',\n 'title': cls.username_title,\n },\n 'password': {\n 'type': 'string',\n 'title': cls.password_title,\n },\n },\n 'required': ['url'],\n 'secret': ['password']\n }\n if cls.requires_authentication:\n schema['required'] += ['username', 'password']\n return schema\n\n def get_auth(self):\n username = self.configuration.get('username')\n password = self.configuration.get('password')\n if username and password:\n return (username, password)\n if self.requires_authentication:\n raise ValueError(\"Username and Password required\")\n else:\n return None\n\n def get_response(self, url, auth=None, **kwargs):\n # Get authentication values if not given\n if auth is None:\n auth = self.get_auth()\n\n # Then call requests to get the response from the given endpoint\n # URL optionally, with the additional requests parameters.\n error = None\n response = None\n try:\n response = requests.get(url, auth=auth, **kwargs)\n # Raise a requests HTTP exception with the appropriate reason\n # for 4xx and 5xx response status codes which is later caught\n # and passed back.\n response.raise_for_status()\n\n # Any other responses (e.g. 2xx and 3xx):\n if response.status_code != 200:\n error = '{} ({}).'.format(\n self.response_error,\n response.status_code,\n )\n\n except requests.HTTPError as exc:\n logger.exception(exc)\n error = (\n \"Failed to execute query. 
\"\n \"Return Code: {} Reason: {}\".format(\n response.status_code,\n response.text\n )\n )\n except requests.RequestException as exc:\n # Catch all other requests exceptions and return the error.\n logger.exception(exc)\n error = str(exc)\n\n # Return response and error.\n return response, error\n\n\nquery_runners = {}\n\n\ndef register(query_runner_class):\n global query_runners\n if query_runner_class.enabled():\n logger.debug(\"Registering %s (%s) query runner.\", query_runner_class.name(), query_runner_class.type())\n query_runners[query_runner_class.type()] = query_runner_class\n else:\n logger.debug(\"%s query runner enabled but not supported, not registering. Either disable or install missing \"\n \"dependencies.\", query_runner_class.name())\n\n\ndef get_query_runner(query_runner_type, configuration):\n query_runner_class = query_runners.get(query_runner_type, None)\n if query_runner_class is None:\n return None\n\n return query_runner_class(configuration)\n\n\ndef get_configuration_schema_for_query_runner_type(query_runner_type):\n query_runner_class = query_runners.get(query_runner_type, None)\n if query_runner_class is None:\n return None\n\n return query_runner_class.configuration_schema()\n\n\ndef import_query_runners(query_runner_imports):\n for runner_import in query_runner_imports:\n __import__(runner_import)\n", "path": "redash/query_runner/__init__.py"}]}
| 3,066 | 296 |
gh_patches_debug_15470 | rasdani/github-patches | git_diff | tensorflow__addons-567 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build nightly from tf-nightly
Currently we build against `tf-nightly-2.0-preview`. Now that TF2 is released we should switch to `tf-nightly` once we confirm that the switch has been made.
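For illustration, a minimal sketch of the dependency switch being described, assuming the consolidated `tf-nightly` wheel now covers both CPU and GPU builds so the per-platform preview packages are no longer needed:
```
# Sketch: collapse the platform-specific 2.0-preview nightlies into the
# consolidated package. Assumes tf-nightly has replaced both
# tf-nightly-2.0-preview and tf-nightly-gpu-2.0-preview on PyPI.
if project_name == TFA_NIGHTLY:
    REQUIRED_PACKAGES.append('tf-nightly')
```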
</issue>
<code>
[start of setup.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons.
16
17 TensorFlow Addons is a repository of contributions that conform to well-
18 established API patterns, but implement new functionality not available
19 in core TensorFlow. TensorFlow natively supports a large number of
20 operators, layers, metrics, losses, and optimizers. However, in a fast
21 moving field like ML, there are many interesting new developments that
22 cannot be integrated into core TensorFlow (because their broad
23 applicability is not yet clear, or it is mostly used by a smaller subset
24 of the community).
25 """
26
27 from __future__ import absolute_import
28 from __future__ import division
29 from __future__ import print_function
30
31 import os
32 import platform
33 import sys
34
35 from datetime import datetime
36 from setuptools import find_packages
37 from setuptools import setup
38 from setuptools.dist import Distribution
39 from setuptools import Extension
40
41 DOCLINES = __doc__.split('\n')
42
43 TFA_NIGHTLY = 'tfa-nightly'
44 TFA_RELEASE = 'tensorflow-addons'
45
46 if '--nightly' in sys.argv:
47 project_name = TFA_NIGHTLY
48 nightly_idx = sys.argv.index('--nightly')
49 sys.argv.pop(nightly_idx)
50 else:
51 project_name = TFA_RELEASE
52
53 # Version
54 version = {}
55 base_dir = os.path.dirname(os.path.abspath(__file__))
56 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
57 # yapf: disable
58 exec(fp.read(), version)
59 # yapf: enable
60
61 if project_name == TFA_NIGHTLY:
62 version['__version__'] += datetime.strftime(datetime.today(), "%Y%m%d")
63
64 # Dependencies
65 REQUIRED_PACKAGES = [
66 'six >= 1.10.0',
67 ]
68
69 if project_name == TFA_RELEASE:
70 # TODO: remove if-else condition when tf supports package consolidation.
71 if platform.system() == 'Linux':
72 REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')
73 else:
74 REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')
75 elif project_name == TFA_NIGHTLY:
76 # TODO: remove if-else condition when tf-nightly supports package consolidation.
77 if platform.system() == 'Linux':
78 REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')
79 else:
80 REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')
81
82
83 class BinaryDistribution(Distribution):
84 """This class is needed in order to create OS specific wheels."""
85
86 def has_ext_modules(self):
87 return True
88
89
90 setup(
91 name=project_name,
92 version=version['__version__'],
93 description=DOCLINES[0],
94 long_description='\n'.join(DOCLINES[2:]),
95 author='Google Inc.',
96 author_email='[email protected]',
97 packages=find_packages(),
98 ext_modules=[Extension('_foo', ['stub.cc'])],
99 install_requires=REQUIRED_PACKAGES,
100 include_package_data=True,
101 zip_safe=False,
102 distclass=BinaryDistribution,
103 classifiers=[
104 'Development Status :: 4 - Beta',
105 'Intended Audience :: Developers',
106 'Intended Audience :: Education',
107 'Intended Audience :: Science/Research',
108 'License :: OSI Approved :: Apache Software License',
109 'Programming Language :: Python :: 2.7',
110 'Programming Language :: Python :: 3.5',
111 'Programming Language :: Python :: 3.6',
112 'Programming Language :: Python :: 3.7',
113 'Topic :: Scientific/Engineering :: Mathematics',
114 'Topic :: Software Development :: Libraries :: Python Modules',
115 'Topic :: Software Development :: Libraries',
116 ],
117 license='Apache 2.0',
118 keywords='tensorflow addons machine learning',
119 )
120
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,15 +69,11 @@
if project_name == TFA_RELEASE:
# TODO: remove if-else condition when tf supports package consolidation.
if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')
+ REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')
else:
- REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')
+ REQUIRED_PACKAGES.append('tensorflow == 2.0.0')
elif project_name == TFA_NIGHTLY:
- # TODO: remove if-else condition when tf-nightly supports package consolidation.
- if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')
- else:
- REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')
+ REQUIRED_PACKAGES.append('tf-nightly')
class BinaryDistribution(Distribution):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,15 +69,11 @@\n if project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')\n+ REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n else:\n- REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')\n+ REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\n elif project_name == TFA_NIGHTLY:\n- # TODO: remove if-else condition when tf-nightly supports package consolidation.\n- if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')\n- else:\n- REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')\n+ REQUIRED_PACKAGES.append('tf-nightly')\n \n \n class BinaryDistribution(Distribution):\n", "issue": "Build nightly from tf-nightly\nCurrently we build against `tf-nightly-2.0-preview`. Now that TF2 is released we should switch to `tf-nightly` once we confirm that the switch has been made.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. 
However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split('\\n')\n\nTFA_NIGHTLY = 'tfa-nightly'\nTFA_RELEASE = 'tensorflow-addons'\n\nif '--nightly' in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index('--nightly')\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nif project_name == TFA_NIGHTLY:\n version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\n# Dependencies\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nif project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')\n else:\n REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')\nelif project_name == TFA_NIGHTLY:\n # TODO: remove if-else condition when tf-nightly supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')\n else:\n REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n ext_modules=[Extension('_foo', ['stub.cc'])],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}]}
| 1,756 | 237 |
gh_patches_debug_16380 | rasdani/github-patches | git_diff | googleapis__python-bigquery-701 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Noxfile is not updated with Sphinx version pin
`noxfile.py` has not been updated with the Sphinx version pin; please take a look!
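For illustration, the kind of change being asked for is a pinned Sphinx install inside the nox `docs` session (and the `docfx` session, if present). The `sphinx==4.0.1` pin and the companion packages below are placeholders rather than the repository's actual values:
```
# Sketch only: pin Sphinx where the docs session installs it. The exact version
# and companion packages are placeholders and should match the pin used
# elsewhere in the repository.
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
    """Build the docs with a pinned Sphinx."""
    session.install("sphinx==4.0.1", "alabaster", "recommonmark")
    session.install("-e", ".")
    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full tracebacks on error
        "-b", "html",
        "-d", os.path.join("docs", "_build", "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join("docs", "_build", "html", ""),
    )
```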
</issue>
<code>
[start of noxfile.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import pathlib
18 import os
19 import shutil
20
21 import nox
22
23
24 PYTYPE_VERSION = "pytype==2021.4.9"
25 BLACK_VERSION = "black==19.10b0"
26 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
27
28 DEFAULT_PYTHON_VERSION = "3.8"
29 SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
30 UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
31 CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
32
33 # 'docfx' is excluded since it only needs to run in 'docs-presubmit'
34 nox.options.sessions = [
35 "unit_noextras",
36 "unit",
37 "system",
38 "snippets",
39 "cover",
40 "lint",
41 "lint_setup_py",
42 "blacken",
43 "pytype",
44 "docs",
45 ]
46
47
48 def default(session, install_extras=True):
49 """Default unit test session.
50
51 This is intended to be run **without** an interpreter set, so
52 that the current ``python`` (on the ``PATH``) or the version of
53 Python corresponding to the ``nox`` binary the ``PATH`` can
54 run the tests.
55 """
56 constraints_path = str(
57 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
58 )
59
60 # Install all test dependencies, then install local packages in-place.
61 session.install(
62 "mock",
63 "pytest",
64 "google-cloud-testutils",
65 "pytest-cov",
66 "freezegun",
67 "-c",
68 constraints_path,
69 )
70
71 install_target = ".[all]" if install_extras else "."
72 session.install("-e", install_target, "-c", constraints_path)
73
74 session.install("ipython", "-c", constraints_path)
75
76 # Run py.test against the unit tests.
77 session.run(
78 "py.test",
79 "--quiet",
80 "--cov=google/cloud/bigquery",
81 "--cov=tests/unit",
82 "--cov-append",
83 "--cov-config=.coveragerc",
84 "--cov-report=",
85 "--cov-fail-under=0",
86 os.path.join("tests", "unit"),
87 *session.posargs,
88 )
89
90
91 @nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
92 def unit(session):
93 """Run the unit test suite."""
94 default(session)
95
96
97 @nox.session(python=UNIT_TEST_PYTHON_VERSIONS[-1])
98 def unit_noextras(session):
99 """Run the unit test suite."""
100 default(session, install_extras=False)
101
102
103 @nox.session(python=DEFAULT_PYTHON_VERSION)
104 def pytype(session):
105 """Run type checks."""
106     # An indirect dependency attrs==21.1.0 breaks the check, and installing a less
107 # recent version avoids the error until a possibly better fix is found.
108 # https://github.com/googleapis/python-bigquery/issues/655
109 session.install("attrs==20.3.0")
110 session.install("-e", ".[all]")
111 session.install("ipython")
112 session.install(PYTYPE_VERSION)
113 session.run("pytype")
114
115
116 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
117 def system(session):
118 """Run the system test suite."""
119
120 constraints_path = str(
121 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
122 )
123
124 # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
125 if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
126 session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
127
128 # Sanity check: Only run system tests if the environment variable is set.
129 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
130 session.skip("Credentials must be set via environment variable.")
131
132 # Use pre-release gRPC for system tests.
133 session.install("--pre", "grpcio", "-c", constraints_path)
134
135 # Install all test dependencies, then install local packages in place.
136 session.install(
137 "mock", "pytest", "psutil", "google-cloud-testutils", "-c", constraints_path
138 )
139 if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") == "true":
140 # mTLS test requires pyopenssl and latest google-cloud-storage
141 session.install("google-cloud-storage", "pyopenssl")
142 else:
143 session.install("google-cloud-storage", "-c", constraints_path)
144
145 session.install("-e", ".[all]", "-c", constraints_path)
146 session.install("ipython", "-c", constraints_path)
147
148 # Run py.test against the system tests.
149 session.run("py.test", "--quiet", os.path.join("tests", "system"), *session.posargs)
150
151
152 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
153 def snippets(session):
154 """Run the snippets test suite."""
155
156 # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.
157 if os.environ.get("RUN_SNIPPETS_TESTS", "true") == "false":
158 session.skip("RUN_SNIPPETS_TESTS is set to false, skipping")
159
160 # Sanity check: Only run snippets tests if the environment variable is set.
161 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
162 session.skip("Credentials must be set via environment variable.")
163
164 constraints_path = str(
165 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
166 )
167
168 # Install all test dependencies, then install local packages in place.
169 session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
170 session.install("google-cloud-storage", "-c", constraints_path)
171 session.install("grpcio", "-c", constraints_path)
172
173 session.install("-e", ".[all]", "-c", constraints_path)
174
175 # Run py.test against the snippets tests.
176 # Skip tests in samples/snippets, as those are run in a different session
177 # using the nox config from that directory.
178 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
179 session.run(
180 "py.test",
181 "samples",
182 "--ignore=samples/snippets",
183 "--ignore=samples/geography",
184 *session.posargs,
185 )
186
187
188 @nox.session(python=DEFAULT_PYTHON_VERSION)
189 def cover(session):
190 """Run the final coverage report.
191
192 This outputs the coverage report aggregating coverage from the unit
193 test runs (not system test runs), and then erases coverage data.
194 """
195 session.install("coverage", "pytest-cov")
196 session.run("coverage", "report", "--show-missing", "--fail-under=100")
197 session.run("coverage", "erase")
198
199
200 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
201 def prerelease_deps(session):
202 """Run all tests with prerelease versions of dependencies installed.
203
204 https://github.com/googleapis/python-bigquery/issues/95
205 """
206 # PyArrow prerelease packages are published to an alternative PyPI host.
207 # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages
208 session.install(
209 "--extra-index-url", "https://pypi.fury.io/arrow-nightlies/", "--pre", "pyarrow"
210 )
211 session.install("--pre", "grpcio", "pandas")
212 session.install(
213 "freezegun",
214 "google-cloud-storage",
215 "google-cloud-testutils",
216 "IPython",
217 "mock",
218 "psutil",
219 "pytest",
220 "pytest-cov",
221 )
222 session.install("-e", ".[all]")
223
224 # Print out prerelease package versions.
225 session.run("python", "-c", "import grpc; print(grpc.__version__)")
226 session.run("python", "-c", "import pandas; print(pandas.__version__)")
227 session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)")
228
229 # Run all tests, except a few samples tests which require extra dependencies.
230 session.run("py.test", "tests/unit")
231 session.run("py.test", "tests/system")
232 session.run("py.test", "samples/tests")
233
234
235 @nox.session(python=DEFAULT_PYTHON_VERSION)
236 def lint(session):
237 """Run linters.
238
239 Returns a failure if the linters find linting errors or sufficiently
240 serious code quality issues.
241 """
242
243 session.install("flake8", BLACK_VERSION)
244 session.install("-e", ".")
245 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
246 session.run("flake8", "tests")
247 session.run("flake8", os.path.join("docs", "samples"))
248 session.run("flake8", os.path.join("docs", "snippets.py"))
249 session.run("black", "--check", *BLACK_PATHS)
250
251
252 @nox.session(python=DEFAULT_PYTHON_VERSION)
253 def lint_setup_py(session):
254 """Verify that setup.py is valid (including RST check)."""
255
256 session.install("docutils", "Pygments")
257 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
258
259
260 @nox.session(python=DEFAULT_PYTHON_VERSION)
261 def blacken(session):
262 """Run black.
263 Format code to uniform standard.
264 """
265
266 session.install(BLACK_VERSION)
267 session.run("black", *BLACK_PATHS)
268
269
270 @nox.session(python=DEFAULT_PYTHON_VERSION)
271 def docs(session):
272 """Build the docs."""
273
274 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
275 session.install("google-cloud-storage")
276 session.install("-e", ".[all]")
277
278 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
279 session.run(
280 "sphinx-build",
281 "-W", # warnings as errors
282 "-T", # show full traceback on exception
283 "-N", # no colors
284 "-b",
285 "html",
286 "-d",
287 os.path.join("docs", "_build", "doctrees", ""),
288 os.path.join("docs", ""),
289 os.path.join("docs", "_build", "html", ""),
290 )
291
292
293 @nox.session(python=DEFAULT_PYTHON_VERSION)
294 def docfx(session):
295 """Build the docfx yaml files for this library."""
296
297 session.install("-e", ".")
298 session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
299
300 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
301 session.run(
302 "sphinx-build",
303 "-T", # show full traceback on exception
304 "-N", # no colors
305 "-D",
306 (
307 "extensions=sphinx.ext.autodoc,"
308 "sphinx.ext.autosummary,"
309 "docfx_yaml.extension,"
310 "sphinx.ext.intersphinx,"
311 "sphinx.ext.coverage,"
312 "sphinx.ext.napoleon,"
313 "sphinx.ext.todo,"
314 "sphinx.ext.viewcode,"
315 "recommonmark"
316 ),
317 "-b",
318 "html",
319 "-d",
320 os.path.join("docs", "_build", "doctrees", ""),
321 os.path.join("docs", ""),
322 os.path.join("docs", "_build", "html", ""),
323 )
324
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -271,7 +271,7 @@
def docs(session):
"""Build the docs."""
- session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
+ session.install("ipython", "recommonmark", "sphinx==4.0.1", "sphinx_rtd_theme")
session.install("google-cloud-storage")
session.install("-e", ".[all]")
@@ -295,7 +295,9 @@
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
- session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
+ session.install(
+ "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
+ )
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -271,7 +271,7 @@\n def docs(session):\n \"\"\"Build the docs.\"\"\"\n \n- session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n+ session.install(\"ipython\", \"recommonmark\", \"sphinx==4.0.1\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n \n@@ -295,7 +295,9 @@\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n \n session.install(\"-e\", \".\")\n- session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n+ session.install(\n+ \"sphinx==4.0.1\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\"\n+ )\n \n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n", "issue": "Noxfile is not updated with Sphinx version pin\n`noxfile.py` is not updated with Sphinx version pin, please help take a look!\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nPYTYPE_VERSION = \"pytype==2021.4.9\"\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"pytype\",\n \"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google/cloud/bigquery\",\n \"--cov=tests/unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n 
default(session)\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef pytype(session):\n \"\"\"Run type checks.\"\"\"\n # An indirect dependecy attrs==21.1.0 breaks the check, and installing a less\n # recent version avoids the error until a possibly better fix is found.\n # https://github.com/googleapis/python-bigquery/issues/655\n session.install(\"attrs==20.3.0\")\n session.install(\"-e\", \".[all]\")\n session.install(\"ipython\")\n session.install(PYTYPE_VERSION)\n session.run(\"pytype\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email 
protected](python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n \"\"\"\n\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
| 4,042 | 250 |
gh_patches_debug_34765
|
rasdani/github-patches
|
git_diff
|
crytic__slither-1909
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] contract reports ether as locked when ether is sent in Yul
The following contract reports ether as locked despite it being sent in a Yul block
```
contract FPLockedEther {
receive() payable external {}
function yulSendEther() external {
bool success;
assembly {
success := call(gas(), caller(), balance(address()), 0,0,0,0)
}
}
}
```
```
Contract locking ether found:
Contract FPLockedEther (locked-ether.sol#1-13) has payable functions:
- FPLockedEther.receive() (locked-ether.sol#2-3)
But does not have a function to withdraw the ether
Reference: https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether
```
It could be that the IR is incorrect here as it should not be a `SOLIDITY_CALL`
```
Contract FPLockedEther
Function FPLockedEther.receive() (*)
Function FPLockedEther.yulSendEther() (*)
Expression: success = call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(gas()(),caller()(),balance(uint256)(address()()),0,0,0,0)
IRs:
TMP_0(uint256) = SOLIDITY_CALL gas()()
TMP_1(address) := msg.sender(address)
TMP_2 = CONVERT this to address
TMP_3(uint256) = SOLIDITY_CALL balance(uint256)(TMP_2)
TMP_4(uint256) = SOLIDITY_CALL call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(TMP_0,TMP_1,TMP_3,0,0,0,0)
success(bool) := TMP_4(uint256)
```
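
For reference, here is a minimal sketch of the kind of check that would treat this pattern as ether leaving the contract. It assumes, as the IR dump above suggests, that the Yul call surfaces as a SOLIDITY_CALL whose third argument is the forwarded value; the helper name and argument layout are illustrative, not existing slither API. The patch later in this record takes essentially this approach:
```
# Sketch: flag SOLIDITY_CALL operations for call(...) whose value argument
# is not the literal 0, so ether sent from a Yul block is detected.
from slither.core.declarations import SolidityFunction
from slither.slithir.operations import SolidityCall
from slither.slithir.variables import Constant

CALL_SIG = "call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"


def yul_call_sends_ether(ir) -> bool:
    if not isinstance(ir, SolidityCall):
        return False
    if ir.function != SolidityFunction(CALL_SIG):
        return False
    value_arg = ir.arguments[2]  # call(gas, target, value, ...)
    # A constant 0 means no ether is forwarded; anything else may send ether.
    return not isinstance(value_arg, Constant) or value_arg.value != 0
```
A check along these lines would keep do_no_send_ether from reporting the contract above as locking ether.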
</issue>
<code>
[start of slither/detectors/attributes/locked_ether.py]
1 """
2 Check if ethers are locked in the contract
3 """
4 from typing import List
5
6 from slither.core.declarations.contract import Contract
7 from slither.detectors.abstract_detector import (
8 AbstractDetector,
9 DetectorClassification,
10 DETECTOR_INFO,
11 )
12 from slither.slithir.operations import (
13 HighLevelCall,
14 LowLevelCall,
15 Send,
16 Transfer,
17 NewContract,
18 LibraryCall,
19 InternalCall,
20 )
21 from slither.utils.output import Output
22
23
24 class LockedEther(AbstractDetector): # pylint: disable=too-many-nested-blocks
25
26 ARGUMENT = "locked-ether"
27 HELP = "Contracts that lock ether"
28 IMPACT = DetectorClassification.MEDIUM
29 CONFIDENCE = DetectorClassification.HIGH
30
31 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether"
32
33 WIKI_TITLE = "Contracts that lock Ether"
34 WIKI_DESCRIPTION = "Contract with a `payable` function, but without a withdrawal capacity."
35
36 # region wiki_exploit_scenario
37 WIKI_EXPLOIT_SCENARIO = """
38 ```solidity
39 pragma solidity 0.4.24;
40 contract Locked{
41 function receive() payable public{
42 }
43 }
44 ```
45 Every Ether sent to `Locked` will be lost."""
46 # endregion wiki_exploit_scenario
47
48 WIKI_RECOMMENDATION = "Remove the payable attribute or add a withdraw function."
49
50 @staticmethod
51 def do_no_send_ether(contract: Contract) -> bool:
52 functions = contract.all_functions_called
53 to_explore = functions
54 explored = []
55 while to_explore: # pylint: disable=too-many-nested-blocks
56 functions = to_explore
57 explored += to_explore
58 to_explore = []
59 for function in functions:
60 calls = [c.name for c in function.internal_calls]
61 if "suicide(address)" in calls or "selfdestruct(address)" in calls:
62 return False
63 for node in function.nodes:
64 for ir in node.irs:
65 if isinstance(
66 ir,
67 (Send, Transfer, HighLevelCall, LowLevelCall, NewContract),
68 ):
69 if ir.call_value and ir.call_value != 0:
70 return False
71 if isinstance(ir, (LowLevelCall)):
72 if ir.function_name in ["delegatecall", "callcode"]:
73 return False
74 # If a new internal call or librarycall
75 # Add it to the list to explore
76 # InternalCall if to follow internal call in libraries
77 if isinstance(ir, (InternalCall, LibraryCall)):
78 if not ir.function in explored:
79 to_explore.append(ir.function)
80
81 return True
82
83 def _detect(self) -> List[Output]:
84 results = []
85
86 for contract in self.compilation_unit.contracts_derived:
87 if contract.is_signature_only():
88 continue
89 funcs_payable = [function for function in contract.functions if function.payable]
90 if funcs_payable:
91 if self.do_no_send_ether(contract):
92 info: DETECTOR_INFO = ["Contract locking ether found:\n"]
93 info += ["\tContract ", contract, " has payable functions:\n"]
94 for function in funcs_payable:
95 info += ["\t - ", function, "\n"]
96 info += "\tBut does not have a function to withdraw the ether\n"
97
98 json = self.generate_result(info)
99
100 results.append(json)
101
102 return results
103
[end of slither/detectors/attributes/locked_ether.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/slither/detectors/attributes/locked_ether.py b/slither/detectors/attributes/locked_ether.py
--- a/slither/detectors/attributes/locked_ether.py
+++ b/slither/detectors/attributes/locked_ether.py
@@ -3,7 +3,7 @@
"""
from typing import List
-from slither.core.declarations.contract import Contract
+from slither.core.declarations import Contract, SolidityFunction
from slither.detectors.abstract_detector import (
AbstractDetector,
DetectorClassification,
@@ -17,7 +17,9 @@
NewContract,
LibraryCall,
InternalCall,
+ SolidityCall,
)
+from slither.slithir.variables import Constant
from slither.utils.output import Output
@@ -68,8 +70,28 @@
):
if ir.call_value and ir.call_value != 0:
return False
- if isinstance(ir, (LowLevelCall)):
- if ir.function_name in ["delegatecall", "callcode"]:
+ if isinstance(ir, (LowLevelCall)) and ir.function_name in [
+ "delegatecall",
+ "callcode",
+ ]:
+ return False
+ if isinstance(ir, SolidityCall):
+ call_can_send_ether = ir.function in [
+ SolidityFunction(
+ "delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)"
+ ),
+ SolidityFunction(
+ "callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"
+ ),
+ SolidityFunction(
+ "call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"
+ ),
+ ]
+ nonzero_call_value = call_can_send_ether and (
+ not isinstance(ir.arguments[2], Constant)
+ or ir.arguments[2].value != 0
+ )
+ if nonzero_call_value:
return False
# If a new internal call or librarycall
# Add it to the list to explore
|
{"golden_diff": "diff --git a/slither/detectors/attributes/locked_ether.py b/slither/detectors/attributes/locked_ether.py\n--- a/slither/detectors/attributes/locked_ether.py\n+++ b/slither/detectors/attributes/locked_ether.py\n@@ -3,7 +3,7 @@\n \"\"\"\n from typing import List\n \n-from slither.core.declarations.contract import Contract\n+from slither.core.declarations import Contract, SolidityFunction\n from slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n@@ -17,7 +17,9 @@\n NewContract,\n LibraryCall,\n InternalCall,\n+ SolidityCall,\n )\n+from slither.slithir.variables import Constant\n from slither.utils.output import Output\n \n \n@@ -68,8 +70,28 @@\n ):\n if ir.call_value and ir.call_value != 0:\n return False\n- if isinstance(ir, (LowLevelCall)):\n- if ir.function_name in [\"delegatecall\", \"callcode\"]:\n+ if isinstance(ir, (LowLevelCall)) and ir.function_name in [\n+ \"delegatecall\",\n+ \"callcode\",\n+ ]:\n+ return False\n+ if isinstance(ir, SolidityCall):\n+ call_can_send_ether = ir.function in [\n+ SolidityFunction(\n+ \"delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ SolidityFunction(\n+ \"callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ SolidityFunction(\n+ \"call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ ]\n+ nonzero_call_value = call_can_send_ether and (\n+ not isinstance(ir.arguments[2], Constant)\n+ or ir.arguments[2].value != 0\n+ )\n+ if nonzero_call_value:\n return False\n # If a new internal call or librarycall\n # Add it to the list to explore\n", "issue": "[Bug] contract reports ether as locked when ether is sent in Yul\nThe following contract reports ether as locked despite it being sent in a Yul block\r\n```\r\ncontract FPLockedEther {\r\n receive() payable external {}\r\n\r\n function yulSendEther() external {\r\n bool success;\r\n assembly {\r\n success := call(gas(), caller(), balance(address()), 0,0,0,0)\r\n }\r\n }\r\n}\r\n```\r\n```\r\nContract locking ether found:\r\n\tContract FPLockedEther (locked-ether.sol#1-13) has payable functions:\r\n\t - FPLockedEther.receive() (locked-ether.sol#2-3)\r\n\tBut does not have a function to withdraw the ether\r\nReference: https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether\r\n```\r\n\r\nIt could be that the IR is incorrect here as it should not be a `SOLIDITY_CALL`\r\n```\r\nContract FPLockedEther\r\n\tFunction FPLockedEther.receive() (*)\r\n\tFunction FPLockedEther.yulSendEther() (*)\r\n\t\tExpression: success = call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(gas()(),caller()(),balance(uint256)(address()()),0,0,0,0)\r\n\t\tIRs:\r\n\t\t\tTMP_0(uint256) = SOLIDITY_CALL gas()()\r\n\t\t\tTMP_1(address) := msg.sender(address)\r\n\t\t\tTMP_2 = CONVERT this to address\r\n\t\t\tTMP_3(uint256) = SOLIDITY_CALL balance(uint256)(TMP_2)\r\n\t\t\tTMP_4(uint256) = SOLIDITY_CALL call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(TMP_0,TMP_1,TMP_3,0,0,0,0)\r\n\t\t\tsuccess(bool) := TMP_4(uint256)\r\n```\n", "before_files": [{"content": "\"\"\"\n Check if ethers are locked in the contract\n\"\"\"\nfrom typing import List\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import (\n HighLevelCall,\n LowLevelCall,\n Send,\n Transfer,\n NewContract,\n LibraryCall,\n InternalCall,\n)\nfrom 
slither.utils.output import Output\n\n\nclass LockedEther(AbstractDetector): # pylint: disable=too-many-nested-blocks\n\n ARGUMENT = \"locked-ether\"\n HELP = \"Contracts that lock ether\"\n IMPACT = DetectorClassification.MEDIUM\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether\"\n\n WIKI_TITLE = \"Contracts that lock Ether\"\n WIKI_DESCRIPTION = \"Contract with a `payable` function, but without a withdrawal capacity.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\npragma solidity 0.4.24;\ncontract Locked{\n function receive() payable public{\n }\n}\n```\nEvery Ether sent to `Locked` will be lost.\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Remove the payable attribute or add a withdraw function.\"\n\n @staticmethod\n def do_no_send_ether(contract: Contract) -> bool:\n functions = contract.all_functions_called\n to_explore = functions\n explored = []\n while to_explore: # pylint: disable=too-many-nested-blocks\n functions = to_explore\n explored += to_explore\n to_explore = []\n for function in functions:\n calls = [c.name for c in function.internal_calls]\n if \"suicide(address)\" in calls or \"selfdestruct(address)\" in calls:\n return False\n for node in function.nodes:\n for ir in node.irs:\n if isinstance(\n ir,\n (Send, Transfer, HighLevelCall, LowLevelCall, NewContract),\n ):\n if ir.call_value and ir.call_value != 0:\n return False\n if isinstance(ir, (LowLevelCall)):\n if ir.function_name in [\"delegatecall\", \"callcode\"]:\n return False\n # If a new internal call or librarycall\n # Add it to the list to explore\n # InternalCall if to follow internal call in libraries\n if isinstance(ir, (InternalCall, LibraryCall)):\n if not ir.function in explored:\n to_explore.append(ir.function)\n\n return True\n\n def _detect(self) -> List[Output]:\n results = []\n\n for contract in self.compilation_unit.contracts_derived:\n if contract.is_signature_only():\n continue\n funcs_payable = [function for function in contract.functions if function.payable]\n if funcs_payable:\n if self.do_no_send_ether(contract):\n info: DETECTOR_INFO = [\"Contract locking ether found:\\n\"]\n info += [\"\\tContract \", contract, \" has payable functions:\\n\"]\n for function in funcs_payable:\n info += [\"\\t - \", function, \"\\n\"]\n info += \"\\tBut does not have a function to withdraw the ether\\n\"\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n", "path": "slither/detectors/attributes/locked_ether.py"}]}
| 1,926 | 485 |
gh_patches_debug_22330
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-1744
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Markdown preview fails CSRF validation checks
This is caused by the name change of the CSRF cookie.
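
One way to address this, mirrored by the patch shown further down, is to stop relying on the script bundled with django-markdownx and serve a project-local copy of markdownx.js that knows the renamed cookie. A rough sketch, where the js/markdownx.js path is an assumption about where such an override would live:
```
from django import forms
from markdownx.widgets import MarkdownxWidget


class MarkdownEditorWidget(MarkdownxWidget):
    @property
    def media(self):
        # Load our own markdownx.js (aware of the new CSRF cookie name)
        # instead of the copy shipped inside the markdownx package.
        return forms.Media(
            js=(
                "js/markdownx.js",
                "vendor/js/markdown-toolbar-element/index.umd.js",
            )
        )
```
The admin widget would need the same treatment.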
</issue>
<code>
[start of app/grandchallenge/core/widgets.py]
1 from django import forms
2 from markdownx.widgets import AdminMarkdownxWidget, MarkdownxWidget
3
4
5 class JSONEditorWidget(forms.Textarea):
6 template_name = "jsoneditor/jsoneditor_widget.html"
7
8 def __init__(self, schema=None, attrs=None):
9 super().__init__(attrs)
10 self.schema = schema
11
12 def get_context(self, name, value, attrs):
13 context = super().get_context(name, value, attrs)
14 context.update({"schema": self.schema})
15 return context
16
17 class Media:
18 css = {
19 "all": (
20 "https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.css",
21 )
22 }
23 js = (
24 "https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.js",
25 )
26
27
28 class MarkdownEditorWidget(MarkdownxWidget):
29 class Media(MarkdownxWidget.Media):
30 js = [
31 *MarkdownxWidget.Media.js,
32 "vendor/js/markdown-toolbar-element/index.umd.js",
33 ]
34
35
36 class MarkdownEditorAdminWidget(AdminMarkdownxWidget):
37 class Media(AdminMarkdownxWidget.Media):
38 css = {
39 "all": [
40 *AdminMarkdownxWidget.Media.css["all"],
41 "vendor/css/base.min.css",
42 "vendor/fa/css/all.css",
43 ]
44 }
45 js = [
46 *AdminMarkdownxWidget.Media.js,
47 "vendor/js/markdown-toolbar-element/index.umd.js",
48 ]
49
[end of app/grandchallenge/core/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/grandchallenge/core/widgets.py b/app/grandchallenge/core/widgets.py
--- a/app/grandchallenge/core/widgets.py
+++ b/app/grandchallenge/core/widgets.py
@@ -26,23 +26,29 @@
class MarkdownEditorWidget(MarkdownxWidget):
- class Media(MarkdownxWidget.Media):
- js = [
- *MarkdownxWidget.Media.js,
- "vendor/js/markdown-toolbar-element/index.umd.js",
- ]
+ @property
+ def media(self):
+ return forms.Media(
+ js=(
+ "js/markdownx.js",
+ "vendor/js/markdown-toolbar-element/index.umd.js",
+ )
+ )
class MarkdownEditorAdminWidget(AdminMarkdownxWidget):
- class Media(AdminMarkdownxWidget.Media):
- css = {
- "all": [
- *AdminMarkdownxWidget.Media.css["all"],
- "vendor/css/base.min.css",
- "vendor/fa/css/all.css",
- ]
- }
- js = [
- *AdminMarkdownxWidget.Media.js,
- "vendor/js/markdown-toolbar-element/index.umd.js",
- ]
+ @property
+ def media(self):
+ return forms.Media(
+ css={
+ "all": [
+ *AdminMarkdownxWidget.Media.css["all"],
+ "vendor/css/base.min.css",
+ "vendor/fa/css/all.css",
+ ]
+ },
+ js=[
+ "js/markdownx.js",
+ "vendor/js/markdown-toolbar-element/index.umd.js",
+ ],
+ )
|
{"golden_diff": "diff --git a/app/grandchallenge/core/widgets.py b/app/grandchallenge/core/widgets.py\n--- a/app/grandchallenge/core/widgets.py\n+++ b/app/grandchallenge/core/widgets.py\n@@ -26,23 +26,29 @@\n \n \n class MarkdownEditorWidget(MarkdownxWidget):\n- class Media(MarkdownxWidget.Media):\n- js = [\n- *MarkdownxWidget.Media.js,\n- \"vendor/js/markdown-toolbar-element/index.umd.js\",\n- ]\n+ @property\n+ def media(self):\n+ return forms.Media(\n+ js=(\n+ \"js/markdownx.js\",\n+ \"vendor/js/markdown-toolbar-element/index.umd.js\",\n+ )\n+ )\n \n \n class MarkdownEditorAdminWidget(AdminMarkdownxWidget):\n- class Media(AdminMarkdownxWidget.Media):\n- css = {\n- \"all\": [\n- *AdminMarkdownxWidget.Media.css[\"all\"],\n- \"vendor/css/base.min.css\",\n- \"vendor/fa/css/all.css\",\n- ]\n- }\n- js = [\n- *AdminMarkdownxWidget.Media.js,\n- \"vendor/js/markdown-toolbar-element/index.umd.js\",\n- ]\n+ @property\n+ def media(self):\n+ return forms.Media(\n+ css={\n+ \"all\": [\n+ *AdminMarkdownxWidget.Media.css[\"all\"],\n+ \"vendor/css/base.min.css\",\n+ \"vendor/fa/css/all.css\",\n+ ]\n+ },\n+ js=[\n+ \"js/markdownx.js\",\n+ \"vendor/js/markdown-toolbar-element/index.umd.js\",\n+ ],\n+ )\n", "issue": "Markdown preview fails CSRF validation checks\nCaused by the name change of the CSRF cookie.\n", "before_files": [{"content": "from django import forms\nfrom markdownx.widgets import AdminMarkdownxWidget, MarkdownxWidget\n\n\nclass JSONEditorWidget(forms.Textarea):\n template_name = \"jsoneditor/jsoneditor_widget.html\"\n\n def __init__(self, schema=None, attrs=None):\n super().__init__(attrs)\n self.schema = schema\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n context.update({\"schema\": self.schema})\n return context\n\n class Media:\n css = {\n \"all\": (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.css\",\n )\n }\n js = (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.js\",\n )\n\n\nclass MarkdownEditorWidget(MarkdownxWidget):\n class Media(MarkdownxWidget.Media):\n js = [\n *MarkdownxWidget.Media.js,\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n ]\n\n\nclass MarkdownEditorAdminWidget(AdminMarkdownxWidget):\n class Media(AdminMarkdownxWidget.Media):\n css = {\n \"all\": [\n *AdminMarkdownxWidget.Media.css[\"all\"],\n \"vendor/css/base.min.css\",\n \"vendor/fa/css/all.css\",\n ]\n }\n js = [\n *AdminMarkdownxWidget.Media.js,\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n ]\n", "path": "app/grandchallenge/core/widgets.py"}]}
| 963 | 355 |
gh_patches_debug_17287
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-999
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyDeck warning Mapbox API key not set
# Summary
Migrated a deck_gl chart to the PyDeck API. When the page is run, a UserWarning says that the Mapbox API key is not set. The old deck_gl_chart() function does not display this warning.
# Steps to reproduce
1. Get a personal Mapbox token and verify it is set using streamlit config show
[mapbox]
```
# Configure Streamlit to use a custom Mapbox token for elements like st.deck_gl_chart and st.map. If you don't do this you'll be using Streamlit's own token, which has limitations and is not guaranteed to always work. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels)
# Default: "pk.eyJ1IjoidGhpYWdvdCIsImEiOiJjamh3bm85NnkwMng4M3dydnNveWwzeWNzIn0.vCBDzNsEF2uFSFk2AM0WZQ"
# The value below was set in C:\Users\...path...\.streamlit\config.toml
token = "pk.eyJ1Ijoia25......................."
```
2. Run a PyDeck chart, any demo should do
3. Inspect the output from streamlit run app.py in the shell
## Expected behavior:
map displays, no message in shell
## Actual behavior:
Map displays, shell displays a UserWarning
```
You can now view your Streamlit app in your browser.
URL: http://localhost:8501
c:\apps\anaconda3\envs\ccadash\lib\site-packages\pydeck\bindings\deck.py:82: UserWarning: Mapbox API key is not set. This may impact available features of pydeck.
UserWarning,
```
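
The warning appears because PyDeck looks for a MAPBOX_API_KEY environment variable, while Streamlit keeps the token under mapbox.token in its own config, so the configured token never reaches PyDeck. A minimal sketch of bridging the two at startup (the function name is illustrative, and it assumes this runs before any PyDeck chart is created); the patch later in this record does essentially this:
```
import os

from streamlit import config


def _export_mapbox_token_for_pydeck():
    # PyDeck reads MAPBOX_API_KEY from the environment; copy Streamlit's
    # configured Mapbox token there so pydeck stops warning about a missing key.
    os.environ["MAPBOX_API_KEY"] = config.get_option("mapbox.token")
```
Calling it from run(), next to the other _fix_* helpers, would be one natural place.
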
## Is this a regression?
That is, did this use to work the way you expected in the past?
yes
# Debug info
- Streamlit version:0.53.0
- Python version: 3.7.3
- Using Conda? PipEnv? PyEnv? Pex? Conda
- OS version: Windows 10
- Browser version: Chrome Version 79.0.3945.117 (Official Build) (64-bit)
# Additional information
If needed, add any other context about the problem here. For example, did this bug come from https://discuss.streamlit.io or another site? Link the original source here!
</issue>
<code>
[start of lib/streamlit/bootstrap.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2018-2020 Streamlit Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17 import signal
18 import sys
19
20 import click
21 import tornado.ioloop
22
23 from streamlit import config
24 from streamlit import net_util
25 from streamlit import url_util
26 from streamlit import env_util
27 from streamlit import util
28 from streamlit.Report import Report
29 from streamlit.logger import get_logger
30 from streamlit.server.Server import Server
31
32 LOGGER = get_logger(__name__)
33
34 # Wait for 1 second before opening a browser. This gives old tabs a chance to
35 # reconnect.
36 # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.
37 BROWSER_WAIT_TIMEOUT_SEC = 1
38
39
40 def _set_up_signal_handler():
41 LOGGER.debug("Setting up signal handler")
42
43 def signal_handler(signal_number, stack_frame):
44 # The server will shut down its threads and stop the ioloop
45 Server.get_current().stop()
46
47 signal.signal(signal.SIGTERM, signal_handler)
48 signal.signal(signal.SIGINT, signal_handler)
49 if sys.platform == "win32":
50 signal.signal(signal.SIGBREAK, signal_handler)
51 else:
52 signal.signal(signal.SIGQUIT, signal_handler)
53
54
55 def _fix_sys_path(script_path):
56 """Add the script's folder to the sys path.
57
58 Python normally does this automatically, but since we exec the script
59 ourselves we need to do it instead.
60 """
61 sys.path.insert(0, os.path.dirname(script_path))
62
63
64 def _fix_matplotlib_crash():
65 """Set Matplotlib backend to avoid a crash.
66
67 The default Matplotlib backend crashes Python on OSX when run on a thread
68 that's not the main thread, so here we set a safer backend as a fix.
69 Users can always disable this behavior by setting the config
70 runner.fixMatplotlib = false.
71
72 This fix is OS-independent. We didn't see a good reason to make this
73 Mac-only. Consistency within Streamlit seemed more important.
74 """
75 if config.get_option("runner.fixMatplotlib"):
76 try:
77 # TODO: a better option may be to set
78 # os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards
79 # the top of __init__.py, before importing anything that imports
80 # pandas (which imports matplotlib). Alternately, we could set
81 # this environment variable in a new entrypoint defined in
82 # setup.py. Both of these introduce additional trickiness: they
83 # need to run without consulting streamlit.config.get_option,
84 # because this would import streamlit, and therefore matplotlib.
85 import matplotlib
86
87 matplotlib.use("Agg")
88 except ImportError:
89 pass
90
91
92 def _fix_tornado_crash():
93 """Set default asyncio policy to be compatible with Tornado 6.
94
95 Tornado 6 (at least) is not compatible with the default
96 asyncio implementation on Windows. So here we
97 pick the older SelectorEventLoopPolicy when the OS is Windows
98 if the known-incompatible default policy is in use.
99
100 This has to happen as early as possible to make it a low priority and
101 overrideable
102
103 See: https://github.com/tornadoweb/tornado/issues/2608
104
105 FIXME: if/when tornado supports the defaults in asyncio,
106 remove and bump tornado requirement for py38
107 """
108 if env_util.IS_WINDOWS and sys.version_info >= (3, 8):
109 import asyncio
110
111 try:
112 from asyncio import (
113 WindowsProactorEventLoopPolicy,
114 WindowsSelectorEventLoopPolicy,
115 )
116 except ImportError:
117 pass
118 # Not affected
119 else:
120 if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
121 # WindowsProactorEventLoopPolicy is not compatible with
122 # Tornado 6 fallback to the pre-3.8 default of Selector
123 asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
124
125
126 def _fix_sys_argv(script_path, args):
127 """sys.argv needs to exclude streamlit arguments and parameters
128 and be set to what a user's script may expect.
129 """
130 import sys
131
132 sys.argv = [script_path] + list(args)
133
134
135 def _on_server_start(server):
136 _print_url()
137
138 def maybe_open_browser():
139 if config.get_option("server.headless"):
140 # Don't open browser when in headless mode.
141 return
142
143 if server.browser_is_connected:
144 # Don't auto-open browser if there's already a browser connected.
145 # This can happen if there's an old tab repeatedly trying to
146             # connect, and it happens to succeed before we launch the browser.
147 return
148
149 if config.is_manually_set("browser.serverAddress"):
150 addr = config.get_option("browser.serverAddress")
151 else:
152 addr = "localhost"
153
154 util.open_browser(Report.get_url(addr))
155
156 # Schedule the browser to open using the IO Loop on the main thread, but
157 # only if no other browser connects within 1s.
158 ioloop = tornado.ioloop.IOLoop.current()
159 ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)
160
161
162 def _print_url():
163 title_message = "You can now view your Streamlit app in your browser."
164 named_urls = []
165
166 if config.is_manually_set("browser.serverAddress"):
167 named_urls = [
168 ("URL", Report.get_url(config.get_option("browser.serverAddress")))
169 ]
170
171 elif config.get_option("server.headless"):
172 named_urls = [
173 ("Network URL", Report.get_url(net_util.get_internal_ip())),
174 ("External URL", Report.get_url(net_util.get_external_ip())),
175 ]
176
177 else:
178 named_urls = [
179 ("Local URL", Report.get_url("localhost")),
180 ("Network URL", Report.get_url(net_util.get_internal_ip())),
181 ]
182
183 click.secho("")
184 click.secho(" %s" % title_message, fg="blue", bold=True)
185 click.secho("")
186
187 for url_name, url in named_urls:
188 url_util.print_url(url_name, url)
189
190 click.secho("")
191
192
193 def run(script_path, command_line, args):
194 """Run a script in a separate thread and start a server for the app.
195
196 This starts a blocking ioloop.
197
198 Parameters
199 ----------
200 script_path : str
201 command_line : str
202 args : [str]
203
204 """
205 _fix_sys_path(script_path)
206 _fix_matplotlib_crash()
207 _fix_tornado_crash()
208 _fix_sys_argv(script_path, args)
209
210 # Install a signal handler that will shut down the ioloop
211 # and close all our threads
212 _set_up_signal_handler()
213
214 ioloop = tornado.ioloop.IOLoop.current()
215
216 # Create and start the server.
217 server = Server(ioloop, script_path, command_line)
218 server.start(_on_server_start)
219
220     # (Must come after start(), because this starts a new thread and start() may
221 # call sys.exit() which doesn't kill other threads.
222 server.add_preheated_report_session()
223
224 # Start the ioloop. This function will not return until the
225 # server is shut down.
226 ioloop.start()
227
[end of lib/streamlit/bootstrap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py
--- a/lib/streamlit/bootstrap.py
+++ b/lib/streamlit/bootstrap.py
@@ -159,6 +159,12 @@
ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)
+def _fix_pydeck_mapbox_api_warning():
+ """Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception"""
+
+ os.environ["MAPBOX_API_KEY"] = config.get_option("mapbox.token")
+
+
def _print_url():
title_message = "You can now view your Streamlit app in your browser."
named_urls = []
@@ -206,6 +212,7 @@
_fix_matplotlib_crash()
_fix_tornado_crash()
_fix_sys_argv(script_path, args)
+ _fix_pydeck_mapbox_api_warning()
# Install a signal handler that will shut down the ioloop
# and close all our threads
|
{"golden_diff": "diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py\n--- a/lib/streamlit/bootstrap.py\n+++ b/lib/streamlit/bootstrap.py\n@@ -159,6 +159,12 @@\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n \n \n+def _fix_pydeck_mapbox_api_warning():\n+ \"\"\"Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception\"\"\"\n+\n+ os.environ[\"MAPBOX_API_KEY\"] = config.get_option(\"mapbox.token\")\n+\n+\n def _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n@@ -206,6 +212,7 @@\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n+ _fix_pydeck_mapbox_api_warning()\n \n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n", "issue": "PyDeck warning Mapbox API key not set\n# Summary\r\n\r\nMigrated a deck_gl chart to PyDeck API. When page is run get a UserWarning that the Mapbox API key is not set. Old deck_gl_chart() function does not display the warning. \r\n\r\n# Steps to reproduce\r\n\r\n1: Get a personal mapbox token, Verify it is set using streamlit config show\r\n\r\n[mapbox]\r\n\r\n```\r\n# Configure Streamlit to use a custom Mapbox token for elements like st.deck_gl_chart and st.map. If you don't do this you'll be using Streamlit's own token, which has limitations and is not guaranteed to always work. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels)\r\n# Default: \"pk.eyJ1IjoidGhpYWdvdCIsImEiOiJjamh3bm85NnkwMng4M3dydnNveWwzeWNzIn0.vCBDzNsEF2uFSFk2AM0WZQ\"\r\n# The value below was set in C:\\Users\\...path...\\.streamlit\\config.toml\r\ntoken = \"pk.eyJ1Ijoia25.......................\"\r\n```\r\n\r\n2. Run a PyDeck chart, any demo should do\r\n3. Inspect the output from streamlit run app.py in the shell\r\n\r\n## Expected behavior:\r\n\r\nmap displays, no message in shell\r\n\r\n## Actual behavior:\r\n\r\nMap displays, shell displays a UserWarning\r\n\r\n```\r\nYou can now view your Streamlit app in your browser.\r\n\r\n URL: http://localhost:8501\r\n\r\nc:\\apps\\anaconda3\\envs\\ccadash\\lib\\site-packages\\pydeck\\bindings\\deck.py:82: UserWarning: Mapbox API key is not set. This may impact available features of pydeck.\r\n UserWarning,\r\n```\r\n\r\n## Is this a regression?\r\n\r\nThat is, did this use to work the way you expected in the past?\r\nyes\r\n\r\n# Debug info\r\n\r\n- Streamlit version:0.53.0\r\n- Python version: 3.7.3\r\n- Using Conda? PipEnv? PyEnv? Pex? Conda\r\n- OS version: Windows 10\r\n- Browser version: Chrome Version 79.0.3945.117 (Official Build) (64-bit)\r\n\r\n# Additional information\r\n\r\nIf needed, add any other context about the problem here. For exmaple, did this bug come from https://discuss.streamlit.io or another site? 
Link the original source here!\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\n\nimport click\nimport tornado.ioloop\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import env_util\nfrom streamlit import util\nfrom streamlit.Report import Report\nfrom streamlit.logger import get_logger\nfrom streamlit.server.Server import Server\n\nLOGGER = get_logger(__name__)\n\n# Wait for 1 second before opening a browser. This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_tornado_crash():\n \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n\n Tornado 6 (at least) is not compatible with the default\n asyncio implementation on Windows. 
So here we\n pick the older SelectorEventLoopPolicy when the OS is Windows\n if the known-incompatible default policy is in use.\n\n This has to happen as early as possible to make it a low priority and\n overrideable\n\n See: https://github.com/tornadoweb/tornado/issues/2608\n\n FIXME: if/when tornado supports the defaults in asyncio,\n remove and bump tornado requirement for py38\n \"\"\"\n if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n import asyncio\n\n try:\n from asyncio import (\n WindowsProactorEventLoopPolicy,\n WindowsSelectorEventLoopPolicy,\n )\n except ImportError:\n pass\n # Not affected\n else:\n if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:\n # WindowsProactorEventLoopPolicy is not compatible with\n # Tornado 6 fallback to the pre-3.8 default of Selector\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _print_url()\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif config.get_option(\"server.headless\"):\n named_urls = [\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n (\"External URL\", Report.get_url(net_util.get_external_ip())),\n ]\n\n else:\n named_urls = [\n (\"Local URL\", Report.get_url(\"localhost\")),\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n ]\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n\ndef run(script_path, command_line, args):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must com after start(), because this starts a new thread and start() may\n # call sys.exit() which doesn't kill other 
threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n", "path": "lib/streamlit/bootstrap.py"}]}
| 3,378 | 221 |
gh_patches_debug_34538
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-480
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Aliasing for !resources command.
Currently the !resources command links to both the [PyDis resource](https://pythondiscord.com/pages/resources/
) page, and the [PyDis tools](https://pythondiscord.com/pages/tools/) page.
I feel that the alias !tools should be added to the command, or that the links should be separated into two commands, both to make it easier for users to access the tools page and to increase awareness of the page.
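For reference, an alias in this bot is just a hidden command that forwards to the real one. A minimal sketch of what a `!tools` alias could look like is below; it assumes a `site tools` subcommand exists to forward to (the current `site` group only exposes `resources`) and reuses the `invoke` helper already defined on the `Alias` cog.

```python
from discord.ext.commands import Context, command

# Sketch only: a method to add inside the existing Alias cog in
# bot/cogs/alias.py. `self.invoke` is the cog's own helper that looks up
# and runs the named command; "site tools" is an assumed subcommand.
@command(name="tools", hidden=True)
async def site_tools_alias(self, ctx: Context) -> None:
    """Alias for invoking <prefix>site tools."""
    await self.invoke(ctx, "site tools")
```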
</issue>
<code>
[start of bot/cogs/site.py]
1 import logging
2
3 from discord import Colour, Embed
4 from discord.ext.commands import Bot, Cog, Context, group
5
6 from bot.constants import Channels, STAFF_ROLES, URLs
7 from bot.decorators import redirect_output
8 from bot.pagination import LinePaginator
9
10 log = logging.getLogger(__name__)
11
12 PAGES_URL = f"{URLs.site_schema}{URLs.site}/pages"
13
14
15 class Site(Cog):
16 """Commands for linking to different parts of the site."""
17
18 def __init__(self, bot: Bot):
19 self.bot = bot
20
21 @group(name="site", aliases=("s",), invoke_without_command=True)
22 async def site_group(self, ctx: Context) -> None:
23 """Commands for getting info about our website."""
24 await ctx.invoke(self.bot.get_command("help"), "site")
25
26 @site_group.command(name="home", aliases=("about",))
27 async def site_main(self, ctx: Context) -> None:
28 """Info about the website itself."""
29 url = f"{URLs.site_schema}{URLs.site}/"
30
31 embed = Embed(title="Python Discord website")
32 embed.set_footer(text=url)
33 embed.colour = Colour.blurple()
34 embed.description = (
35 f"[Our official website]({url}) is an open-source community project "
36 "created with Python and Flask. It contains information about the server "
37 "itself, lets you sign up for upcoming events, has its own wiki, contains "
38 "a list of valuable learning resources, and much more."
39 )
40
41 await ctx.send(embed=embed)
42
43 @site_group.command(name="resources")
44 async def site_resources(self, ctx: Context) -> None:
45 """Info about the site's Resources page."""
46 learning_url = f"{PAGES_URL}/resources"
47 tools_url = f"{PAGES_URL}/tools"
48
49 embed = Embed(title="Resources & Tools")
50 embed.set_footer(text=f"{learning_url} | {tools_url}")
51 embed.colour = Colour.blurple()
52 embed.description = (
53 f"The [Resources page]({learning_url}) on our website contains a "
54 "list of hand-selected goodies that we regularly recommend "
55 f"to both beginners and experts. The [Tools page]({tools_url}) "
56 "contains a couple of the most popular tools for programming in "
57 "Python."
58 )
59
60 await ctx.send(embed=embed)
61
62 @site_group.command(name="help")
63 async def site_help(self, ctx: Context) -> None:
64 """Info about the site's Getting Help page."""
65 url = f"{PAGES_URL}/asking-good-questions"
66
67 embed = Embed(title="Asking Good Questions")
68 embed.set_footer(text=url)
69 embed.colour = Colour.blurple()
70 embed.description = (
71 "Asking the right question about something that's new to you can sometimes be tricky. "
72 f"To help with this, we've created a [guide to asking good questions]({url}) on our website. "
73 "It contains everything you need to get the very best help from our community."
74 )
75
76 await ctx.send(embed=embed)
77
78 @site_group.command(name="faq")
79 async def site_faq(self, ctx: Context) -> None:
80 """Info about the site's FAQ page."""
81 url = f"{PAGES_URL}/frequently-asked-questions"
82
83 embed = Embed(title="FAQ")
84 embed.set_footer(text=url)
85 embed.colour = Colour.blurple()
86 embed.description = (
87 "As the largest Python community on Discord, we get hundreds of questions every day. "
88 "Many of these questions have been asked before. We've compiled a list of the most "
89 "frequently asked questions along with their answers, which can be found on "
90 f"our [FAQ page]({url})."
91 )
92
93 await ctx.send(embed=embed)
94
95 @site_group.command(aliases=['r', 'rule'], name='rules')
96 @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)
97 async def site_rules(self, ctx: Context, *rules: int) -> None:
98 """Provides a link to all rules or, if specified, displays specific rule(s)."""
99 rules_embed = Embed(title='Rules', color=Colour.blurple())
100 rules_embed.url = f"{PAGES_URL}/rules"
101
102 if not rules:
103 # Rules were not submitted. Return the default description.
104 rules_embed.description = (
105 "The rules and guidelines that apply to this community can be found on"
106 f" our [rules page]({PAGES_URL}/rules). We expect"
107 " all members of the community to have read and understood these."
108 )
109
110 await ctx.send(embed=rules_embed)
111 return
112
113 full_rules = await self.bot.api_client.get('rules', params={'link_format': 'md'})
114 invalid_indices = tuple(
115 pick
116 for pick in rules
117 if pick < 0 or pick >= len(full_rules)
118 )
119
120 if invalid_indices:
121 indices = ', '.join(map(str, invalid_indices))
122 await ctx.send(f":x: Invalid rule indices {indices}")
123 return
124
125 final_rules = tuple(f"**{pick}.** {full_rules[pick]}" for pick in rules)
126
127 await LinePaginator.paginate(final_rules, ctx, rules_embed, max_lines=3)
128
129
130 def setup(bot: Bot) -> None:
131 """Site cog load."""
132 bot.add_cog(Site(bot))
133 log.info("Cog loaded: Site")
134
[end of bot/cogs/site.py]
[start of bot/cogs/alias.py]
1 import inspect
2 import logging
3 from typing import Union
4
5 from discord import Colour, Embed, Member, User
6 from discord.ext.commands import Bot, Cog, Command, Context, clean_content, command, group
7
8 from bot.cogs.watchchannels.watchchannel import proxy_user
9 from bot.converters import TagNameConverter
10 from bot.pagination import LinePaginator
11
12 log = logging.getLogger(__name__)
13
14
15 class Alias (Cog):
16 """Aliases for commonly used commands."""
17
18 def __init__(self, bot: Bot):
19 self.bot = bot
20
21 async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None:
22 """Invokes a command with args and kwargs."""
23 log.debug(f"{cmd_name} was invoked through an alias")
24 cmd = self.bot.get_command(cmd_name)
25 if not cmd:
26 return log.warning(f'Did not find command "{cmd_name}" to invoke.')
27 elif not await cmd.can_run(ctx):
28 return log.warning(
29 f'{str(ctx.author)} tried to run the command "{cmd_name}"'
30 )
31
32 await ctx.invoke(cmd, *args, **kwargs)
33
34 @command(name='aliases')
35 async def aliases_command(self, ctx: Context) -> None:
36 """Show configured aliases on the bot."""
37 embed = Embed(
38 title='Configured aliases',
39 colour=Colour.blue()
40 )
41 await LinePaginator.paginate(
42 (
43 f"• `{ctx.prefix}{value.name}` "
44 f"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`"
45 for name, value in inspect.getmembers(self)
46 if isinstance(value, Command) and name.endswith('_alias')
47 ),
48 ctx, embed, empty=False, max_lines=20
49 )
50
51 @command(name="resources", aliases=("resource",), hidden=True)
52 async def site_resources_alias(self, ctx: Context) -> None:
53 """Alias for invoking <prefix>site resources."""
54 await self.invoke(ctx, "site resources")
55
56 @command(name="watch", hidden=True)
57 async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:
58 """Alias for invoking <prefix>bigbrother watch [user] [reason]."""
59 await self.invoke(ctx, "bigbrother watch", user, reason=reason)
60
61 @command(name="unwatch", hidden=True)
62 async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:
63 """Alias for invoking <prefix>bigbrother unwatch [user] [reason]."""
64 await self.invoke(ctx, "bigbrother unwatch", user, reason=reason)
65
66 @command(name="home", hidden=True)
67 async def site_home_alias(self, ctx: Context) -> None:
68 """Alias for invoking <prefix>site home."""
69 await self.invoke(ctx, "site home")
70
71 @command(name="faq", hidden=True)
72 async def site_faq_alias(self, ctx: Context) -> None:
73 """Alias for invoking <prefix>site faq."""
74 await self.invoke(ctx, "site faq")
75
76 @command(name="rules", hidden=True)
77 async def site_rules_alias(self, ctx: Context) -> None:
78 """Alias for invoking <prefix>site rules."""
79 await self.invoke(ctx, "site rules")
80
81 @command(name="reload", hidden=True)
82 async def cogs_reload_alias(self, ctx: Context, *, cog_name: str) -> None:
83 """Alias for invoking <prefix>cogs reload [cog_name]."""
84 await self.invoke(ctx, "cogs reload", cog_name)
85
86 @command(name="defon", hidden=True)
87 async def defcon_enable_alias(self, ctx: Context) -> None:
88 """Alias for invoking <prefix>defcon enable."""
89 await self.invoke(ctx, "defcon enable")
90
91 @command(name="defoff", hidden=True)
92 async def defcon_disable_alias(self, ctx: Context) -> None:
93 """Alias for invoking <prefix>defcon disable."""
94 await self.invoke(ctx, "defcon disable")
95
96 @command(name="exception", hidden=True)
97 async def tags_get_traceback_alias(self, ctx: Context) -> None:
98 """Alias for invoking <prefix>tags get traceback."""
99 await self.invoke(ctx, "tags get", tag_name="traceback")
100
101 @group(name="get",
102 aliases=("show", "g"),
103 hidden=True,
104 invoke_without_command=True)
105 async def get_group_alias(self, ctx: Context) -> None:
106 """Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`."""
107 pass
108
109 @get_group_alias.command(name="tags", aliases=("tag", "t"), hidden=True)
110 async def tags_get_alias(
111 self, ctx: Context, *, tag_name: TagNameConverter = None
112 ) -> None:
113 """
114 Alias for invoking <prefix>tags get [tag_name].
115
116 tag_name: str - tag to be viewed.
117 """
118 await self.invoke(ctx, "tags get", tag_name=tag_name)
119
120 @get_group_alias.command(name="docs", aliases=("doc", "d"), hidden=True)
121 async def docs_get_alias(
122 self, ctx: Context, symbol: clean_content = None
123 ) -> None:
124 """Alias for invoking <prefix>docs get [symbol]."""
125 await self.invoke(ctx, "docs get", symbol)
126
127 @command(name="nominate", hidden=True)
128 async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:
129 """Alias for invoking <prefix>talentpool add [user] [reason]."""
130 await self.invoke(ctx, "talentpool add", user, reason=reason)
131
132 @command(name="unnominate", hidden=True)
133 async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:
134 """Alias for invoking <prefix>nomination end [user] [reason]."""
135 await self.invoke(ctx, "nomination end", user, reason=reason)
136
137 @command(name="nominees", hidden=True)
138 async def nominees_alias(self, ctx: Context) -> None:
139 """Alias for invoking <prefix>tp watched."""
140 await self.invoke(ctx, "talentpool watched")
141
142
143 def setup(bot: Bot) -> None:
144 """Alias cog load."""
145 bot.add_cog(Alias(bot))
146 log.info("Cog loaded: Alias")
147
[end of bot/cogs/alias.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py
--- a/bot/cogs/alias.py
+++ b/bot/cogs/alias.py
@@ -53,6 +53,11 @@
"""Alias for invoking <prefix>site resources."""
await self.invoke(ctx, "site resources")
+ @command(name="tools", hidden=True)
+ async def site_tools_alias(self, ctx: Context) -> None:
+ """Alias for invoking <prefix>site tools."""
+ await self.invoke(ctx, "site tools")
+
@command(name="watch", hidden=True)
async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:
"""Alias for invoking <prefix>bigbrother watch [user] [reason]."""
diff --git a/bot/cogs/site.py b/bot/cogs/site.py
--- a/bot/cogs/site.py
+++ b/bot/cogs/site.py
@@ -44,17 +44,29 @@
async def site_resources(self, ctx: Context) -> None:
"""Info about the site's Resources page."""
learning_url = f"{PAGES_URL}/resources"
- tools_url = f"{PAGES_URL}/tools"
- embed = Embed(title="Resources & Tools")
- embed.set_footer(text=f"{learning_url} | {tools_url}")
+ embed = Embed(title="Resources")
+ embed.set_footer(text=f"{learning_url}")
embed.colour = Colour.blurple()
embed.description = (
f"The [Resources page]({learning_url}) on our website contains a "
- "list of hand-selected goodies that we regularly recommend "
- f"to both beginners and experts. The [Tools page]({tools_url}) "
- "contains a couple of the most popular tools for programming in "
- "Python."
+ "list of hand-selected learning resources that we regularly recommend "
+ f"to both beginners and experts."
+ )
+
+ await ctx.send(embed=embed)
+
+ @site_group.command(name="tools")
+ async def site_tools(self, ctx: Context) -> None:
+ """Info about the site's Tools page."""
+ tools_url = f"{PAGES_URL}/tools"
+
+ embed = Embed(title="Tools")
+ embed.set_footer(text=f"{tools_url}")
+ embed.colour = Colour.blurple()
+ embed.description = (
+ f"The [Tools page]({tools_url}) on our website contains a "
+ f"couple of the most popular tools for programming in Python."
)
await ctx.send(embed=embed)
|
{"golden_diff": "diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py\n--- a/bot/cogs/alias.py\n+++ b/bot/cogs/alias.py\n@@ -53,6 +53,11 @@\n \"\"\"Alias for invoking <prefix>site resources.\"\"\"\n await self.invoke(ctx, \"site resources\")\n \n+ @command(name=\"tools\", hidden=True)\n+ async def site_tools_alias(self, ctx: Context) -> None:\n+ \"\"\"Alias for invoking <prefix>site tools.\"\"\"\n+ await self.invoke(ctx, \"site tools\")\n+\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother watch [user] [reason].\"\"\"\ndiff --git a/bot/cogs/site.py b/bot/cogs/site.py\n--- a/bot/cogs/site.py\n+++ b/bot/cogs/site.py\n@@ -44,17 +44,29 @@\n async def site_resources(self, ctx: Context) -> None:\n \"\"\"Info about the site's Resources page.\"\"\"\n learning_url = f\"{PAGES_URL}/resources\"\n- tools_url = f\"{PAGES_URL}/tools\"\n \n- embed = Embed(title=\"Resources & Tools\")\n- embed.set_footer(text=f\"{learning_url} | {tools_url}\")\n+ embed = Embed(title=\"Resources\")\n+ embed.set_footer(text=f\"{learning_url}\")\n embed.colour = Colour.blurple()\n embed.description = (\n f\"The [Resources page]({learning_url}) on our website contains a \"\n- \"list of hand-selected goodies that we regularly recommend \"\n- f\"to both beginners and experts. The [Tools page]({tools_url}) \"\n- \"contains a couple of the most popular tools for programming in \"\n- \"Python.\"\n+ \"list of hand-selected learning resources that we regularly recommend \"\n+ f\"to both beginners and experts.\"\n+ )\n+\n+ await ctx.send(embed=embed)\n+\n+ @site_group.command(name=\"tools\")\n+ async def site_tools(self, ctx: Context) -> None:\n+ \"\"\"Info about the site's Tools page.\"\"\"\n+ tools_url = f\"{PAGES_URL}/tools\"\n+\n+ embed = Embed(title=\"Tools\")\n+ embed.set_footer(text=f\"{tools_url}\")\n+ embed.colour = Colour.blurple()\n+ embed.description = (\n+ f\"The [Tools page]({tools_url}) on our website contains a \"\n+ f\"couple of the most popular tools for programming in Python.\"\n )\n \n await ctx.send(embed=embed)\n", "issue": "Aliasing for !resources command.\nCurrently the !resources command links to both the [PyDis resource](https://pythondiscord.com/pages/resources/\r\n) page, and the [PyDis tools](https://pythondiscord.com/pages/tools/) page.\r\n\r\nI feel that the alias of !tools should be added to the command, or the links should be separated into two commands, to both make it easier for users to access the tools page, but also increase awareness of the page.\n", "before_files": [{"content": "import logging\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, STAFF_ROLES, URLs\nfrom bot.decorators import redirect_output\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nPAGES_URL = f\"{URLs.site_schema}{URLs.site}/pages\"\n\n\nclass Site(Cog):\n \"\"\"Commands for linking to different parts of the site.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @group(name=\"site\", aliases=(\"s\",), invoke_without_command=True)\n async def site_group(self, ctx: Context) -> None:\n \"\"\"Commands for getting info about our website.\"\"\"\n await ctx.invoke(self.bot.get_command(\"help\"), \"site\")\n\n @site_group.command(name=\"home\", aliases=(\"about\",))\n async def site_main(self, ctx: Context) -> None:\n \"\"\"Info about the website 
itself.\"\"\"\n url = f\"{URLs.site_schema}{URLs.site}/\"\n\n embed = Embed(title=\"Python Discord website\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n f\"[Our official website]({url}) is an open-source community project \"\n \"created with Python and Flask. It contains information about the server \"\n \"itself, lets you sign up for upcoming events, has its own wiki, contains \"\n \"a list of valuable learning resources, and much more.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"resources\")\n async def site_resources(self, ctx: Context) -> None:\n \"\"\"Info about the site's Resources page.\"\"\"\n learning_url = f\"{PAGES_URL}/resources\"\n tools_url = f\"{PAGES_URL}/tools\"\n\n embed = Embed(title=\"Resources & Tools\")\n embed.set_footer(text=f\"{learning_url} | {tools_url}\")\n embed.colour = Colour.blurple()\n embed.description = (\n f\"The [Resources page]({learning_url}) on our website contains a \"\n \"list of hand-selected goodies that we regularly recommend \"\n f\"to both beginners and experts. The [Tools page]({tools_url}) \"\n \"contains a couple of the most popular tools for programming in \"\n \"Python.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"help\")\n async def site_help(self, ctx: Context) -> None:\n \"\"\"Info about the site's Getting Help page.\"\"\"\n url = f\"{PAGES_URL}/asking-good-questions\"\n\n embed = Embed(title=\"Asking Good Questions\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n \"Asking the right question about something that's new to you can sometimes be tricky. \"\n f\"To help with this, we've created a [guide to asking good questions]({url}) on our website. \"\n \"It contains everything you need to get the very best help from our community.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"faq\")\n async def site_faq(self, ctx: Context) -> None:\n \"\"\"Info about the site's FAQ page.\"\"\"\n url = f\"{PAGES_URL}/frequently-asked-questions\"\n\n embed = Embed(title=\"FAQ\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n \"As the largest Python community on Discord, we get hundreds of questions every day. \"\n \"Many of these questions have been asked before. We've compiled a list of the most \"\n \"frequently asked questions along with their answers, which can be found on \"\n f\"our [FAQ page]({url}).\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(aliases=['r', 'rule'], name='rules')\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def site_rules(self, ctx: Context, *rules: int) -> None:\n \"\"\"Provides a link to all rules or, if specified, displays specific rule(s).\"\"\"\n rules_embed = Embed(title='Rules', color=Colour.blurple())\n rules_embed.url = f\"{PAGES_URL}/rules\"\n\n if not rules:\n # Rules were not submitted. Return the default description.\n rules_embed.description = (\n \"The rules and guidelines that apply to this community can be found on\"\n f\" our [rules page]({PAGES_URL}/rules). 
We expect\"\n \" all members of the community to have read and understood these.\"\n )\n\n await ctx.send(embed=rules_embed)\n return\n\n full_rules = await self.bot.api_client.get('rules', params={'link_format': 'md'})\n invalid_indices = tuple(\n pick\n for pick in rules\n if pick < 0 or pick >= len(full_rules)\n )\n\n if invalid_indices:\n indices = ', '.join(map(str, invalid_indices))\n await ctx.send(f\":x: Invalid rule indices {indices}\")\n return\n\n final_rules = tuple(f\"**{pick}.** {full_rules[pick]}\" for pick in rules)\n\n await LinePaginator.paginate(final_rules, ctx, rules_embed, max_lines=3)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Site cog load.\"\"\"\n bot.add_cog(Site(bot))\n log.info(\"Cog loaded: Site\")\n", "path": "bot/cogs/site.py"}, {"content": "import inspect\nimport logging\nfrom typing import Union\n\nfrom discord import Colour, Embed, Member, User\nfrom discord.ext.commands import Bot, Cog, Command, Context, clean_content, command, group\n\nfrom bot.cogs.watchchannels.watchchannel import proxy_user\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias (Cog):\n \"\"\"Aliases for commonly used commands.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None:\n \"\"\"Invokes a command with args and kwargs.\"\"\"\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, ctx: Context) -> None:\n \"\"\"Show configured aliases on the bot.\"\"\"\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site resources.\"\"\"\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother watch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother unwatch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother unwatch\", user, reason=reason)\n\n @command(name=\"home\", hidden=True)\n async def site_home_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site home.\"\"\"\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site faq.\"\"\"\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", hidden=True)\n 
async def site_rules_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site rules.\"\"\"\n await self.invoke(ctx, \"site rules\")\n\n @command(name=\"reload\", hidden=True)\n async def cogs_reload_alias(self, ctx: Context, *, cog_name: str) -> None:\n \"\"\"Alias for invoking <prefix>cogs reload [cog_name].\"\"\"\n await self.invoke(ctx, \"cogs reload\", cog_name)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon enable.\"\"\"\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon disable.\"\"\"\n await self.invoke(ctx, \"defcon disable\")\n\n @command(name=\"exception\", hidden=True)\n async def tags_get_traceback_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tags get traceback.\"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=\"traceback\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx: Context) -> None:\n \"\"\"Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`.\"\"\"\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ) -> None:\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ) -> None:\n \"\"\"Alias for invoking <prefix>docs get [symbol].\"\"\"\n await self.invoke(ctx, \"docs get\", symbol)\n\n @command(name=\"nominate\", hidden=True)\n async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>talentpool add [user] [reason].\"\"\"\n await self.invoke(ctx, \"talentpool add\", user, reason=reason)\n\n @command(name=\"unnominate\", hidden=True)\n async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>nomination end [user] [reason].\"\"\"\n await self.invoke(ctx, \"nomination end\", user, reason=reason)\n\n @command(name=\"nominees\", hidden=True)\n async def nominees_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tp watched.\"\"\"\n await self.invoke(ctx, \"talentpool watched\")\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Alias cog load.\"\"\"\n bot.add_cog(Alias(bot))\n log.info(\"Cog loaded: Alias\")\n", "path": "bot/cogs/alias.py"}]}
| 3,897 | 588 |
gh_patches_debug_31054
|
rasdani/github-patches
|
git_diff
|
robocorp__rpaframework-662
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`RPA.core.webdriver` used by Selenium can't download latest Mac arm64 webdriver
💻 System affected: M1/M2 Macs with no working `chromedriver` available in PATH under Chrome browser version **106.0.5249.61** and onwards.
[Fix](https://github.com/SergeyPirogov/webdriver_manager/issues/446#issuecomment-1274558712) is already merged upstream but has not yet been released in PyPI.
### Reproduce
```python
inv code.test-robot -r browser -t "Open Browser With Dict Options"
```
### Solution
The desired fix would be to bump the `webdriver-manager` package to `>3.8.3` once such a version is released.
### Workaround
Meanwhile, we can implement the [workaround](https://github.com/SergeyPirogov/webdriver_manager/pull/445/files#diff-38d3c158b7a31027012330b5537df014f3ec50ffa657a2787d449bd3e3c9367bL32-R33) ourselves too as described above (`mac64_m1` -> `mac_arm64`) so we can hit the right web driver download URL.
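
A minimal sketch of that URL rewrite, kept separate from the real downloader for illustration (the helper name and arguments are assumptions; only the `mac64_m1` -> `mac_arm64` rename and the `106.0.5249.61` threshold come from the upstream fix):

```python
from packaging import version


def fix_mac_arm_url(url: str, os_type: str, browser_version: str) -> str:
    # Chrome >= 106.0.5249.61 publishes the Apple Silicon driver under
    # `mac_arm64` instead of the old `mac64_m1` suffix, so rewrite the URL.
    is_m1 = "m1" in os_type
    if is_m1 and version.parse(browser_version) >= version.parse("106.0.5249.61"):
        return url.replace("mac64_m1", "mac_arm64")
    return url


# e.g. fix_mac_arm_url(".../chromedriver_mac64_m1.zip", "mac64_m1", "106.0.5249.61")
# -> ".../chromedriver_mac_arm64.zip"
```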
</issue>
<code>
[start of packages/core/src/RPA/core/webdriver.py]
1 import contextlib
2 import logging
3 import os
4 import platform
5 import stat
6 from pathlib import Path
7 from typing import Optional
8
9 import requests
10 from requests import Response
11 from selenium import webdriver
12 from selenium.webdriver.common.service import Service
13 from selenium.webdriver.remote.webdriver import WebDriver
14 from webdriver_manager.chrome import ChromeDriverManager
15 from webdriver_manager.core.download_manager import WDMDownloadManager
16 from webdriver_manager.core.http import WDMHttpClient
17 from webdriver_manager.core.manager import DriverManager
18 from webdriver_manager.core.utils import os_name as get_os_name
19 from webdriver_manager.firefox import GeckoDriverManager
20 from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager
21 from webdriver_manager.opera import OperaDriverManager
22
23 from RPA.core.robocorp import robocorp_home
24
25
26 LOGGER = logging.getLogger(__name__)
27
28 DRIVER_ROOT = robocorp_home() / "webdrivers"
29 DRIVER_PREFERENCE = {
30 "Windows": ["Chrome", "Firefox", "ChromiumEdge"],
31 "Linux": ["Chrome", "Firefox", "ChromiumEdge"],
32 "Darwin": ["Chrome", "Firefox", "ChromiumEdge", "Safari"],
33 "default": ["Chrome", "Firefox"],
34 }
35 AVAILABLE_DRIVERS = {
36 # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.
37 "chrome": ChromeDriverManager,
38 "firefox": GeckoDriverManager,
39 "gecko": GeckoDriverManager,
40 "mozilla": GeckoDriverManager,
41 # NOTE: Selenium 4 dropped support for Opera.
42 # (https://github.com/SeleniumHQ/selenium/issues/10835)
43 "opera": OperaDriverManager,
44 # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.
45 "edge": EdgeChromiumDriverManager,
46 "chromiumedge": EdgeChromiumDriverManager,
47 # NOTE: IE is discontinued and not supported/encouraged anymore.
48 "ie": IEDriverManager,
49 }
50
51
52 class Downloader(WDMHttpClient):
53
54 """Custom downloader which disables download progress reporting."""
55
56 def get(self, url, **kwargs) -> Response:
57 resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)
58 self.validate_response(resp)
59 return resp
60
61
62 @contextlib.contextmanager
63 def suppress_logging():
64 """Suppress webdriver-manager logging."""
65 wdm_log = "WDM_LOG"
66 original_value = os.getenv(wdm_log, "")
67 try:
68 os.environ[wdm_log] = str(logging.NOTSET)
69 yield
70 finally:
71 os.environ[wdm_log] = original_value
72
73
74 def start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:
75 """Start a webdriver with the given options."""
76 browser = browser.strip()
77 webdriver_factory = getattr(webdriver, browser, None)
78 if not webdriver_factory:
79 raise ValueError(f"Unsupported browser: {browser}")
80
81 # NOTE: It is recommended to pass a `service` rather than deprecated `options`.
82 driver = webdriver_factory(service=service, **options)
83 return driver
84
85
86 def _to_manager(browser: str, root: Path = DRIVER_ROOT) -> DriverManager:
87 browser = browser.strip()
88 manager_factory = AVAILABLE_DRIVERS.get(browser.lower())
89 if not manager_factory:
90 raise ValueError(
91 f"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})"
92 )
93
94 download_manager = WDMDownloadManager(Downloader())
95 manager = manager_factory(path=str(root), download_manager=download_manager)
96 return manager
97
98
99 def _set_executable(path: str) -> None:
100 st = os.stat(path)
101 os.chmod(
102 path,
103 st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,
104 )
105
106
107 def download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:
108 """Download a webdriver binary for the given browser and return the path to it."""
109 manager = _to_manager(browser, root)
110 driver = manager.driver
111 resolved_os = getattr(driver, "os_type", driver.get_os_type())
112 os_name = get_os_name()
113 if os_name not in resolved_os:
114 LOGGER.warning(
115 "Attempting to download incompatible driver for OS %r on OS %r! Skip",
116 resolved_os,
117 os_name,
118 )
119 return None # incompatible driver download attempt
120
121 with suppress_logging():
122 path: str = manager.install()
123 if platform.system() != "Windows":
124 _set_executable(path)
125 LOGGER.debug("Downloaded webdriver to: %s", path)
126 return path
127
[end of packages/core/src/RPA/core/webdriver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py
--- a/packages/core/src/RPA/core/webdriver.py
+++ b/packages/core/src/RPA/core/webdriver.py
@@ -7,6 +7,7 @@
from typing import Optional
import requests
+from packaging import version
from requests import Response
from selenium import webdriver
from selenium.webdriver.common.service import Service
@@ -53,7 +54,23 @@
"""Custom downloader which disables download progress reporting."""
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.driver = None
+
+ def _fix_mac_arm_url(self, url) -> str:
+ if "m1" not in self.driver.get_os_type():
+ return url
+
+ # FIXME(cmin764): Remove this when the issue below gets closed
+ # https://github.com/SergeyPirogov/webdriver_manager/issues/446
+ browser_version = self.driver.get_version()
+ if version.parse(browser_version) >= version.parse("106.0.5249.61"):
+ url = url.replace("mac64_m1", "mac_arm64")
+ return url
+
def get(self, url, **kwargs) -> Response:
+ url = self._fix_mac_arm_url(url)
resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)
self.validate_response(resp)
return resp
@@ -91,8 +108,10 @@
f"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})"
)
- download_manager = WDMDownloadManager(Downloader())
+ downloader = Downloader()
+ download_manager = WDMDownloadManager(downloader)
manager = manager_factory(path=str(root), download_manager=download_manager)
+ downloader.driver = manager.driver
return manager
|
{"golden_diff": "diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py\n--- a/packages/core/src/RPA/core/webdriver.py\n+++ b/packages/core/src/RPA/core/webdriver.py\n@@ -7,6 +7,7 @@\n from typing import Optional\n \n import requests\n+from packaging import version\n from requests import Response\n from selenium import webdriver\n from selenium.webdriver.common.service import Service\n@@ -53,7 +54,23 @@\n \n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n \n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.driver = None\n+\n+ def _fix_mac_arm_url(self, url) -> str:\n+ if \"m1\" not in self.driver.get_os_type():\n+ return url\n+\n+ # FIXME(cmin764): Remove this when the issue below gets closed\n+ # https://github.com/SergeyPirogov/webdriver_manager/issues/446\n+ browser_version = self.driver.get_version()\n+ if version.parse(browser_version) >= version.parse(\"106.0.5249.61\"):\n+ url = url.replace(\"mac64_m1\", \"mac_arm64\")\n+ return url\n+\n def get(self, url, **kwargs) -> Response:\n+ url = self._fix_mac_arm_url(url)\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n@@ -91,8 +108,10 @@\n f\"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})\"\n )\n \n- download_manager = WDMDownloadManager(Downloader())\n+ downloader = Downloader()\n+ download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n+ downloader.driver = manager.driver\n return manager\n", "issue": "`RPA.core.webdriver` used by Selenium can't download latest Mac arm64 webdriver\n\ud83d\udcbb System affected: M1/M2 Macs with no working `chromedriver` available in PATH under Chrome browser version **106.0.5249.61** and onwards.\r\n\r\n[Fix](https://github.com/SergeyPirogov/webdriver_manager/issues/446#issuecomment-1274558712) is already merged upstream but has not yet been released in PyPI.\r\n\r\n### Reproduce\r\n\r\n```python\r\ninv code.test-robot -r browser -t \"Open Browser With Dict Options\"\r\n```\r\n\r\n### Solution\r\nThe desired fix would be to bump the `webdriver-manager` package `>3.8.3` once such version gets released.\r\n\r\n### Workaround\r\nMeanwhile, we can implement the [workaround](https://github.com/SergeyPirogov/webdriver_manager/pull/445/files#diff-38d3c158b7a31027012330b5537df014f3ec50ffa657a2787d449bd3e3c9367bL32-R33) ourselves too as described above (`mac64_m1` -> `mac_arm64`) so we can hit the right web driver download URL.\n", "before_files": [{"content": "import contextlib\nimport logging\nimport os\nimport platform\nimport stat\nfrom pathlib import Path\nfrom typing import Optional\n\nimport requests\nfrom requests import Response\nfrom selenium import webdriver\nfrom selenium.webdriver.common.service import Service\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.core.download_manager import WDMDownloadManager\nfrom webdriver_manager.core.http import WDMHttpClient\nfrom webdriver_manager.core.manager import DriverManager\nfrom webdriver_manager.core.utils import os_name as get_os_name\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\n\nfrom RPA.core.robocorp import robocorp_home\n\n\nLOGGER = 
logging.getLogger(__name__)\n\nDRIVER_ROOT = robocorp_home() / \"webdrivers\"\nDRIVER_PREFERENCE = {\n \"Windows\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\"],\n \"Linux\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\"],\n \"Darwin\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\", \"Safari\"],\n \"default\": [\"Chrome\", \"Firefox\"],\n}\nAVAILABLE_DRIVERS = {\n # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.\n \"chrome\": ChromeDriverManager,\n \"firefox\": GeckoDriverManager,\n \"gecko\": GeckoDriverManager,\n \"mozilla\": GeckoDriverManager,\n # NOTE: Selenium 4 dropped support for Opera.\n # (https://github.com/SeleniumHQ/selenium/issues/10835)\n \"opera\": OperaDriverManager,\n # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.\n \"edge\": EdgeChromiumDriverManager,\n \"chromiumedge\": EdgeChromiumDriverManager,\n # NOTE: IE is discontinued and not supported/encouraged anymore.\n \"ie\": IEDriverManager,\n}\n\n\nclass Downloader(WDMHttpClient):\n\n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n\n def get(self, url, **kwargs) -> Response:\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n\n\[email protected]\ndef suppress_logging():\n \"\"\"Suppress webdriver-manager logging.\"\"\"\n wdm_log = \"WDM_LOG\"\n original_value = os.getenv(wdm_log, \"\")\n try:\n os.environ[wdm_log] = str(logging.NOTSET)\n yield\n finally:\n os.environ[wdm_log] = original_value\n\n\ndef start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:\n \"\"\"Start a webdriver with the given options.\"\"\"\n browser = browser.strip()\n webdriver_factory = getattr(webdriver, browser, None)\n if not webdriver_factory:\n raise ValueError(f\"Unsupported browser: {browser}\")\n\n # NOTE: It is recommended to pass a `service` rather than deprecated `options`.\n driver = webdriver_factory(service=service, **options)\n return driver\n\n\ndef _to_manager(browser: str, root: Path = DRIVER_ROOT) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n if not manager_factory:\n raise ValueError(\n f\"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})\"\n )\n\n download_manager = WDMDownloadManager(Downloader())\n manager = manager_factory(path=str(root), download_manager=download_manager)\n return manager\n\n\ndef _set_executable(path: str) -> None:\n st = os.stat(path)\n os.chmod(\n path,\n st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,\n )\n\n\ndef download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:\n \"\"\"Download a webdriver binary for the given browser and return the path to it.\"\"\"\n manager = _to_manager(browser, root)\n driver = manager.driver\n resolved_os = getattr(driver, \"os_type\", driver.get_os_type())\n os_name = get_os_name()\n if os_name not in resolved_os:\n LOGGER.warning(\n \"Attempting to download incompatible driver for OS %r on OS %r! Skip\",\n resolved_os,\n os_name,\n )\n return None # incompatible driver download attempt\n\n with suppress_logging():\n path: str = manager.install()\n if platform.system() != \"Windows\":\n _set_executable(path)\n LOGGER.debug(\"Downloaded webdriver to: %s\", path)\n return path\n", "path": "packages/core/src/RPA/core/webdriver.py"}]}
| 2,104 | 441 |
gh_patches_debug_7037
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-1080
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception raised with sizeof of some spans
With `master`:
```
Exception in thread AgentWriter:
Traceback (most recent call last):
File "/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/_worker.py", line 67, in _target
self.run_periodic()
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/internal/writer.py", line 65, in flush_queue
traces_queue_size = sum(map(sizeof.sizeof, traces))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 31, in sizeof
return _sizeof(o)
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 8, in <genexpr>
return (getattr(o, slot) for slot in o.__slots__)
AttributeError: rate_limit
```
Might be an issue where the attribute is declared in `__slots__` but not set.
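
The failure mode is easy to reproduce in isolation, along with the usual guard. This is a hedged sketch rather than the project's actual fix; the class below is invented purely to show an unset slot.

```python
import sys


class RateLimiter(object):
    # Illustrative class: `rate_limit` is declared in __slots__ but only
    # assigned conditionally, so some instances never set it.
    __slots__ = ("rate_limit",)

    def __init__(self, limit=None):
        if limit is not None:
            self.rate_limit = limit


r = RateLimiter()              # the `rate_limit` slot stays unset
# getattr(r, "rate_limit")     # would raise AttributeError: rate_limit

_UNSET = object()
slot_values = [
    v
    for v in (getattr(r, slot, _UNSET) for slot in r.__slots__)
    if v is not _UNSET         # skip slots that were never assigned
]
print(slot_values, sys.getsizeof(r))   # [] plus the base object size
```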
</issue>
<code>
[start of ddtrace/utils/sizeof.py]
1 import collections
2 import sys
3 from itertools import chain
4
5
6 def iter_object(o):
7 if hasattr(o, '__slots__'):
8 return (getattr(o, slot) for slot in o.__slots__)
9 elif hasattr(o, '__dict__'):
10 return list(o.__dict__.items())
11 elif isinstance(o, dict):
12 # Make a copy to avoid corruption
13 return chain.from_iterable(list(o.items()))
14 elif isinstance(o, (list, set, frozenset, tuple, collections.deque)):
15 # Make a copy to avoid corruption
16 return iter(list(o))
17 return []
18
19
20 def sizeof(o):
21 """Returns the approximate memory footprint an object and all of its contents."""
22 seen = set()
23
24 def _sizeof(o):
25 # do not double count the same object
26 if id(o) in seen:
27 return 0
28 seen.add(id(o))
29 return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
30
31 return _sizeof(o)
32
[end of ddtrace/utils/sizeof.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/utils/sizeof.py b/ddtrace/utils/sizeof.py
--- a/ddtrace/utils/sizeof.py
+++ b/ddtrace/utils/sizeof.py
@@ -2,10 +2,16 @@
import sys
from itertools import chain
+_UNSET = object()
+
def iter_object(o):
if hasattr(o, '__slots__'):
- return (getattr(o, slot) for slot in o.__slots__)
+ return (
+ s
+ for s in (getattr(o, slot, _UNSET) for slot in o.__slots__)
+ if s != _UNSET
+ )
elif hasattr(o, '__dict__'):
return list(o.__dict__.items())
elif isinstance(o, dict):
|
{"golden_diff": "diff --git a/ddtrace/utils/sizeof.py b/ddtrace/utils/sizeof.py\n--- a/ddtrace/utils/sizeof.py\n+++ b/ddtrace/utils/sizeof.py\n@@ -2,10 +2,16 @@\n import sys\n from itertools import chain\n \n+_UNSET = object()\n+\n \n def iter_object(o):\n if hasattr(o, '__slots__'):\n- return (getattr(o, slot) for slot in o.__slots__)\n+ return (\n+ s\n+ for s in (getattr(o, slot, _UNSET) for slot in o.__slots__)\n+ if s != _UNSET\n+ )\n elif hasattr(o, '__dict__'):\n return list(o.__dict__.items())\n elif isinstance(o, dict):\n", "issue": "Exception raised with sizeof of some spans\nWith `master`:\r\n\r\n```\r\nException in thread AgentWriter:\r\nTraceback (most recent call last):\r\n File \"/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py\", line 801, in __bootstrap_inner\r\n self.run()\r\n File \"/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py\", line 754, in run\r\n self.__target(*self.__args, **self.__kwargs)\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/_worker.py\", line 67, in _target\r\n self.run_periodic()\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/internal/writer.py\", line 65, in flush_queue\r\n traces_queue_size = sum(map(sizeof.sizeof, traces))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 31, in sizeof\r\n return _sizeof(o)\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 8, in <genexpr>\r\n return (getattr(o, slot) for slot in o.__slots__)\r\nAttributeError: rate_limit\r\n```\r\n\r\nMight be an issue where the attribute is declared in `__slots__` but not set.\n", "before_files": [{"content": "import collections\nimport sys\nfrom itertools import chain\n\n\ndef iter_object(o):\n if hasattr(o, '__slots__'):\n return (getattr(o, slot) for slot in o.__slots__)\n elif hasattr(o, '__dict__'):\n return list(o.__dict__.items())\n elif isinstance(o, dict):\n # Make a copy to avoid corruption\n return chain.from_iterable(list(o.items()))\n elif isinstance(o, (list, set, frozenset, tuple, collections.deque)):\n # Make a copy to avoid corruption\n return iter(list(o))\n return []\n\n\ndef sizeof(o):\n \"\"\"Returns the approximate memory footprint an object and all of its contents.\"\"\"\n seen = set()\n\n def _sizeof(o):\n # do not double count the same object\n if id(o) in seen:\n return 0\n seen.add(id(o))\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\n\n return _sizeof(o)\n", "path": "ddtrace/utils/sizeof.py"}]}
| 1,345 | 161 |
gh_patches_debug_29780
|
rasdani/github-patches
|
git_diff
|
apluslms__a-plus-1045
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with Enrollment questionnaire and SISU enrollments
Quite recently a new "PENDING" enrollment state was added to support enrollment questionnaires on courses where students are enrolled from SISU. On the summer Y1 course this feature was used in production for the first time, but it does not appear to work properly. Students are set to the PENDING state correctly when fetched from SISU, but they are not redirected to the enrollment questionnaire as they should be. Instead they get the "You must enrol on this course through Student Information System." note, which should not be shown in this case.
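
The intended behaviour can be sketched as one extra branch in the course view's permission handling: a signed-in user whose enrollment is still PENDING (created from the SISU sync) should be sent to the enroll view, which hosts the enrollment questionnaire, instead of falling through to the SISU-only notice. The snippet below only illustrates that intent; the status constant and the exact placement are assumptions, not the project's actual change.

```python
# Hypothetical sketch inside CourseInstanceMixin.handle_no_permission();
# Enrollment.ENROLLMENT_STATUS.PENDING is an assumed name for the new state.
def handle_no_permission(self):
    user = self.request.user
    if user.is_authenticated:
        enrollment = self.instance.get_enrollment_for(user)
        if enrollment and enrollment.status == Enrollment.ENROLLMENT_STATUS.PENDING:
            # SISU already enrolled the student, but the questionnaire is
            # still unanswered, so redirect to the enroll page instead of
            # showing the "enrol through SISU" message.
            return redirect(self.instance.get_url('enroll'))
    return super().handle_no_permission()
```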
</issue>
<code>
[start of course/viewbase.py]
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.http import Http404
4 from django.shortcuts import get_object_or_404, redirect, render
5 from django.utils import translation
6 from django.utils.translation import gettext_lazy as _
7 from django.utils.translation import get_language, get_language_info
8
9 from authorization.permissions import ACCESS
10 from exercise.cache.content import CachedContent
11 from lib.helpers import remove_query_param_from_url, update_url_params
12 from lib.viewbase import BaseTemplateView
13 from userprofile.viewbase import UserProfileMixin
14 from .cache.students import CachedStudent
15 from .exceptions import TranslationNotFound
16 from .permissions import (
17 CourseVisiblePermission,
18 CourseModulePermission,
19 )
20 from .models import Course, CourseInstance, CourseModule, UserTagging
21
22
23 class CourseMixin(UserProfileMixin):
24 course_kw = "course_slug"
25
26 def get_resource_objects(self):
27 super().get_resource_objects()
28 self.course = get_object_or_404(
29 Course,
30 url=self._get_kwarg(self.course_kw)
31 )
32 self.note("course")
33
34
35 class CourseBaseView(CourseMixin, BaseTemplateView):
36 pass
37
38
39 class CourseInstanceBaseMixin(object):
40 course_kw = CourseMixin.course_kw
41 instance_kw = "instance_slug"
42 course_permission_classes = (
43 CourseVisiblePermission,
44 )
45
46 def get_permissions(self):
47 perms = super().get_permissions()
48 perms.extend((Perm() for Perm in self.course_permission_classes))
49 return perms
50
51 # get_course_instance_object
52
53 def get_resource_objects(self):
54 super().get_resource_objects()
55 user = self.request.user
56 instance = self.get_course_instance_object()
57 if instance is not None:
58 self.instance = instance
59 self.course = self.instance.course
60 self.content = CachedContent(self.instance)
61 self.user_course_data = None
62 is_real_user = user.is_authenticated and not user.is_anonymous
63 if is_real_user:
64 self.user_course_data = self.instance.get_enrollment_for(user)
65 self.is_student = self.instance.is_student(user)
66 self.is_assistant = self.instance.is_assistant(user)
67 self.is_teacher = self.instance.is_teacher(user)
68 self.is_course_staff = self.is_teacher or self.is_assistant
69 self.get_taggings = lambda: CachedStudent(instance, user.id).data['tag_slugs']
70 self.url_without_language = remove_query_param_from_url(self.request.get_full_path(), 'hl')
71 self.query_language = None
72 self.user_language = None
73
74 self.note(
75 "course", "instance", "content", "user_course_data", "is_student", "is_assistant",
76 "is_teacher", "is_course_staff", "get_taggings", "url_without_language",
77 "query_language", "user_language"
78 )
79
80 # Try to find a language that is defined for this course instance
81 # and apply it
82 if self.instance.language:
83 instance_languages = self.instance.language.strip('|').split('|')
84 instance_def_language = instance_languages[0]
85 instance_languages = set(instance_languages)
86
87 languages = []
88 if self.user_course_data and self.user_course_data.language:
89 languages.append(self.user_course_data.language)
90 if is_real_user and user.userprofile.language:
91 languages.append(user.userprofile.language)
92 languages.append(get_language())
93
94 query_language = self.request.GET.get('hl')
95 if query_language:
96 if query_language[:2] in instance_languages:
97 language = query_language
98 if languages:
99 self.user_language = languages[0]
100 if self.user_language[:2] != query_language[:2]:
101 self.query_language = query_language
102 else:
103 raise TranslationNotFound
104 else:
105 for lang in languages:
106 if lang[:2] in instance_languages:
107 language = lang
108 break
109 else:
110 language = instance_def_language
111
112 language = language[:2]
113 # Override request.LANGUAGE_CODE. It is set in lib/middleware.py
114 # (class LocaleMiddleware) based on the userprofile.language.
115 # The middleware can not easily access the course context and
116 # the language from the enrollment. That is fixed here.
117 self.request.LANGUAGE_CODE = language
118 translation.activate(language)
119
120 def get_access_mode(self):
121 access_mode = super().get_access_mode()
122
123 if hasattr(self, 'instance'):
124 # Loosen the access mode if instance is public
125 show_for = self.instance.view_content_to
126 is_public = show_for == CourseInstance.VIEW_ACCESS.PUBLIC
127 access_mode_student = access_mode in (ACCESS.STUDENT, ACCESS.ENROLL)
128 if is_public and access_mode_student:
129 access_mode = ACCESS.ANONYMOUS
130
131 return access_mode
132
133 def handle_exception(self, exc):
134 if isinstance(exc, TranslationNotFound):
135 instance_languages = self.instance.language.strip("|").split("|")
136 url = remove_query_param_from_url(self.request.get_full_path(), 'hl')
137 for i, lang in enumerate(instance_languages):
138 instance_languages[i] = {"name": get_language_info(lang)['name'], "url": update_url_params(url, {'hl' : lang})}
139 return render(self.request, '404.html', {'error_msg': str(exc), 'languages': instance_languages}, status=404)
140 return super().handle_exception(exc)
141
142 class CourseInstanceMixin(CourseInstanceBaseMixin, UserProfileMixin):
143 def get_course_instance_object(self) -> CourseInstance:
144 return get_object_or_404(
145 CourseInstance.objects.prefetch_related('tabs'),
146 url=self.kwargs[self.instance_kw],
147 course__url=self.kwargs[self.course_kw],
148 )
149
150 def handle_no_permission(self):
151 if (self.request.user.is_authenticated
152 and not self.is_student
153 and not self.is_course_staff
154 and self.get_access_mode() in [ACCESS.STUDENT, ACCESS.ENROLLED]
155 and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED):
156 # Redirect the user to the enrollment page instead of showing
157 # a 403 Forbidden error, if:
158 # - the user is signed in but not enrolled or staff
159 # - the page is not a teacher page (e.g. edit course)
160 # - the course is visible only to enrolled students
161 return redirect(self.instance.get_url('enroll'))
162 return super().handle_no_permission()
163
164
165 class CourseInstanceBaseView(CourseInstanceMixin, BaseTemplateView):
166 pass
167
168
169 class EnrollableViewMixin(CourseInstanceMixin):
170 access_mode = ACCESS.ENROLL
171
172 def get_common_objects(self):
173 self.enrolled = self.is_student
174 self.enrollable = (
175 self.profile
176 and self.instance.is_enrollable(self.profile.user)
177 )
178 self.note('enrolled', 'enrollable')
179
180
181 class CourseModuleBaseMixin(object):
182 module_kw = "module_slug"
183 module_permissions_classes = (
184 CourseModulePermission,
185 )
186
187 def get_permissions(self):
188 perms = super().get_permissions()
189 perms.extend((Perm() for Perm in self.module_permissions_classes))
190 return perms
191
192 # get_course_module_object
193
194 def get_resource_objects(self):
195 super().get_resource_objects()
196 self.module = self.get_course_module_object()
197 self.note("module")
198
199
200 class CourseModuleMixin(CourseModuleBaseMixin, CourseInstanceMixin):
201 def get_course_module_object(self):
202 return get_object_or_404(
203 CourseModule,
204 url=self.kwargs[self.module_kw],
205 course_instance=self.instance
206 )
207
208
209 class CourseModuleBaseView(CourseModuleMixin, BaseTemplateView):
210 pass
211
[end of course/viewbase.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/course/viewbase.py b/course/viewbase.py
--- a/course/viewbase.py
+++ b/course/viewbase.py
@@ -11,13 +11,14 @@
from lib.helpers import remove_query_param_from_url, update_url_params
from lib.viewbase import BaseTemplateView
from userprofile.viewbase import UserProfileMixin
+from exercise.models import LearningObject
from .cache.students import CachedStudent
from .exceptions import TranslationNotFound
from .permissions import (
CourseVisiblePermission,
CourseModulePermission,
)
-from .models import Course, CourseInstance, CourseModule, UserTagging
+from .models import Course, CourseInstance, CourseModule, UserTagging, Enrollment
class CourseMixin(UserProfileMixin):
@@ -158,6 +159,15 @@
# - the user is signed in but not enrolled or staff
# - the page is not a teacher page (e.g. edit course)
# - the course is visible only to enrolled students
+ #
+ # If SIS enrollment is applied and course requires enrollment questionnaire,
+ # redirect to the questionnaire instead.
+ enrollment = self.user_course_data
+ if enrollment and enrollment.status == Enrollment.ENROLLMENT_STATUS.PENDING:
+ exercise = LearningObject.objects.find_enrollment_exercise(
+ self.instance, self.profile.is_external)
+ if exercise:
+ return self.redirect(exercise.get_absolute_url())
return redirect(self.instance.get_url('enroll'))
return super().handle_no_permission()
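A note on the patch above, with a self-contained toy sketch of the decision it adds (the real names, `Enrollment.ENROLLMENT_STATUS.PENDING` and `LearningObject.objects.find_enrollment_exercise`, come from the diff itself; the stand-ins below are hypothetical): a signed-in user whose enrollment is still pending is sent to the enrollment questionnaire when the course has one, and only falls back to the plain enroll page otherwise.

```python
# Toy model of the added branch: PENDING enrollments go to the questionnaire
# (if any); everyone else keeps the old redirect to the enroll page.
from enum import Enum

class Status(Enum):
    PENDING = 1
    ACTIVE = 2

def redirect_target(enrollment_status, questionnaire_url):
    if enrollment_status is Status.PENDING and questionnaire_url:
        return questionnaire_url      # SIS-enrolled, questionnaire not yet answered
    return "/course/enroll/"          # previous behaviour

assert redirect_target(Status.PENDING, "/course/questionnaire/") == "/course/questionnaire/"
assert redirect_target(Status.ACTIVE, "/course/questionnaire/") == "/course/enroll/"
```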
|
{"golden_diff": "diff --git a/course/viewbase.py b/course/viewbase.py\n--- a/course/viewbase.py\n+++ b/course/viewbase.py\n@@ -11,13 +11,14 @@\n from lib.helpers import remove_query_param_from_url, update_url_params\n from lib.viewbase import BaseTemplateView\n from userprofile.viewbase import UserProfileMixin\n+from exercise.models import LearningObject\n from .cache.students import CachedStudent\n from .exceptions import TranslationNotFound\n from .permissions import (\n CourseVisiblePermission,\n CourseModulePermission,\n )\n-from .models import Course, CourseInstance, CourseModule, UserTagging\n+from .models import Course, CourseInstance, CourseModule, UserTagging, Enrollment\n \n \n class CourseMixin(UserProfileMixin):\n@@ -158,6 +159,15 @@\n # - the user is signed in but not enrolled or staff\n # - the page is not a teacher page (e.g. edit course)\n # - the course is visible only to enrolled students\n+ #\n+ # If SIS enrollment is applied and course requires enrollment questionnaire,\n+ # redirect to the questionnaire instead.\n+ enrollment = self.user_course_data\n+ if enrollment and enrollment.status == Enrollment.ENROLLMENT_STATUS.PENDING:\n+ exercise = LearningObject.objects.find_enrollment_exercise(\n+ self.instance, self.profile.is_external)\n+ if exercise:\n+ return self.redirect(exercise.get_absolute_url())\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n", "issue": "Problems with Enrollment questionnaire and SISU enrollments\nQuite recently a new \"PENDING\" enrollment state was added to support enrollment questionnaires with courses where students are enrolled from SISU. On summer Y1 course this feature was first time in use in production, but does not appear to work properly. Students are set to PENDING state properly when fetched from SISU, but are not redirected to enrollment questionnaire properly as they should. 
Instead they get \"You must enrol on this course through Student Information System.\" note which should not be shown in this case.\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import translation\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import get_language, get_language_info\n\nfrom authorization.permissions import ACCESS\nfrom exercise.cache.content import CachedContent\nfrom lib.helpers import remove_query_param_from_url, update_url_params\nfrom lib.viewbase import BaseTemplateView\nfrom userprofile.viewbase import UserProfileMixin\nfrom .cache.students import CachedStudent\nfrom .exceptions import TranslationNotFound\nfrom .permissions import (\n CourseVisiblePermission,\n CourseModulePermission,\n)\nfrom .models import Course, CourseInstance, CourseModule, UserTagging\n\n\nclass CourseMixin(UserProfileMixin):\n course_kw = \"course_slug\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.course = get_object_or_404(\n Course,\n url=self._get_kwarg(self.course_kw)\n )\n self.note(\"course\")\n\n\nclass CourseBaseView(CourseMixin, BaseTemplateView):\n pass\n\n\nclass CourseInstanceBaseMixin(object):\n course_kw = CourseMixin.course_kw\n instance_kw = \"instance_slug\"\n course_permission_classes = (\n CourseVisiblePermission,\n )\n\n def get_permissions(self):\n perms = super().get_permissions()\n perms.extend((Perm() for Perm in self.course_permission_classes))\n return perms\n\n # get_course_instance_object\n\n def get_resource_objects(self):\n super().get_resource_objects()\n user = self.request.user\n instance = self.get_course_instance_object()\n if instance is not None:\n self.instance = instance\n self.course = self.instance.course\n self.content = CachedContent(self.instance)\n self.user_course_data = None\n is_real_user = user.is_authenticated and not user.is_anonymous\n if is_real_user:\n self.user_course_data = self.instance.get_enrollment_for(user)\n self.is_student = self.instance.is_student(user)\n self.is_assistant = self.instance.is_assistant(user)\n self.is_teacher = self.instance.is_teacher(user)\n self.is_course_staff = self.is_teacher or self.is_assistant\n self.get_taggings = lambda: CachedStudent(instance, user.id).data['tag_slugs']\n self.url_without_language = remove_query_param_from_url(self.request.get_full_path(), 'hl')\n self.query_language = None\n self.user_language = None\n\n self.note(\n \"course\", \"instance\", \"content\", \"user_course_data\", \"is_student\", \"is_assistant\",\n \"is_teacher\", \"is_course_staff\", \"get_taggings\", \"url_without_language\",\n \"query_language\", \"user_language\"\n )\n\n # Try to find a language that is defined for this course instance\n # and apply it\n if self.instance.language:\n instance_languages = self.instance.language.strip('|').split('|')\n instance_def_language = instance_languages[0]\n instance_languages = set(instance_languages)\n\n languages = []\n if self.user_course_data and self.user_course_data.language:\n languages.append(self.user_course_data.language)\n if is_real_user and user.userprofile.language:\n languages.append(user.userprofile.language)\n languages.append(get_language())\n\n query_language = self.request.GET.get('hl')\n if query_language:\n if query_language[:2] in instance_languages:\n language = query_language\n if 
languages:\n self.user_language = languages[0]\n if self.user_language[:2] != query_language[:2]:\n self.query_language = query_language\n else:\n raise TranslationNotFound\n else:\n for lang in languages:\n if lang[:2] in instance_languages:\n language = lang\n break\n else:\n language = instance_def_language\n\n language = language[:2]\n # Override request.LANGUAGE_CODE. It is set in lib/middleware.py\n # (class LocaleMiddleware) based on the userprofile.language.\n # The middleware can not easily access the course context and\n # the language from the enrollment. That is fixed here.\n self.request.LANGUAGE_CODE = language\n translation.activate(language)\n\n def get_access_mode(self):\n access_mode = super().get_access_mode()\n\n if hasattr(self, 'instance'):\n # Loosen the access mode if instance is public\n show_for = self.instance.view_content_to\n is_public = show_for == CourseInstance.VIEW_ACCESS.PUBLIC\n access_mode_student = access_mode in (ACCESS.STUDENT, ACCESS.ENROLL)\n if is_public and access_mode_student:\n access_mode = ACCESS.ANONYMOUS\n\n return access_mode\n\n def handle_exception(self, exc):\n if isinstance(exc, TranslationNotFound):\n instance_languages = self.instance.language.strip(\"|\").split(\"|\")\n url = remove_query_param_from_url(self.request.get_full_path(), 'hl')\n for i, lang in enumerate(instance_languages):\n instance_languages[i] = {\"name\": get_language_info(lang)['name'], \"url\": update_url_params(url, {'hl' : lang})}\n return render(self.request, '404.html', {'error_msg': str(exc), 'languages': instance_languages}, status=404)\n return super().handle_exception(exc)\n\nclass CourseInstanceMixin(CourseInstanceBaseMixin, UserProfileMixin):\n def get_course_instance_object(self) -> CourseInstance:\n return get_object_or_404(\n CourseInstance.objects.prefetch_related('tabs'),\n url=self.kwargs[self.instance_kw],\n course__url=self.kwargs[self.course_kw],\n )\n\n def handle_no_permission(self):\n if (self.request.user.is_authenticated\n and not self.is_student\n and not self.is_course_staff\n and self.get_access_mode() in [ACCESS.STUDENT, ACCESS.ENROLLED]\n and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED):\n # Redirect the user to the enrollment page instead of showing\n # a 403 Forbidden error, if:\n # - the user is signed in but not enrolled or staff\n # - the page is not a teacher page (e.g. 
edit course)\n # - the course is visible only to enrolled students\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n\n\nclass CourseInstanceBaseView(CourseInstanceMixin, BaseTemplateView):\n pass\n\n\nclass EnrollableViewMixin(CourseInstanceMixin):\n access_mode = ACCESS.ENROLL\n\n def get_common_objects(self):\n self.enrolled = self.is_student\n self.enrollable = (\n self.profile\n and self.instance.is_enrollable(self.profile.user)\n )\n self.note('enrolled', 'enrollable')\n\n\nclass CourseModuleBaseMixin(object):\n module_kw = \"module_slug\"\n module_permissions_classes = (\n CourseModulePermission,\n )\n\n def get_permissions(self):\n perms = super().get_permissions()\n perms.extend((Perm() for Perm in self.module_permissions_classes))\n return perms\n\n # get_course_module_object\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.module = self.get_course_module_object()\n self.note(\"module\")\n\n\nclass CourseModuleMixin(CourseModuleBaseMixin, CourseInstanceMixin):\n def get_course_module_object(self):\n return get_object_or_404(\n CourseModule,\n url=self.kwargs[self.module_kw],\n course_instance=self.instance\n )\n\n\nclass CourseModuleBaseView(CourseModuleMixin, BaseTemplateView):\n pass\n", "path": "course/viewbase.py"}]}
| 2,787 | 324 |
gh_patches_debug_19375
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__PaddleDetection-2350
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Train-prune-train-export raises an error
PaddleDetection is version 2.0.
The project is run on a hosted platform.
After training a yolov3_mobilenet_v3 model with PaddleDetection, I pruned the model.
Pruning and the follow-up evaluation finish fine, but exporting the pruned model then fails with an error.
This is the script I ran:
`
!python slim/prune/export_model.py \
-c configs/yolov3_mobilenet_v3.yml \
--pruned_params "yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights" \
--pruned_ratios="0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9" \
-o weights=output/yolov3_mobilenet_v3/model_final
`
The error output is as follows:
```
[03-04 10:10:58 MainThread @logger.py:242] Argv: slim/prune/export_model.py -c configs/yolov3_mobilenet_v3.yml --pruned_params yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights --pruned_ratios=0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9 -o weights=output/yolov3_mobilenet_v3/model_final
[03-04 10:10:58 MainThread @utils.py:79] WRN paddlepaddle version: 2.0.0. The dynamic graph version of PARL is under development, not fully tested and supported
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/parl/remote/communication.py:38: DeprecationWarning: 'pyarrow.default_serialization_context' is deprecated as of 2.0.0 and will be removed in a future version. Use pickle or the pyarrow IPC functionality instead.
context = pyarrow.default_serialization_context()
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/pandas/core/tools/datetimes.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import MutableMapping
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import Iterable, Mapping
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import Sized
2021-03-04 10:11:00,126-INFO: pruned params: ['yolo_block.0.0.0.conv.weights', 'yolo_block.0.0.1.conv.weights', 'yolo_block.0.1.0.conv.weights', 'yolo_block.0.1.1.conv.weights', 'yolo_block.0.2.conv.weights', 'yolo_block.0.tip.conv.weights', 'yolo_block.1.0.0.conv.weights', 'yolo_block.1.0.1.conv.weights', 'yolo_block.1.1.0.conv.weights', 'yolo_block.1.1.1.conv.weights', 'yolo_block.1.2.conv.weights', 'yolo_block.1.tip.conv.weights', 'yolo_block.2.0.0.conv.weights', 'yolo_block.2.0.1.conv.weights', 'yolo_block.2.1.0.conv.weights', 'yolo_block.2.1.1.conv.weights', 'yolo_block.2.2.conv.weights', 'yolo_block.2.tip.conv.weights']
2021-03-04 10:11:00,126-INFO: pruned ratios: [0.7150126596733395, 0.8177442961035291, 0.8274278897456334, 0.8373393786362668, 0.7956892620674756, 0.8445719578292334, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
2021-03-04 10:11:00,169-INFO: pruning: yolo_block.0.0.0.conv.weights
Traceback (most recent call last):
File "slim/prune/export_model.py", line 123, in <module>
main()
File "slim/prune/export_model.py", line 88, in main
only_graph=True)
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/pruner.py", line 112, in prune
g = self._transform(self.idx_selector(scores, ratio))
File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/idx_selector.py", line 57, in default_idx_selector
0] # sort channels by the first convolution's score
IndexError: list index out of range
```
</issue>
<code>
[start of slim/prune/export_model.py]
1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 import os, sys
20 # add python path of PadleDetection to sys.path
21 parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
22 if parent_path not in sys.path:
23 sys.path.append(parent_path)
24
25 import paddle
26 from paddle import fluid
27
28 from ppdet.core.workspace import load_config, merge_config, create
29 from ppdet.utils.cli import ArgsParser
30 import ppdet.utils.checkpoint as checkpoint
31 from ppdet.utils.export_utils import save_infer_model, dump_infer_config
32 from ppdet.utils.check import check_config, check_version, enable_static_mode
33 from paddleslim.prune import Pruner
34 from paddleslim.analysis import flops
35
36 import logging
37 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
38 logging.basicConfig(level=logging.INFO, format=FORMAT)
39 logger = logging.getLogger(__name__)
40
41
42 def main():
43 cfg = load_config(FLAGS.config)
44 merge_config(FLAGS.opt)
45 check_config(cfg)
46 check_version()
47
48 main_arch = cfg.architecture
49
50 # Use CPU for exporting inference model instead of GPU
51 place = fluid.CPUPlace()
52 exe = fluid.Executor(place)
53
54 model = create(main_arch)
55
56 startup_prog = fluid.Program()
57 infer_prog = fluid.Program()
58 with fluid.program_guard(infer_prog, startup_prog):
59 with fluid.unique_name.guard():
60 inputs_def = cfg['TestReader']['inputs_def']
61 inputs_def['use_dataloader'] = False
62 feed_vars, _ = model.build_inputs(**inputs_def)
63 test_fetches = model.test(feed_vars)
64 infer_prog = infer_prog.clone(True)
65
66 pruned_params = FLAGS.pruned_params
67 assert (
68 FLAGS.pruned_params is not None
69 ), "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
70 pruned_params = FLAGS.pruned_params.strip().split(",")
71 logger.info("pruned params: {}".format(pruned_params))
72 pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(",")]
73 logger.info("pruned ratios: {}".format(pruned_ratios))
74 assert (len(pruned_params) == len(pruned_ratios)
75 ), "The length of pruned params and pruned ratios should be equal."
76 assert (pruned_ratios > [0] * len(pruned_ratios) and
77 pruned_ratios < [1] * len(pruned_ratios)
78 ), "The elements of pruned ratios should be in range (0, 1)."
79
80 base_flops = flops(infer_prog)
81 pruner = Pruner()
82 infer_prog, _, _ = pruner.prune(
83 infer_prog,
84 fluid.global_scope(),
85 params=pruned_params,
86 ratios=pruned_ratios,
87 place=place,
88 only_graph=True)
89 pruned_flops = flops(infer_prog)
90 logger.info("pruned FLOPS: {}".format(
91 float(base_flops - pruned_flops) / base_flops))
92
93 exe.run(startup_prog)
94 checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)
95
96 dump_infer_config(FLAGS, cfg)
97 save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
98
99
100 if __name__ == '__main__':
101 enable_static_mode()
102 parser = ArgsParser()
103 parser.add_argument(
104 "--output_dir",
105 type=str,
106 default="output",
107 help="Directory for storing the output model files.")
108
109 parser.add_argument(
110 "-p",
111 "--pruned_params",
112 default=None,
113 type=str,
114 help="The parameters to be pruned when calculating sensitivities.")
115 parser.add_argument(
116 "--pruned_ratios",
117 default=None,
118 type=str,
119 help="The ratios pruned iteratively for each parameter when calculating sensitivities."
120 )
121
122 FLAGS = parser.parse_args()
123 main()
124
[end of slim/prune/export_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/slim/prune/export_model.py b/slim/prune/export_model.py
--- a/slim/prune/export_model.py
+++ b/slim/prune/export_model.py
@@ -63,6 +63,9 @@
test_fetches = model.test(feed_vars)
infer_prog = infer_prog.clone(True)
+ exe.run(startup_prog)
+ checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)
+
pruned_params = FLAGS.pruned_params
assert (
FLAGS.pruned_params is not None
@@ -90,13 +93,9 @@
logger.info("pruned FLOPS: {}".format(
float(base_flops - pruned_flops) / base_flops))
- exe.run(startup_prog)
- checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)
-
dump_infer_config(FLAGS, cfg)
save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
-
if __name__ == '__main__':
enable_static_mode()
parser = ArgsParser()
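A hedged reading of the fix above: the two checkpoint lines are moved ahead of the pruning step, so `exe.run(startup_prog)` and `checkpoint.load_checkpoint(...)` have populated `fluid.global_scope()` before `pruner.prune(...)` asks PaddleSlim to score channels; presumably the empty score list behind the `IndexError` in `default_idx_selector` comes from pruning a graph whose parameters were never initialized. A condensed sketch of the resulting order, using only names that already appear in `export_model.py`:

```python
# Order implied by the patch (assumes the same setup as export_model.py above):
# 1) initialize variables and load trained weights, 2) prune, 3) export.
exe.run(startup_prog)
checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)

pruner = Pruner()
infer_prog, _, _ = pruner.prune(
    infer_prog,
    fluid.global_scope(),
    params=pruned_params,
    ratios=pruned_ratios,
    place=place,
    only_graph=True)

dump_infer_config(FLAGS, cfg)
save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
```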
|
{"golden_diff": "diff --git a/slim/prune/export_model.py b/slim/prune/export_model.py\n--- a/slim/prune/export_model.py\n+++ b/slim/prune/export_model.py\n@@ -63,6 +63,9 @@\n test_fetches = model.test(feed_vars)\n infer_prog = infer_prog.clone(True)\n \n+ exe.run(startup_prog)\n+ checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n+\n pruned_params = FLAGS.pruned_params\n assert (\n FLAGS.pruned_params is not None\n@@ -90,13 +93,9 @@\n logger.info(\"pruned FLOPS: {}\".format(\n float(base_flops - pruned_flops) / base_flops))\n \n- exe.run(startup_prog)\n- checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n-\n dump_infer_config(FLAGS, cfg)\n save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)\n \n-\n if __name__ == '__main__':\n enable_static_mode()\n parser = ArgsParser()\n", "issue": "\u8bad\u7ec3-\u88c1\u526a-\u8bad\u7ec3-\u5bfc\u51fa\u62a5\u9519\nPaddleDetection\u662f2.0\u7248\u672c\r\n\u9879\u76ee\u662f\u5728\u5e73\u53f0\u4e0a\u8fd0\u884c\u7684\r\n\u4f7f\u7528PaddleDetection\u8bad\u7ec3\u597dyolov3_mobilenet_v3\u6a21\u578b\u540e\u8fdb\u884c\u6a21\u578b\u88c1\u526a\r\n\u88c1\u526a\u5b8c\u8bc4\u4f30\u5b8c\u6210\u540e\u5bfc\u51fa\u62a5\u9519\r\n\u8fd9\u662f\u6211\u6267\u884c\u7684\u811a\u672c\r\n`\r\n!python slim/prune/export_model.py \\\r\n-c configs/yolov3_mobilenet_v3.yml \\\r\n--pruned_params \"yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights\" \\\r\n--pruned_ratios=\"0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9\" \\\r\n-o weights=output/yolov3_mobilenet_v3/model_final\r\n`\r\n\r\n\u4ee5\u4e0b\u662f\u62a5\u9519\u4fe1\u606f\uff0c\r\n\r\n```\r\n[03-04 10:10:58 MainThread @logger.py:242] Argv: slim/prune/export_model.py -c configs/yolov3_mobilenet_v3.yml --pruned_params yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights --pruned_ratios=0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9 -o weights=output/yolov3_mobilenet_v3/model_final\r\n[03-04 10:10:58 MainThread @utils.py:79] WRN paddlepaddle version: 2.0.0. The dynamic graph version of PARL is under development, not fully tested and supported\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/parl/remote/communication.py:38: DeprecationWarning: 'pyarrow.default_serialization_context' is deprecated as of 2.0.0 and will be removed in a future version. 
Use pickle or the pyarrow IPC functionality instead.\r\n context = pyarrow.default_serialization_context()\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/pandas/core/tools/datetimes.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n from collections import MutableMapping\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n from collections import Iterable, Mapping\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n from collections import Sized\r\n2021-03-04 10:11:00,126-INFO: pruned params: ['yolo_block.0.0.0.conv.weights', 'yolo_block.0.0.1.conv.weights', 'yolo_block.0.1.0.conv.weights', 'yolo_block.0.1.1.conv.weights', 'yolo_block.0.2.conv.weights', 'yolo_block.0.tip.conv.weights', 'yolo_block.1.0.0.conv.weights', 'yolo_block.1.0.1.conv.weights', 'yolo_block.1.1.0.conv.weights', 'yolo_block.1.1.1.conv.weights', 'yolo_block.1.2.conv.weights', 'yolo_block.1.tip.conv.weights', 'yolo_block.2.0.0.conv.weights', 'yolo_block.2.0.1.conv.weights', 'yolo_block.2.1.0.conv.weights', 'yolo_block.2.1.1.conv.weights', 'yolo_block.2.2.conv.weights', 'yolo_block.2.tip.conv.weights']\r\n2021-03-04 10:11:00,126-INFO: pruned ratios: [0.7150126596733395, 0.8177442961035291, 0.8274278897456334, 0.8373393786362668, 0.7956892620674756, 0.8445719578292334, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]\r\n2021-03-04 10:11:00,169-INFO: pruning: yolo_block.0.0.0.conv.weights\r\nTraceback (most recent call last):\r\n File \"slim/prune/export_model.py\", line 123, in <module>\r\n main()\r\n File \"slim/prune/export_model.py\", line 88, in main\r\n only_graph=True)\r\n File \"/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/pruner.py\", line 112, in prune\r\n g = self._transform(self.idx_selector(scores, ratio))\r\n File \"/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/idx_selector.py\", line 57, in default_idx_selector\r\n 0] # sort channels by the first convolution's score\r\nIndexError: list index out of range\r\n```\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))\nif parent_path not in sys.path:\n sys.path.append(parent_path)\n\nimport paddle\nfrom paddle import fluid\n\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.utils.cli import ArgsParser\nimport ppdet.utils.checkpoint as checkpoint\nfrom ppdet.utils.export_utils import save_infer_model, dump_infer_config\nfrom ppdet.utils.check import check_config, check_version, enable_static_mode\nfrom paddleslim.prune import Pruner\nfrom paddleslim.analysis import flops\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n cfg = load_config(FLAGS.config)\n merge_config(FLAGS.opt)\n check_config(cfg)\n check_version()\n\n main_arch = cfg.architecture\n\n # Use CPU for exporting inference model instead of GPU\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n model = create(main_arch)\n\n startup_prog = fluid.Program()\n infer_prog = fluid.Program()\n with fluid.program_guard(infer_prog, startup_prog):\n with fluid.unique_name.guard():\n inputs_def = cfg['TestReader']['inputs_def']\n inputs_def['use_dataloader'] = False\n feed_vars, _ = model.build_inputs(**inputs_def)\n test_fetches = model.test(feed_vars)\n infer_prog = infer_prog.clone(True)\n\n pruned_params = FLAGS.pruned_params\n assert (\n FLAGS.pruned_params is not None\n ), \"FLAGS.pruned_params is empty!!! 
Please set it by '--pruned_params' option.\"\n pruned_params = FLAGS.pruned_params.strip().split(\",\")\n logger.info(\"pruned params: {}\".format(pruned_params))\n pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(\",\")]\n logger.info(\"pruned ratios: {}\".format(pruned_ratios))\n assert (len(pruned_params) == len(pruned_ratios)\n ), \"The length of pruned params and pruned ratios should be equal.\"\n assert (pruned_ratios > [0] * len(pruned_ratios) and\n pruned_ratios < [1] * len(pruned_ratios)\n ), \"The elements of pruned ratios should be in range (0, 1).\"\n\n base_flops = flops(infer_prog)\n pruner = Pruner()\n infer_prog, _, _ = pruner.prune(\n infer_prog,\n fluid.global_scope(),\n params=pruned_params,\n ratios=pruned_ratios,\n place=place,\n only_graph=True)\n pruned_flops = flops(infer_prog)\n logger.info(\"pruned FLOPS: {}\".format(\n float(base_flops - pruned_flops) / base_flops))\n\n exe.run(startup_prog)\n checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n\n dump_infer_config(FLAGS, cfg)\n save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)\n\n\nif __name__ == '__main__':\n enable_static_mode()\n parser = ArgsParser()\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"output\",\n help=\"Directory for storing the output model files.\")\n\n parser.add_argument(\n \"-p\",\n \"--pruned_params\",\n default=None,\n type=str,\n help=\"The parameters to be pruned when calculating sensitivities.\")\n parser.add_argument(\n \"--pruned_ratios\",\n default=None,\n type=str,\n help=\"The ratios pruned iteratively for each parameter when calculating sensitivities.\"\n )\n\n FLAGS = parser.parse_args()\n main()\n", "path": "slim/prune/export_model.py"}]}
| 3,740 | 234 |
gh_patches_debug_5790
|
rasdani/github-patches
|
git_diff
|
googleapis__google-auth-library-python-175
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GCE metadata ping taking ~75 seconds, not timing out after 3 seconds
google-api-python-client: 1.6.2
python version: 3.5.2
```
from google.auth.compute_engine import _metadata
import google.auth.transport._http_client
request = google.auth.transport._http_client.Request()
_metadata.ping(request=request)
```
When running the script above, the default timeout of 3 seconds is not used. I'm instead seeing the script complete in about 75 seconds. If I print out the value of line 4, I get `False`. I'm not sure if that indicates that the call was successful, or if a timeout had occurred. I see this take ~75 seconds consistently. If I set the environment variable `GCE_METADATA_TIMEOUT` to a value such as 10, the script still takes ~75 seconds.
I'm running into this issue when using the pandas-gbq library. I'm writing a script to authorize pandas to access the gbq project. That library checks for the default google credentials, which do not exist in my case. [Here's my related issue with that library](https://github.com/pydata/pandas-gbq/issues/73).
Is this an issue with the google-auth library?
</issue>
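The roughly 75 seconds is likely the operating system's TCP connect timeout rather than anything the library chose: as the listing below shows, `Request.__call__` computes a default timeout but never passes it to the connection object, so the connect phase runs unbounded. A small self-contained check of the stdlib behaviour involved (the address is a placeholder that is non-routable on most networks):

```python
# http.client only enforces a timeout if it is handed to the HTTPConnection
# constructor; with it, an unreachable host fails in ~3 s instead of minutes.
import http.client
import time

conn = http.client.HTTPConnection("10.255.255.1", timeout=3)  # placeholder address
start = time.monotonic()
try:
    conn.request("GET", "/")
    conn.getresponse()
except OSError:          # socket.timeout is an OSError subclass
    pass
finally:
    conn.close()
print(f"gave up after ~{time.monotonic() - start:.0f} s")
```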
<code>
[start of google/auth/transport/_http_client.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Transport adapter for http.client, for internal use only."""
16
17 import logging
18 import socket
19
20 from six.moves import http_client
21 from six.moves import urllib
22
23 from google.auth import exceptions
24 from google.auth import transport
25
26 _LOGGER = logging.getLogger(__name__)
27
28
29 class Response(transport.Response):
30 """http.client transport response adapter.
31
32 Args:
33 response (http.client.HTTPResponse): The raw http client response.
34 """
35 def __init__(self, response):
36 self._status = response.status
37 self._headers = {
38 key.lower(): value for key, value in response.getheaders()}
39 self._data = response.read()
40
41 @property
42 def status(self):
43 return self._status
44
45 @property
46 def headers(self):
47 return self._headers
48
49 @property
50 def data(self):
51 return self._data
52
53
54 class Request(transport.Request):
55 """http.client transport request adapter."""
56
57 def __call__(self, url, method='GET', body=None, headers=None,
58 timeout=None, **kwargs):
59 """Make an HTTP request using http.client.
60
61 Args:
62 url (str): The URI to be requested.
63 method (str): The HTTP method to use for the request. Defaults
64 to 'GET'.
65 body (bytes): The payload / body in HTTP request.
66 headers (Mapping): Request headers.
67 timeout (Optional(int)): The number of seconds to wait for a
68 response from the server. If not specified or if None, the
69 socket global default timeout will be used.
70 kwargs: Additional arguments passed throught to the underlying
71 :meth:`~http.client.HTTPConnection.request` method.
72
73 Returns:
74 Response: The HTTP response.
75
76 Raises:
77 google.auth.exceptions.TransportError: If any exception occurred.
78 """
79 # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.
80 if timeout is None:
81 timeout = socket._GLOBAL_DEFAULT_TIMEOUT
82
83 # http.client doesn't allow None as the headers argument.
84 if headers is None:
85 headers = {}
86
87 # http.client needs the host and path parts specified separately.
88 parts = urllib.parse.urlsplit(url)
89 path = urllib.parse.urlunsplit(
90 ('', '', parts.path, parts.query, parts.fragment))
91
92 if parts.scheme != 'http':
93 raise exceptions.TransportError(
94 'http.client transport only supports the http scheme, {}'
95 'was specified'.format(parts.scheme))
96
97 connection = http_client.HTTPConnection(parts.netloc)
98
99 try:
100 _LOGGER.debug('Making request: %s %s', method, url)
101
102 connection.request(
103 method, path, body=body, headers=headers, **kwargs)
104 response = connection.getresponse()
105 return Response(response)
106
107 except (http_client.HTTPException, socket.error) as exc:
108 raise exceptions.TransportError(exc)
109
110 finally:
111 connection.close()
112
[end of google/auth/transport/_http_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/google/auth/transport/_http_client.py b/google/auth/transport/_http_client.py
--- a/google/auth/transport/_http_client.py
+++ b/google/auth/transport/_http_client.py
@@ -94,7 +94,7 @@
'http.client transport only supports the http scheme, {}'
'was specified'.format(parts.scheme))
- connection = http_client.HTTPConnection(parts.netloc)
+ connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)
try:
_LOGGER.debug('Making request: %s %s', method, url)
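The one-line change above routes the already-computed `timeout` (or the socket global default when the caller passes `None`) into the `HTTPConnection` constructor, which is the only place `http.client` will enforce it during the connect phase. A hedged usage sketch of the patched adapter, assuming the package layout shown in the listing (the URL is a placeholder):

```python
# Hypothetical call against the patched transport; any plain-http URL works.
import google.auth.transport._http_client as _http_client

request = _http_client.Request()
response = request("http://example.com/", timeout=3)  # now bounds connect as well as read
print(response.status)
```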
|
{"golden_diff": "diff --git a/google/auth/transport/_http_client.py b/google/auth/transport/_http_client.py\n--- a/google/auth/transport/_http_client.py\n+++ b/google/auth/transport/_http_client.py\n@@ -94,7 +94,7 @@\n 'http.client transport only supports the http scheme, {}'\n 'was specified'.format(parts.scheme))\n \n- connection = http_client.HTTPConnection(parts.netloc)\n+ connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)\n \n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n", "issue": "GCE metadata ping taking ~75 seconds, not timing out after 3 seconds\ngoogle-api-python-client: 1.6.2\r\npython version: 3.5.2\r\n\r\n```\r\nfrom google.auth.compute_engine import _metadata\r\nimport google.auth.transport._http_client\r\nrequest = google.auth.transport._http_client.Request()\r\n_metadata.ping(request=request)\r\n```\r\n\r\nWhen running the following script, the default timeout of 3 seconds is not used. I'm instead seeing the script complete in about 75 seconds. If I print out the value of line 4, I get `False`. I'm not sure if that indicates that the call was successful, or if a timeout had occurred. I see this take ~75 seconds consistently If I set the environment variable `GCE_METADATA_TIMEOUT` to a value such as 10, the script still takes ~75 seconds.\r\n\r\nI'm running into this issue when using the pandas-gbq library. I'm writing a script to authorize pandas to access the gbq project. That library checks for the default google credentials, which do not exist in my case. [Here's my related issue with that library](https://github.com/pydata/pandas-gbq/issues/73).\r\n\r\nIs this an issue with the google-auth library?\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport adapter for http.client, for internal use only.\"\"\"\n\nimport logging\nimport socket\n\nfrom six.moves import http_client\nfrom six.moves import urllib\n\nfrom google.auth import exceptions\nfrom google.auth import transport\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Response(transport.Response):\n \"\"\"http.client transport response adapter.\n\n Args:\n response (http.client.HTTPResponse): The raw http client response.\n \"\"\"\n def __init__(self, response):\n self._status = response.status\n self._headers = {\n key.lower(): value for key, value in response.getheaders()}\n self._data = response.read()\n\n @property\n def status(self):\n return self._status\n\n @property\n def headers(self):\n return self._headers\n\n @property\n def data(self):\n return self._data\n\n\nclass Request(transport.Request):\n \"\"\"http.client transport request adapter.\"\"\"\n\n def __call__(self, url, method='GET', body=None, headers=None,\n timeout=None, **kwargs):\n \"\"\"Make an HTTP request using http.client.\n\n Args:\n url (str): The URI to be requested.\n method (str): The HTTP method to use for the request. 
Defaults\n to 'GET'.\n body (bytes): The payload / body in HTTP request.\n headers (Mapping): Request headers.\n timeout (Optional(int)): The number of seconds to wait for a\n response from the server. If not specified or if None, the\n socket global default timeout will be used.\n kwargs: Additional arguments passed throught to the underlying\n :meth:`~http.client.HTTPConnection.request` method.\n\n Returns:\n Response: The HTTP response.\n\n Raises:\n google.auth.exceptions.TransportError: If any exception occurred.\n \"\"\"\n # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.\n if timeout is None:\n timeout = socket._GLOBAL_DEFAULT_TIMEOUT\n\n # http.client doesn't allow None as the headers argument.\n if headers is None:\n headers = {}\n\n # http.client needs the host and path parts specified separately.\n parts = urllib.parse.urlsplit(url)\n path = urllib.parse.urlunsplit(\n ('', '', parts.path, parts.query, parts.fragment))\n\n if parts.scheme != 'http':\n raise exceptions.TransportError(\n 'http.client transport only supports the http scheme, {}'\n 'was specified'.format(parts.scheme))\n\n connection = http_client.HTTPConnection(parts.netloc)\n\n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n\n connection.request(\n method, path, body=body, headers=headers, **kwargs)\n response = connection.getresponse()\n return Response(response)\n\n except (http_client.HTTPException, socket.error) as exc:\n raise exceptions.TransportError(exc)\n\n finally:\n connection.close()\n", "path": "google/auth/transport/_http_client.py"}]}
| 1,782 | 128 |
gh_patches_debug_4041
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1208
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Moldova repeated ValueError
Seen on the Kibana dashboard.
[Logger](https://kibana.electricitymap.org/app/kibana#/discover/1710fdd0-2460-11e8-a779-9d01de8d7a71?_g=(refreshInterval:('$$hashKey':'object:6765',display:'10%20seconds',pause:!f,section:1,value:10000),time:(from:'2018-03-10T00:00:00.000Z',mode:absolute,to:'2018-03-12T10:40:25.571Z'))&_a=(columns:!(level,extra.path,message),filters:!(('$state':(store:appState),exists:(field:level),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!f,type:exists,value:exists)),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!t,params:(query:INFO,type:phrase),type:phrase,value:INFO),query:(match:(level:(query:INFO,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:extra.key,negate:!f,params:(query:MD,type:phrase),type:phrase,value:MD),query:(match:(extra.key:(query:MD,type:phrase))))),index:'93e631f0-245f-11e8-a779-9d01de8d7a71',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc)))
```
Traceback (most recent call last):
File "feeder_electricity.py", line 176, in fetch_exchange
objs = parser(country_code1, country_code2, session, logger=public_logger)
File "/home/electricitymap/parsers/MD.py", line 113, in fetch_exchange
exchange_status = get_data(session=session)
File "/home/electricitymap/parsers/MD.py", line 31, in get_data
data = [float(i) for i in raw_data.split(',')]
File "/home/electricitymap/parsers/MD.py", line 31, in <listcomp>
data = [float(i) for i in raw_data.split(',')]
ValueError: could not convert string to float:
```
```
Traceback (most recent call last):
File "feeder_electricity.py", line 148, in fetch_production
objs = parser(country_code, session, logger=public_logger)
File "/home/electricitymap/parsers/MD.py", line 69, in fetch_production
grid_status = get_data(session=session)
File "/home/electricitymap/parsers/MD.py", line 31, in get_data
data = [float(i) for i in raw_data.split(',')]
File "/home/electricitymap/parsers/MD.py", line 31, in <listcomp>
data = [float(i) for i in raw_data.split(',')]
ValueError: could not convert string to float:
```
</issue>
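Both tracebacks point at the same detail: the response body is empty, `''.split(',')` yields `['']`, and `float('')` raises the `ValueError` with nothing printed after the colon. A self-contained reproduction of just that failure mode (the empty string stands in for what `/utils/load4` presumably returns when the session lacks the site's cookies, which is what the patch further down in this row addresses):

```python
# Reproduces the parser's crash when the data endpoint returns an empty body.
raw_data = ""                                  # stand-in for an empty response
try:
    data = [float(i) for i in raw_data.split(',')]
except ValueError as exc:
    print(exc)                                 # matches the error in the tracebacks above
```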
<code>
[start of parsers/MD.py]
1 #!/usr/bin/env python3
2 # coding=utf-8
3
4 """Parser for Moldova."""
5
6 import arrow
7 from operator import itemgetter
8 import requests
9
10 TYPE_MAPPING = {
11 u'tmva476': 'hydro', # NHE Costeşti (run-of-river) #2 index
12 u'tmva112': 'hydro', # NHE Dubăsari (run-of-river) #4 index
13 u'tmva367': 'gas', # CET Nord (CHPP) #3 index
14 u'tmva42': 'gas', # CET-1 Chişinău (CHPP) #6 index
15 u'tmva378': 'gas', # CET-2 Chişinău (CHPP) #5 index
16 u'tmva1024': 'unknown', # CERS Moldovenească (fuel mix coal, gas, oil) #7 index
17 }
18
19 display_url = 'http://www.moldelectrica.md/ro/activity/system_state'
20 data_url = 'http://www.moldelectrica.md/utils/load4'
21
22
23 def get_data(session=None):
24 """ Returns generation data as a list of floats."""
25
26 s = session or requests.Session()
27
28 data_response = s.get(data_url)
29 raw_data = data_response.text
30
31 data = [float(i) for i in raw_data.split(',')]
32
33 return data
34
35
36 def fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):
37 """Requests the last known production mix (in MW) of a given country
38
39 Arguments:
40 zone_key (optional) -- used in case a parser is able to fetch multiple countries
41 session (optional) -- request session passed in order to re-use an existing session
42
43 Return:
44 A dictionary in the form:
45 {
46 'zoneKey': 'FR',
47 'datetime': '2017-01-01T00:00:00Z',
48 'production': {
49 'biomass': 0.0,
50 'coal': 0.0,
51 'gas': 0.0,
52 'hydro': 0.0,
53 'nuclear': null,
54 'oil': 0.0,
55 'solar': 0.0,
56 'wind': 0.0,
57 'geothermal': 0.0,
58 'unknown': 0.0
59 },
60 'storage': {
61 'hydro': -10.0,
62 },
63 'source': 'mysource.com'
64 }
65 """
66 if target_datetime:
67 raise NotImplementedError('This parser is not yet able to parse past dates')
68
69 grid_status = get_data(session=session)
70 production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}
71
72 production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))
73 production['hydro'] = sum(itemgetter(2, 4)(grid_status))
74 production['unknown'] = grid_status[7]
75
76 consumption = grid_status[-5]
77
78 dt = arrow.now('Europe/Chisinau').datetime
79
80 datapoint = {
81 'zoneKey': zone_key,
82 'datetime': dt,
83 'consumption': consumption,
84 'production': production,
85 'storage': {},
86 'source': 'moldelectrica.md'
87 }
88
89 return datapoint
90
91
92 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
93 """Requests the last known power exchange (in MW) between two countries
94 Arguments:
95 zone_key1 -- the first country code
96 zone_key2 -- the second country code; order of the two codes in params doesn't matter
97 session (optional) -- request session passed in order to re-use an existing session
98 Return:
99 A dictionary in the form:
100 {
101 'sortedZoneKeys': 'DK->NO',
102 'datetime': '2017-01-01T00:00:00Z',
103 'netFlow': 0.0,
104 'source': 'mysource.com'
105 }
106 where net flow is from DK into NO
107 """
108 if target_datetime:
109 raise NotImplementedError('This parser is not yet able to parse past dates')
110
111 sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))
112
113 exchange_status = get_data(session=session)
114
115 if sortedZoneKeys == 'MD->UA':
116 netflow = -1 * exchange_status[-3]
117 elif sortedZoneKeys == 'MD->RO':
118 netflow = -1 * exchange_status[-2]
119 else:
120 raise NotImplementedError('This exchange pair is not implemented')
121
122 dt = arrow.now('Europe/Chisinau').datetime
123
124 exchange = {
125 'sortedZoneKeys': sortedZoneKeys,
126 'datetime': dt,
127 'netFlow': netflow,
128 'source': 'moldelectrica.md'
129 }
130
131 return exchange
132
133
134 if __name__ == '__main__':
135 """Main method, never used by the Electricity Map backend, but handy for testing."""
136
137 print('fetch_production() ->')
138 print(fetch_production())
139 print('fetch_exchange(MD, UA) ->')
140 print(fetch_exchange('MD', 'UA'))
141 print('fetch_exchange(MD, RO) ->')
142 print(fetch_exchange('MD', 'RO'))
143
[end of parsers/MD.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/MD.py b/parsers/MD.py
--- a/parsers/MD.py
+++ b/parsers/MD.py
@@ -25,9 +25,10 @@
s = session or requests.Session()
+ #In order for the data url to return data, cookies from the display url must be obtained then reused.
+ response = s.get(display_url)
data_response = s.get(data_url)
raw_data = data_response.text
-
data = [float(i) for i in raw_data.split(',')]
return data
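The comment added by the patch states the rationale: the bare data endpoint only returns values once the session carries the cookies set by the human-readable page, so `display_url` is fetched first and the same `requests.Session` is reused. A self-contained sketch of that cookie-priming pattern, with a defensive guard against the original `float('')` crash (URLs are placeholders for the parser's `display_url`/`data_url`):

```python
# Generic version of the fix: prime cookies on the session, then read the CSV-like feed.
import requests

def get_numbers(page_url, data_url, session=None):
    s = session or requests.Session()
    s.get(page_url)                      # side effect: cookies stored on the session
    raw = s.get(data_url).text
    if not raw.strip():                  # avoid float('') if cookies are still missing
        raise ValueError("empty response from data endpoint")
    return [float(x) for x in raw.split(",")]
```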
|
{"golden_diff": "diff --git a/parsers/MD.py b/parsers/MD.py\n--- a/parsers/MD.py\n+++ b/parsers/MD.py\n@@ -25,9 +25,10 @@\n \n s = session or requests.Session()\n \n+ #In order for the data url to return data, cookies from the display url must be obtained then reused.\n+ response = s.get(display_url)\n data_response = s.get(data_url)\n raw_data = data_response.text\n-\n data = [float(i) for i in raw_data.split(',')]\n \n return data\n", "issue": "Moldova repeated ValueError\nSeen on the Kibana dashboard.\r\n\r\n[Logger](https://kibana.electricitymap.org/app/kibana#/discover/1710fdd0-2460-11e8-a779-9d01de8d7a71?_g=(refreshInterval:('$$hashKey':'object:6765',display:'10%20seconds',pause:!f,section:1,value:10000),time:(from:'2018-03-10T00:00:00.000Z',mode:absolute,to:'2018-03-12T10:40:25.571Z'))&_a=(columns:!(level,extra.path,message),filters:!(('$state':(store:appState),exists:(field:level),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!f,type:exists,value:exists)),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!t,params:(query:INFO,type:phrase),type:phrase,value:INFO),query:(match:(level:(query:INFO,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:extra.key,negate:!f,params:(query:MD,type:phrase),type:phrase,value:MD),query:(match:(extra.key:(query:MD,type:phrase))))),index:'93e631f0-245f-11e8-a779-9d01de8d7a71',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc)))\r\n```\r\nTraceback (most recent call last):\r\n File \"feeder_electricity.py\", line 176, in fetch_exchange\r\n objs = parser(country_code1, country_code2, session, logger=public_logger)\r\n File \"/home/electricitymap/parsers/MD.py\", line 113, in fetch_exchange\r\n exchange_status = get_data(session=session)\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in get_data\r\n data = [float(i) for i in raw_data.split(',')]\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in <listcomp>\r\n data = [float(i) for i in raw_data.split(',')]\r\nValueError: could not convert string to float:\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"feeder_electricity.py\", line 148, in fetch_production\r\n objs = parser(country_code, session, logger=public_logger)\r\n File \"/home/electricitymap/parsers/MD.py\", line 69, in fetch_production\r\n grid_status = get_data(session=session)\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in get_data\r\n data = [float(i) for i in raw_data.split(',')]\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in <listcomp>\r\n data = [float(i) for i in raw_data.split(',')]\r\nValueError: could not convert string to float:\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"Parser for Moldova.\"\"\"\n\nimport arrow\nfrom operator import itemgetter\nimport requests\n\nTYPE_MAPPING = {\n u'tmva476': 'hydro', # NHE Coste\u015fti (run-of-river) #2 index\n u'tmva112': 'hydro', # NHE Dub\u0103sari (run-of-river) #4 index\n u'tmva367': 'gas', # CET Nord (CHPP) #3 index\n u'tmva42': 'gas', # CET-1 Chi\u015fin\u0103u (CHPP) #6 index\n u'tmva378': 'gas', # CET-2 Chi\u015fin\u0103u (CHPP) #5 index\n u'tmva1024': 'unknown', # CERS Moldoveneasc\u0103 (fuel mix coal, gas, oil) #7 index\n}\n\ndisplay_url = 'http://www.moldelectrica.md/ro/activity/system_state'\ndata_url = 'http://www.moldelectrica.md/utils/load4'\n\n\ndef 
get_data(session=None):\n \"\"\" Returns generation data as a list of floats.\"\"\"\n\n s = session or requests.Session()\n\n data_response = s.get(data_url)\n raw_data = data_response.text\n\n data = [float(i) for i in raw_data.split(',')]\n\n return data\n\n\ndef fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n grid_status = get_data(session=session)\n production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}\n\n production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))\n production['hydro'] = sum(itemgetter(2, 4)(grid_status))\n production['unknown'] = grid_status[7]\n\n consumption = grid_status[-5]\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'consumption': consumption,\n 'production': production,\n 'storage': {},\n 'source': 'moldelectrica.md'\n }\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))\n\n exchange_status = get_data(session=session)\n\n if sortedZoneKeys == 'MD->UA':\n netflow = -1 * exchange_status[-3]\n elif sortedZoneKeys == 'MD->RO':\n netflow = -1 * exchange_status[-2]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n exchange = {\n 'sortedZoneKeys': sortedZoneKeys,\n 'datetime': dt,\n 'netFlow': netflow,\n 'source': 'moldelectrica.md'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(MD, UA) ->')\n print(fetch_exchange('MD', 'UA'))\n print('fetch_exchange(MD, RO) ->')\n print(fetch_exchange('MD', 'RO'))\n", "path": "parsers/MD.py"}]}
| 2,898 | 126 |
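A minimal sketch of the cookie-priming pattern applied by the MD.py fix above. The two URLs are the ones defined in that parser; the function name and the absence of any retry or error handling are illustrative simplifications, not part of the patch.

```python
import requests

# URLs taken from the parser above; everything else here is illustrative.
DISPLAY_URL = "http://www.moldelectrica.md/ro/activity/system_state"
DATA_URL = "http://www.moldelectrica.md/utils/load4"


def get_generation_values(session=None):
    """Fetch the comma-separated generation figures as a list of floats."""
    s = session or requests.Session()
    # Hitting the display page first stores the cookies the data endpoint
    # expects; without them it can answer with an empty body, which is what
    # made float('') raise the ValueError reported in the issue.
    s.get(DISPLAY_URL)
    raw_data = s.get(DATA_URL).text
    return [float(i) for i in raw_data.split(",")]
```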
gh_patches_debug_11561 | rasdani/github-patches | git_diff | iterative__dvc-3675 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Secsh channel 10 open FAILED: open failed: Connect failed
**Please provide information about your setup**
DVC version(i.e. `dvc --version`), Platform and method of installation (pip, homebrew, pkg Mac, exe (Windows), DEB(Linux), RPM(Linux))
version 0.88.0
installed via pip on Linux and Mac.
Syncing to a Ubuntu host via ssh.
Everything seems to be working but I get a cryptic warning message every time I do anything. For example:
dvc push
0% Querying cache in ssh://[email protected]/media/sda2/dvc/first_day| |0/42 Secsh channel 10 open FAILED: open failed: Connect failed
Secsh channel 10 open FAILED: open failed: Connect failed
Secsh channel 10 open FAILED: open failed: Connect failed
2% /media/sda2/dvc/first_day/4e/4b31f0c5784a2e185d88a3120cac19| |1/42 [00:02<0Secsh channel 10 open FAILED: open failed: Connect failed
Everything is up to date.
This is probably an edge case due to my setup but I'm not sure how to quiet the message or resolve the issue.
</issue>
<code>
[start of dvc/remote/ssh/connection.py]
1 import errno
2 import logging
3 import os
4 import posixpath
5 import stat
6 from contextlib import suppress
7
8 from funcy import cached_property
9
10 try:
11 import paramiko
12 except ImportError:
13 paramiko = None
14
15 from dvc.utils import tmp_fname
16 from dvc.progress import Tqdm
17 from dvc.exceptions import DvcException
18 from dvc.remote.base import RemoteCmdError
19
20
21 logger = logging.getLogger(__name__)
22
23
24 def sizeof_fmt(num, suffix="B"):
25 """ Convert number of bytes to human-readable string """
26 for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
27 if abs(num) < 1024.0:
28 return "%3.1f%s%s" % (num, unit, suffix)
29 num /= 1024.0
30 return "%.1f%s%s" % (num, "Y", suffix)
31
32
33 class SSHConnection:
34 def __init__(self, host, *args, **kwargs):
35 logger.debug(
36 "Establishing ssh connection with '{host}' "
37 "through port '{port}' as user '{username}'".format(
38 host=host, **kwargs
39 )
40 )
41 self.timeout = kwargs.get("timeout", 1800)
42
43 self._ssh = paramiko.SSHClient()
44 self._ssh.load_system_host_keys()
45 self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
46
47 self._ssh.connect(host, *args, **kwargs)
48 self._ssh.get_transport().set_keepalive(10)
49 self._sftp_channels = []
50
51 @property
52 def sftp(self):
53 if not self._sftp_channels:
54 self._sftp_channels = [self._ssh.open_sftp()]
55 return self._sftp_channels[0]
56
57 def close(self):
58 for sftp in self._sftp_channels:
59 sftp.close()
60 self._ssh.close()
61
62 def st_mode(self, path):
63 with suppress(FileNotFoundError):
64 return self.sftp.lstat(path).st_mode
65
66 return 0
67
68 def getsize(self, path):
69 with suppress(FileNotFoundError):
70 return self.sftp.lstat(path).st_size
71
72 return 0
73
74 def exists(self, path, sftp=None):
75 return bool(self.st_mode(path))
76
77 def isdir(self, path):
78 return stat.S_ISDIR(self.st_mode(path))
79
80 def isfile(self, path):
81 return stat.S_ISREG(self.st_mode(path))
82
83 def islink(self, path):
84 return stat.S_ISLNK(self.st_mode(path))
85
86 def makedirs(self, path):
87 # Single stat call will say whether this is a dir, a file or a link
88 st_mode = self.st_mode(path)
89
90 if stat.S_ISDIR(st_mode):
91 return
92
93 if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode):
94 raise DvcException(
95 "a file with the same name '{}' already exists".format(path)
96 )
97
98 head, tail = posixpath.split(path)
99
100 if head:
101 self.makedirs(head)
102
103 if tail:
104 try:
105 self.sftp.mkdir(path)
106 except IOError as exc:
107 # Since paramiko errors are very vague we need to recheck
108 # whether it's because path already exists or something else
109 if exc.errno == errno.EACCES or not self.exists(path):
110 raise DvcException(
111 "unable to create remote directory '{}'".format(path)
112 ) from exc
113
114 def walk(self, directory, topdown=True):
115 # NOTE: original os.walk() implementation [1] with default options was
116 # used as a template.
117 #
118 # [1] https://github.com/python/cpython/blob/master/Lib/os.py
119 try:
120 dir_entries = self.sftp.listdir_attr(directory)
121 except IOError as exc:
122 raise DvcException(
123 "couldn't get the '{}' remote directory files list".format(
124 directory
125 )
126 ) from exc
127
128 dirs = []
129 nondirs = []
130 for entry in dir_entries:
131 name = entry.filename
132 if stat.S_ISDIR(entry.st_mode):
133 dirs.append(name)
134 else:
135 nondirs.append(name)
136
137 if topdown:
138 yield directory, dirs, nondirs
139
140 for dname in dirs:
141 newpath = posixpath.join(directory, dname)
142 yield from self.walk(newpath, topdown=topdown)
143
144 if not topdown:
145 yield directory, dirs, nondirs
146
147 def walk_files(self, directory):
148 for root, dirs, files in self.walk(directory):
149 for fname in files:
150 yield posixpath.join(root, fname)
151
152 def _remove_file(self, path):
153 with suppress(FileNotFoundError):
154 self.sftp.remove(path)
155
156 def _remove_dir(self, path):
157 for root, dirs, files in self.walk(path, topdown=False):
158 for fname in files:
159 with suppress(FileNotFoundError):
160 self._remove_file(posixpath.join(root, fname))
161
162 for dname in dirs:
163 with suppress(FileNotFoundError):
164 self.sftp.rmdir(posixpath.join(root, dname))
165
166 with suppress(FileNotFoundError):
167 self.sftp.rmdir(path)
168
169 def remove(self, path):
170 if self.isdir(path):
171 self._remove_dir(path)
172 else:
173 self._remove_file(path)
174
175 def download(self, src, dest, no_progress_bar=False, progress_title=None):
176 with Tqdm(
177 desc=progress_title or os.path.basename(src),
178 disable=no_progress_bar,
179 bytes=True,
180 ) as pbar:
181 self.sftp.get(src, dest, callback=pbar.update_to)
182
183 def move(self, src, dst):
184 """Rename src to dst, if it is not possible (in case src and dst are
185 on different filesystems) and actual physical copying of data is
186 happening.
187 """
188 self.makedirs(posixpath.dirname(dst))
189
190 try:
191 self.sftp.rename(src, dst)
192 except OSError:
193 self.atomic_copy(src, dst)
194 self.remove(src)
195
196 def atomic_copy(self, src, dst):
197 tmp = tmp_fname(dst)
198
199 try:
200 self.copy(src, tmp)
201 self.sftp.rename(tmp, dst)
202 finally:
203 self.remove(tmp)
204
205 def upload(self, src, dest, no_progress_bar=False, progress_title=None):
206 self.makedirs(posixpath.dirname(dest))
207 tmp_file = tmp_fname(dest)
208 if not progress_title:
209 progress_title = posixpath.basename(dest)
210
211 with Tqdm(
212 desc=progress_title, disable=no_progress_bar, bytes=True
213 ) as pbar:
214 self.sftp.put(src, tmp_file, callback=pbar.update_to)
215
216 self.sftp.rename(tmp_file, dest)
217
218 def execute(self, cmd):
219 stdin, stdout, stderr = self._ssh.exec_command(cmd)
220 channel = stdout.channel
221
222 stdin.close()
223 channel.shutdown_write()
224
225 stdout_chunks = []
226 stderr_chunks = []
227 while (
228 not channel.closed
229 or channel.recv_ready()
230 or channel.recv_stderr_ready()
231 ):
232 import select
233
234 got_chunk = False
235 readq, _, _ = select.select([stdout.channel], [], [], self.timeout)
236 for c in readq:
237 if c.recv_ready():
238 stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))
239 got_chunk = True
240
241 if c.recv_stderr_ready():
242 stderr_len = len(c.in_stderr_buffer)
243 s = stderr.channel.recv_stderr(stderr_len)
244 stderr_chunks.append(s)
245 got_chunk = True
246
247 if (
248 not got_chunk
249 and stdout.channel.exit_status_ready()
250 and not stderr.channel.recv_stderr_ready()
251 and not stdout.channel.recv_ready()
252 ):
253 stdout.channel.shutdown_read()
254 stdout.channel.close()
255 break
256
257 stdout.close()
258 stderr.close()
259
260 ret = stdout.channel.recv_exit_status()
261 if ret != 0:
262 err = b"".join(stderr_chunks).decode("utf-8")
263 raise RemoteCmdError("ssh", cmd, ret, err)
264
265 return b"".join(stdout_chunks).decode("utf-8")
266
267 @cached_property
268 def uname(self):
269 return self.execute("uname").strip()
270
271 def md5(self, path):
272 """
273 Use different md5 commands depending on the OS:
274
275 - Darwin's `md5` returns BSD-style checksums by default
276 - Linux's `md5sum` needs the `--tag` flag for a similar output
277
278 Example:
279 MD5 (foo.txt) = f3d220a856b52aabbf294351e8a24300
280 """
281 if self.uname == "Linux":
282 md5 = self.execute("md5sum " + path).split()[0]
283 elif self.uname == "Darwin":
284 md5 = self.execute("md5 " + path).split()[-1]
285 else:
286 raise DvcException(
287 "'{}' is not supported as a SSH remote".format(self.uname)
288 )
289
290 assert len(md5) == 32
291 return md5
292
293 def copy(self, src, dest):
294 self.execute("cp {} {}".format(src, dest))
295
296 def open_max_sftp_channels(self):
297 # If there are more than 1 it means we've already opened max amount
298 if len(self._sftp_channels) <= 1:
299 while True:
300 try:
301 self._sftp_channels.append(self._ssh.open_sftp())
302 except paramiko.ssh_exception.ChannelException:
303 if not self._sftp_channels:
304 raise
305 break
306 return self._sftp_channels
307
308 def open(self, *args, **kwargs):
309 return self.sftp.open(*args, **kwargs)
310
311 def symlink(self, src, dest):
312 self.sftp.symlink(src, dest)
313
314 def reflink(self, src, dest):
315 if self.uname == "Linux":
316 return self.execute("cp --reflink {} {}".format(src, dest))
317
318 if self.uname == "Darwin":
319 return self.execute("cp -c {} {}".format(src, dest))
320
321 raise DvcException(
322 "'{}' is not supported as a SSH remote".format(self.uname)
323 )
324
325 def hardlink(self, src, dest):
326 self.execute("ln {} {}".format(src, dest))
327
[end of dvc/remote/ssh/connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/remote/ssh/connection.py b/dvc/remote/ssh/connection.py
--- a/dvc/remote/ssh/connection.py
+++ b/dvc/remote/ssh/connection.py
@@ -41,6 +41,14 @@
self.timeout = kwargs.get("timeout", 1800)
self._ssh = paramiko.SSHClient()
+
+ # Explicitly disable paramiko logger. Due to how paramiko dynamically
+ # loads loggers, it is not disabled by DVC disable_other_loggers().
+ # See https://github.com/iterative/dvc/issues/3482
+ self._ssh.set_log_channel("dvc.paramiko")
+ logging.getLogger("dvc.paramiko").disabled = True
+ logging.getLogger("dvc.paramiko.sftp").disabled = True
+
self._ssh.load_system_host_keys()
self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
{"golden_diff": "diff --git a/dvc/remote/ssh/connection.py b/dvc/remote/ssh/connection.py\n--- a/dvc/remote/ssh/connection.py\n+++ b/dvc/remote/ssh/connection.py\n@@ -41,6 +41,14 @@\n self.timeout = kwargs.get(\"timeout\", 1800)\n \n self._ssh = paramiko.SSHClient()\n+\n+ # Explicitly disable paramiko logger. Due to how paramiko dynamically\n+ # loads loggers, it is not disabled by DVC disable_other_loggers().\n+ # See https://github.com/iterative/dvc/issues/3482\n+ self._ssh.set_log_channel(\"dvc.paramiko\")\n+ logging.getLogger(\"dvc.paramiko\").disabled = True\n+ logging.getLogger(\"dvc.paramiko.sftp\").disabled = True\n+\n self._ssh.load_system_host_keys()\n self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n", "issue": "Secsh channel 10 open FAILED: open failed: Connect failed\n**Please provide information about your setup**\r\nDVC version(i.e. `dvc --version`), Platform and method of installation (pip, homebrew, pkg Mac, exe (Windows), DEB(Linux), RPM(Linux))\r\n\r\nversion 0.88.0\r\ninstalled via pip on Linux and Mac.\r\nSyncing to a Ubuntu host via ssh.\r\n\r\nEverything seems to be working but I get a cryptic warning message every time I do anything. For example:\r\n\r\ndvc push\r\n 0% Querying cache in ssh://[email protected]/media/sda2/dvc/first_day| |0/42 Secsh channel 10 open FAILED: open failed: Connect failed\r\nSecsh channel 10 open FAILED: open failed: Connect failed\r\nSecsh channel 10 open FAILED: open failed: Connect failed\r\n 2% /media/sda2/dvc/first_day/4e/4b31f0c5784a2e185d88a3120cac19| |1/42 [00:02<0Secsh channel 10 open FAILED: open failed: Connect failed\r\nEverything is up to date. \r\n\r\nThis is probably an edge case due to my setup but I'm not sure how to quiet the message or resolve the issue.\n", "before_files": [{"content": "import errno\nimport logging\nimport os\nimport posixpath\nimport stat\nfrom contextlib import suppress\n\nfrom funcy import cached_property\n\ntry:\n import paramiko\nexcept ImportError:\n paramiko = None\n\nfrom dvc.utils import tmp_fname\nfrom dvc.progress import Tqdm\nfrom dvc.exceptions import DvcException\nfrom dvc.remote.base import RemoteCmdError\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef sizeof_fmt(num, suffix=\"B\"):\n \"\"\" Convert number of bytes to human-readable string \"\"\"\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\"]:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, \"Y\", suffix)\n\n\nclass SSHConnection:\n def __init__(self, host, *args, **kwargs):\n logger.debug(\n \"Establishing ssh connection with '{host}' \"\n \"through port '{port}' as user '{username}'\".format(\n host=host, **kwargs\n )\n )\n self.timeout = kwargs.get(\"timeout\", 1800)\n\n self._ssh = paramiko.SSHClient()\n self._ssh.load_system_host_keys()\n self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n self._ssh.connect(host, *args, **kwargs)\n self._ssh.get_transport().set_keepalive(10)\n self._sftp_channels = []\n\n @property\n def sftp(self):\n if not self._sftp_channels:\n self._sftp_channels = [self._ssh.open_sftp()]\n return self._sftp_channels[0]\n\n def close(self):\n for sftp in self._sftp_channels:\n sftp.close()\n self._ssh.close()\n\n def st_mode(self, path):\n with suppress(FileNotFoundError):\n return self.sftp.lstat(path).st_mode\n\n return 0\n\n def getsize(self, path):\n with suppress(FileNotFoundError):\n return self.sftp.lstat(path).st_size\n\n return 0\n\n def exists(self, path, sftp=None):\n return 
bool(self.st_mode(path))\n\n def isdir(self, path):\n return stat.S_ISDIR(self.st_mode(path))\n\n def isfile(self, path):\n return stat.S_ISREG(self.st_mode(path))\n\n def islink(self, path):\n return stat.S_ISLNK(self.st_mode(path))\n\n def makedirs(self, path):\n # Single stat call will say whether this is a dir, a file or a link\n st_mode = self.st_mode(path)\n\n if stat.S_ISDIR(st_mode):\n return\n\n if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode):\n raise DvcException(\n \"a file with the same name '{}' already exists\".format(path)\n )\n\n head, tail = posixpath.split(path)\n\n if head:\n self.makedirs(head)\n\n if tail:\n try:\n self.sftp.mkdir(path)\n except IOError as exc:\n # Since paramiko errors are very vague we need to recheck\n # whether it's because path already exists or something else\n if exc.errno == errno.EACCES or not self.exists(path):\n raise DvcException(\n \"unable to create remote directory '{}'\".format(path)\n ) from exc\n\n def walk(self, directory, topdown=True):\n # NOTE: original os.walk() implementation [1] with default options was\n # used as a template.\n #\n # [1] https://github.com/python/cpython/blob/master/Lib/os.py\n try:\n dir_entries = self.sftp.listdir_attr(directory)\n except IOError as exc:\n raise DvcException(\n \"couldn't get the '{}' remote directory files list\".format(\n directory\n )\n ) from exc\n\n dirs = []\n nondirs = []\n for entry in dir_entries:\n name = entry.filename\n if stat.S_ISDIR(entry.st_mode):\n dirs.append(name)\n else:\n nondirs.append(name)\n\n if topdown:\n yield directory, dirs, nondirs\n\n for dname in dirs:\n newpath = posixpath.join(directory, dname)\n yield from self.walk(newpath, topdown=topdown)\n\n if not topdown:\n yield directory, dirs, nondirs\n\n def walk_files(self, directory):\n for root, dirs, files in self.walk(directory):\n for fname in files:\n yield posixpath.join(root, fname)\n\n def _remove_file(self, path):\n with suppress(FileNotFoundError):\n self.sftp.remove(path)\n\n def _remove_dir(self, path):\n for root, dirs, files in self.walk(path, topdown=False):\n for fname in files:\n with suppress(FileNotFoundError):\n self._remove_file(posixpath.join(root, fname))\n\n for dname in dirs:\n with suppress(FileNotFoundError):\n self.sftp.rmdir(posixpath.join(root, dname))\n\n with suppress(FileNotFoundError):\n self.sftp.rmdir(path)\n\n def remove(self, path):\n if self.isdir(path):\n self._remove_dir(path)\n else:\n self._remove_file(path)\n\n def download(self, src, dest, no_progress_bar=False, progress_title=None):\n with Tqdm(\n desc=progress_title or os.path.basename(src),\n disable=no_progress_bar,\n bytes=True,\n ) as pbar:\n self.sftp.get(src, dest, callback=pbar.update_to)\n\n def move(self, src, dst):\n \"\"\"Rename src to dst, if it is not possible (in case src and dst are\n on different filesystems) and actual physical copying of data is\n happening.\n \"\"\"\n self.makedirs(posixpath.dirname(dst))\n\n try:\n self.sftp.rename(src, dst)\n except OSError:\n self.atomic_copy(src, dst)\n self.remove(src)\n\n def atomic_copy(self, src, dst):\n tmp = tmp_fname(dst)\n\n try:\n self.copy(src, tmp)\n self.sftp.rename(tmp, dst)\n finally:\n self.remove(tmp)\n\n def upload(self, src, dest, no_progress_bar=False, progress_title=None):\n self.makedirs(posixpath.dirname(dest))\n tmp_file = tmp_fname(dest)\n if not progress_title:\n progress_title = posixpath.basename(dest)\n\n with Tqdm(\n desc=progress_title, disable=no_progress_bar, bytes=True\n ) as pbar:\n self.sftp.put(src, tmp_file, 
callback=pbar.update_to)\n\n self.sftp.rename(tmp_file, dest)\n\n def execute(self, cmd):\n stdin, stdout, stderr = self._ssh.exec_command(cmd)\n channel = stdout.channel\n\n stdin.close()\n channel.shutdown_write()\n\n stdout_chunks = []\n stderr_chunks = []\n while (\n not channel.closed\n or channel.recv_ready()\n or channel.recv_stderr_ready()\n ):\n import select\n\n got_chunk = False\n readq, _, _ = select.select([stdout.channel], [], [], self.timeout)\n for c in readq:\n if c.recv_ready():\n stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))\n got_chunk = True\n\n if c.recv_stderr_ready():\n stderr_len = len(c.in_stderr_buffer)\n s = stderr.channel.recv_stderr(stderr_len)\n stderr_chunks.append(s)\n got_chunk = True\n\n if (\n not got_chunk\n and stdout.channel.exit_status_ready()\n and not stderr.channel.recv_stderr_ready()\n and not stdout.channel.recv_ready()\n ):\n stdout.channel.shutdown_read()\n stdout.channel.close()\n break\n\n stdout.close()\n stderr.close()\n\n ret = stdout.channel.recv_exit_status()\n if ret != 0:\n err = b\"\".join(stderr_chunks).decode(\"utf-8\")\n raise RemoteCmdError(\"ssh\", cmd, ret, err)\n\n return b\"\".join(stdout_chunks).decode(\"utf-8\")\n\n @cached_property\n def uname(self):\n return self.execute(\"uname\").strip()\n\n def md5(self, path):\n \"\"\"\n Use different md5 commands depending on the OS:\n\n - Darwin's `md5` returns BSD-style checksums by default\n - Linux's `md5sum` needs the `--tag` flag for a similar output\n\n Example:\n MD5 (foo.txt) = f3d220a856b52aabbf294351e8a24300\n \"\"\"\n if self.uname == \"Linux\":\n md5 = self.execute(\"md5sum \" + path).split()[0]\n elif self.uname == \"Darwin\":\n md5 = self.execute(\"md5 \" + path).split()[-1]\n else:\n raise DvcException(\n \"'{}' is not supported as a SSH remote\".format(self.uname)\n )\n\n assert len(md5) == 32\n return md5\n\n def copy(self, src, dest):\n self.execute(\"cp {} {}\".format(src, dest))\n\n def open_max_sftp_channels(self):\n # If there are more than 1 it means we've already opened max amount\n if len(self._sftp_channels) <= 1:\n while True:\n try:\n self._sftp_channels.append(self._ssh.open_sftp())\n except paramiko.ssh_exception.ChannelException:\n if not self._sftp_channels:\n raise\n break\n return self._sftp_channels\n\n def open(self, *args, **kwargs):\n return self.sftp.open(*args, **kwargs)\n\n def symlink(self, src, dest):\n self.sftp.symlink(src, dest)\n\n def reflink(self, src, dest):\n if self.uname == \"Linux\":\n return self.execute(\"cp --reflink {} {}\".format(src, dest))\n\n if self.uname == \"Darwin\":\n return self.execute(\"cp -c {} {}\".format(src, dest))\n\n raise DvcException(\n \"'{}' is not supported as a SSH remote\".format(self.uname)\n )\n\n def hardlink(self, src, dest):\n self.execute(\"ln {} {}\".format(src, dest))\n", "path": "dvc/remote/ssh/connection.py"}]}
| 3,987 | 210 |
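A self-contained sketch of the logger-pinning technique used by the DVC patch above. The "dvc.paramiko" channel name and the two disabled loggers come from that patch; the standalone helper wrapping them is invented here for illustration.

```python
import logging

import paramiko


def quiet_ssh_client() -> paramiko.SSHClient:
    """Create an SSHClient whose transport messages go to disabled loggers.

    paramiko names its loggers lazily per connection, so a blanket
    "disable other loggers" pass can miss them. Pinning the log channel to a
    known name and disabling that logger (and its sftp child) silences lines
    such as "Secsh channel 10 open FAILED: open failed: Connect failed".
    """
    client = paramiko.SSHClient()
    client.set_log_channel("dvc.paramiko")
    logging.getLogger("dvc.paramiko").disabled = True
    logging.getLogger("dvc.paramiko.sftp").disabled = True
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    return client
```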
gh_patches_debug_30707 | rasdani/github-patches | git_diff | encode__starlette-1147 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Session cookie should use root path
The session cookie currently uses '/'.
It should really use the ASGI root path instead, in case the application is submounted.
</issue>
<code>
[start of starlette/middleware/sessions.py]
1 import json
2 import typing
3 from base64 import b64decode, b64encode
4
5 import itsdangerous
6 from itsdangerous.exc import BadTimeSignature, SignatureExpired
7
8 from starlette.datastructures import MutableHeaders, Secret
9 from starlette.requests import HTTPConnection
10 from starlette.types import ASGIApp, Message, Receive, Scope, Send
11
12
13 class SessionMiddleware:
14 def __init__(
15 self,
16 app: ASGIApp,
17 secret_key: typing.Union[str, Secret],
18 session_cookie: str = "session",
19 max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds
20 same_site: str = "lax",
21 https_only: bool = False,
22 ) -> None:
23 self.app = app
24 self.signer = itsdangerous.TimestampSigner(str(secret_key))
25 self.session_cookie = session_cookie
26 self.max_age = max_age
27 self.security_flags = "httponly; samesite=" + same_site
28 if https_only: # Secure flag can be used with HTTPS only
29 self.security_flags += "; secure"
30
31 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
32 if scope["type"] not in ("http", "websocket"): # pragma: no cover
33 await self.app(scope, receive, send)
34 return
35
36 connection = HTTPConnection(scope)
37 initial_session_was_empty = True
38
39 if self.session_cookie in connection.cookies:
40 data = connection.cookies[self.session_cookie].encode("utf-8")
41 try:
42 data = self.signer.unsign(data, max_age=self.max_age)
43 scope["session"] = json.loads(b64decode(data))
44 initial_session_was_empty = False
45 except (BadTimeSignature, SignatureExpired):
46 scope["session"] = {}
47 else:
48 scope["session"] = {}
49
50 async def send_wrapper(message: Message) -> None:
51 if message["type"] == "http.response.start":
52 if scope["session"]:
53 # We have session data to persist.
54 data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
55 data = self.signer.sign(data)
56 headers = MutableHeaders(scope=message)
57 header_value = "%s=%s; path=/; Max-Age=%d; %s" % (
58 self.session_cookie,
59 data.decode("utf-8"),
60 self.max_age,
61 self.security_flags,
62 )
63 headers.append("Set-Cookie", header_value)
64 elif not initial_session_was_empty:
65 # The session has been cleared.
66 headers = MutableHeaders(scope=message)
67 header_value = "{}={}; {}".format(
68 self.session_cookie,
69 "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
70 self.security_flags,
71 )
72 headers.append("Set-Cookie", header_value)
73 await send(message)
74
75 await self.app(scope, receive, send_wrapper)
76
[end of starlette/middleware/sessions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py
--- a/starlette/middleware/sessions.py
+++ b/starlette/middleware/sessions.py
@@ -49,14 +49,16 @@
async def send_wrapper(message: Message) -> None:
if message["type"] == "http.response.start":
+ path = scope.get("root_path", "") or "/"
if scope["session"]:
# We have session data to persist.
data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
data = self.signer.sign(data)
headers = MutableHeaders(scope=message)
- header_value = "%s=%s; path=/; Max-Age=%d; %s" % (
+ header_value = "%s=%s; path=%s; Max-Age=%d; %s" % (
self.session_cookie,
data.decode("utf-8"),
+ path,
self.max_age,
self.security_flags,
)
@@ -66,7 +68,7 @@
headers = MutableHeaders(scope=message)
header_value = "{}={}; {}".format(
self.session_cookie,
- "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
+ f"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
self.security_flags,
)
headers.append("Set-Cookie", header_value)
|
{"golden_diff": "diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py\n--- a/starlette/middleware/sessions.py\n+++ b/starlette/middleware/sessions.py\n@@ -49,14 +49,16 @@\n \n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n+ path = scope.get(\"root_path\", \"\") or \"/\"\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n- header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n+ header_value = \"%s=%s; path=%s; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n+ path,\n self.max_age,\n self.security_flags,\n )\n@@ -66,7 +68,7 @@\n headers = MutableHeaders(scope=message)\n header_value = \"{}={}; {}\".format(\n self.session_cookie,\n- \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n+ f\"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n", "issue": "Session cookie should use root path\nThe session cookie currently uses '/'.\r\nIt should really use the ASGI root path instead, in case the application is submounted.\n", "before_files": [{"content": "import json\nimport typing\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\nfrom itsdangerous.exc import BadTimeSignature, SignatureExpired\n\nfrom starlette.datastructures import MutableHeaders, Secret\nfrom starlette.requests import HTTPConnection\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n secret_key: typing.Union[str, Secret],\n session_cookie: str = \"session\",\n max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n same_site: str = \"lax\",\n https_only: bool = False,\n ) -> None:\n self.app = app\n self.signer = itsdangerous.TimestampSigner(str(secret_key))\n self.session_cookie = session_cookie\n self.max_age = max_age\n self.security_flags = \"httponly; samesite=\" + same_site\n if https_only: # Secure flag can be used with HTTPS only\n self.security_flags += \"; secure\"\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] not in (\"http\", \"websocket\"): # pragma: no cover\n await self.app(scope, receive, send)\n return\n\n connection = HTTPConnection(scope)\n initial_session_was_empty = True\n\n if self.session_cookie in connection.cookies:\n data = connection.cookies[self.session_cookie].encode(\"utf-8\")\n try:\n data = self.signer.unsign(data, max_age=self.max_age)\n scope[\"session\"] = json.loads(b64decode(data))\n initial_session_was_empty = False\n except (BadTimeSignature, SignatureExpired):\n scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n\n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n self.max_age,\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n elif not initial_session_was_empty:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"{}={}; {}\".format(\n 
self.session_cookie,\n \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await self.app(scope, receive, send_wrapper)\n", "path": "starlette/middleware/sessions.py"}]}
| 1,374 | 340 |
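A small sketch of the root-path-aware cookie path introduced by the Starlette patch above. Only the `scope.get("root_path", "") or "/"` expression comes from the patch; the helper, its signature, and the hard-coded flags are assumptions made for a runnable example.

```python
import typing as t


def session_cookie_header(
    scope: t.Mapping[str, t.Any], name: str, value: str, max_age: int
) -> str:
    """Build a Set-Cookie value whose path honours the ASGI root_path."""
    # Fall back to "/" when the app is mounted at the root or root_path is "".
    path = scope.get("root_path", "") or "/"
    return f"{name}={value}; path={path}; Max-Age={max_age}; httponly; samesite=lax"


# For an app sub-mounted under /subapp the cookie is scoped to that prefix:
print(session_cookie_header({"root_path": "/subapp"}, "session", "abc123", 1209600))
# session=abc123; path=/subapp; Max-Age=1209600; httponly; samesite=lax
```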
gh_patches_debug_27519 | rasdani/github-patches | git_diff | opsdroid__opsdroid-22 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Config locations
Currently opsdroid looks for the `configuration.yaml` file in the current working directory. It should also look in `~/.opsdroid/configuration.yaml` and `/etc/opsdroid/configuration.yaml`.
</issue>
<code>
[start of opsdroid/loader.py]
1 """Class for loading in modules to OpsDroid."""
2
3 import logging
4 import os
5 import shutil
6 import subprocess
7 import importlib
8 import yaml
9 from opsdroid.const import (
10 DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULE_BRANCH)
11
12
13 def import_module(config):
14 """Import module namespace as variable and return it."""
15 try:
16 module = importlib.import_module(
17 config["path"] + "." + config["name"])
18 logging.debug("Loading " + config["type"] + ": " + config["name"])
19 return module
20 except ImportError as error:
21 logging.error("Failed to load " + config["type"] +
22 " " + config["name"])
23 logging.error(error)
24 return None
25
26
27 def check_cache(config):
28 """Remove module if 'no-cache' set in config."""
29 if "no-cache" in config \
30 and config["no-cache"] \
31 and os.path.isdir(config["install_path"]):
32 logging.debug("'no-cache' set, removing " + config["install_path"])
33 shutil.rmtree(config["install_path"])
34
35
36 def build_module_path(path_type, config):
37 """Generate the module path from name and type."""
38 if path_type == "import":
39 return MODULES_DIRECTORY + "." + config["type"] + "." + config["name"]
40 elif path_type == "install":
41 return MODULES_DIRECTORY + "/" + config["type"] + "/" + config["name"]
42
43
44 def git_clone(git_url, install_path, branch):
45 """Clone a git repo to a location and wait for finish."""
46 process = subprocess.Popen(["git", "clone", "-b", branch,
47 git_url, install_path], shell=False,
48 stdout=subprocess.PIPE,
49 stderr=subprocess.PIPE)
50 process.wait()
51
52
53 def pip_install_deps(requirements_path):
54 """Pip install a requirements.txt file and wait for finish."""
55 process = subprocess.Popen(["pip", "install", "-r", requirements_path],
56 shell=False,
57 stdout=subprocess.PIPE,
58 stderr=subprocess.PIPE)
59 for output in process.communicate():
60 if output != "":
61 for line in output.splitlines():
62 logging.debug(str(line).strip())
63 process.wait()
64
65
66 class Loader:
67 """Class to load in config and modules."""
68
69 def __init__(self, opsdroid):
70 """Setup object with opsdroid instance."""
71 self.opsdroid = opsdroid
72 logging.debug("Loaded loader")
73
74 def load_config_file(self, config_path):
75 """Load a yaml config file from path."""
76 if not os.path.isfile(config_path):
77 self.opsdroid.critical("Config file " + config_path +
78 " not found", 1)
79
80 try:
81 with open(config_path, 'r') as stream:
82 return yaml.load(stream)
83 except yaml.YAMLError as error:
84 self.opsdroid.critical(error, 1)
85 except FileNotFoundError as error:
86 self.opsdroid.critical(str(error), 1)
87
88 def load_config(self, config):
89 """Load all module types based on config."""
90 logging.debug("Loading modules from config")
91
92 if 'databases' in config.keys():
93 self.opsdroid.start_databases(
94 self._load_modules('database', config['databases']))
95 else:
96 logging.warning("No databases in configuration")
97
98 if 'skills' in config.keys():
99 self._setup_modules(
100 self._load_modules('skill', config['skills'])
101 )
102 else:
103 self.opsdroid.critical(
104 "No skills in configuration, at least 1 required", 1)
105
106 if 'connectors' in config.keys():
107 self.opsdroid.start_connectors(
108 self._load_modules('connector', config['connectors']))
109 else:
110 self.opsdroid.critical(
111 "No connectors in configuration, at least 1 required", 1)
112
113 def _load_modules(self, modules_type, modules):
114 """Install and load modules."""
115 logging.debug("Loading " + modules_type + " modules")
116 loaded_modules = []
117
118 # Create modules directory if doesn't exist
119 if not os.path.isdir(MODULES_DIRECTORY):
120 os.makedirs(MODULES_DIRECTORY)
121
122 for module_name in modules.keys():
123
124 # Set up module config
125 config = modules[module_name]
126 config = {} if config is None else config
127 config["name"] = module_name
128 config["type"] = modules_type
129 config["path"] = build_module_path("import", config)
130 config["install_path"] = build_module_path("install", config)
131 if "branch" not in config:
132 config["branch"] = DEFAULT_MODULE_BRANCH
133
134 # Remove module for reinstall if no-cache set
135 check_cache(config)
136
137 # Install module
138 self._install_module(config)
139
140 # Import module
141 module = import_module(config)
142 if module is not None:
143 loaded_modules.append({
144 "module": module,
145 "config": config})
146
147 return loaded_modules
148
149 def _setup_modules(self, modules):
150 """Call the setup function on the passed in modules."""
151 for module in modules:
152 module["module"].setup(self.opsdroid)
153
154 def _install_module(self, config):
155 # pylint: disable=R0201
156 """Install a module."""
157 logging.debug("Installing " + config["name"])
158
159 if os.path.isdir(config["install_path"]):
160 # TODO Allow for updating or reinstalling of modules
161 logging.debug("Module " + config["name"] +
162 " already installed, skipping")
163 else:
164 if config is not None and "repo" in config:
165 git_url = config["repo"]
166 else:
167 git_url = DEFAULT_GIT_URL + config["type"] + \
168 "-" + config["name"] + ".git"
169
170 if any(prefix in git_url for prefix in ["http", "https", "ssh"]):
171 # TODO Test if url or ssh path exists
172 # TODO Handle github authentication
173 git_clone(git_url, config["install_path"], config["branch"])
174 else:
175 if os.path.isdir(git_url):
176 git_clone(git_url, config["install_path"],
177 config["branch"])
178 else:
179 logging.debug("Could not find local git repo " + git_url)
180
181 if os.path.isdir(config["install_path"]):
182 logging.debug("Installed " + config["name"] +
183 " to " + config["install_path"])
184 else:
185 logging.debug("Install of " + config["name"] + " failed ")
186
187 # Install module dependancies
188 if os.path.isfile(config["install_path"] + "/requirements.txt"):
189 pip_install_deps(config["install_path"] + "/requirements.txt")
190
[end of opsdroid/loader.py]
[start of opsdroid/__main__.py]
1 """Starts opsdroid."""
2
3 import logging
4
5 from opsdroid.loader import Loader
6 from opsdroid.core import OpsDroid
7 from opsdroid.helper import set_logging_level
8 from opsdroid.const import LOG_FILENAME
9
10
11 def main():
12 """The main function."""
13 logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
14 logging.info("="*40)
15 logging.info("Stated application")
16 with OpsDroid() as opsdroid:
17 loader = Loader(opsdroid)
18 opsdroid.config = loader.load_config_file("./configuration.yaml")
19 if "logging" in opsdroid.config:
20 set_logging_level(opsdroid.config['logging'])
21 loader.load_config(opsdroid.config)
22 opsdroid.exit()
23
24 if __name__ == "__main__":
25 main()
26
[end of opsdroid/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -15,7 +15,11 @@
logging.info("Stated application")
with OpsDroid() as opsdroid:
loader = Loader(opsdroid)
- opsdroid.config = loader.load_config_file("./configuration.yaml")
+ opsdroid.config = loader.load_config_file([
+ "./configuration.yaml",
+ "~/.opsdroid/configuration.yaml",
+ "/etc/opsdroid/configuration.yaml"
+ ])
if "logging" in opsdroid.config:
set_logging_level(opsdroid.config['logging'])
loader.load_config(opsdroid.config)
diff --git a/opsdroid/loader.py b/opsdroid/loader.py
--- a/opsdroid/loader.py
+++ b/opsdroid/loader.py
@@ -71,11 +71,19 @@
self.opsdroid = opsdroid
logging.debug("Loaded loader")
- def load_config_file(self, config_path):
+ def load_config_file(self, config_paths):
"""Load a yaml config file from path."""
- if not os.path.isfile(config_path):
- self.opsdroid.critical("Config file " + config_path +
- " not found", 1)
+ config_path = ""
+ for possible_path in config_paths:
+ if not os.path.isfile(possible_path):
+ logging.warning("Config file " + possible_path +
+ " not found", 1)
+ else:
+ config_path = possible_path
+ break
+
+ if not config_path:
+ self.opsdroid.critical("No configuration files found", 1)
try:
with open(config_path, 'r') as stream:
|
{"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -15,7 +15,11 @@\n logging.info(\"Stated application\")\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n- opsdroid.config = loader.load_config_file(\"./configuration.yaml\")\n+ opsdroid.config = loader.load_config_file([\n+ \"./configuration.yaml\",\n+ \"~/.opsdroid/configuration.yaml\",\n+ \"/etc/opsdroid/configuration.yaml\"\n+ ])\n if \"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\ndiff --git a/opsdroid/loader.py b/opsdroid/loader.py\n--- a/opsdroid/loader.py\n+++ b/opsdroid/loader.py\n@@ -71,11 +71,19 @@\n self.opsdroid = opsdroid\n logging.debug(\"Loaded loader\")\n \n- def load_config_file(self, config_path):\n+ def load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n- if not os.path.isfile(config_path):\n- self.opsdroid.critical(\"Config file \" + config_path +\n- \" not found\", 1)\n+ config_path = \"\"\n+ for possible_path in config_paths:\n+ if not os.path.isfile(possible_path):\n+ logging.warning(\"Config file \" + possible_path +\n+ \" not found\", 1)\n+ else:\n+ config_path = possible_path\n+ break\n+\n+ if not config_path:\n+ self.opsdroid.critical(\"No configuration files found\", 1)\n \n try:\n with open(config_path, 'r') as stream:\n", "issue": "Config locations\nCurrently opsdroid looks for the `configuration.yaml` file in the current working directory. It should also look in `~/.opsdroid/configuration.yaml` and `/etc/opsdroid/configuration.yaml`.\n\n", "before_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport importlib\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULE_BRANCH)\n\n\ndef import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"path\"] + \".\" + config[\"name\"])\n logging.debug(\"Loading \" + config[\"type\"] + \": \" + config[\"name\"])\n return module\n except ImportError as error:\n logging.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"name\"])\n logging.error(error)\n return None\n\n\ndef check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"] \\\n and os.path.isdir(config[\"install_path\"]):\n logging.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n shutil.rmtree(config[\"install_path\"])\n\n\ndef build_module_path(path_type, config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \".\" + config[\"name\"]\n elif path_type == \"install\":\n return MODULES_DIRECTORY + \"/\" + config[\"type\"] + \"/\" + config[\"name\"]\n\n\ndef git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n\n\ndef pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for 
output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n logging.debug(str(line).strip())\n process.wait()\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Setup object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n logging.debug(\"Loaded loader\")\n\n def load_config_file(self, config_path):\n \"\"\"Load a yaml config file from path.\"\"\"\n if not os.path.isfile(config_path):\n self.opsdroid.critical(\"Config file \" + config_path +\n \" not found\", 1)\n\n try:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def load_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n logging.debug(\"Loading modules from config\")\n\n if 'databases' in config.keys():\n self.opsdroid.start_databases(\n self._load_modules('database', config['databases']))\n else:\n logging.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n self._setup_modules(\n self._load_modules('skill', config['skills'])\n )\n else:\n self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n self.opsdroid.start_connectors(\n self._load_modules('connector', config['connectors']))\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n logging.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(MODULES_DIRECTORY):\n os.makedirs(MODULES_DIRECTORY)\n\n for module_name in modules.keys():\n\n # Set up module config\n config = modules[module_name]\n config = {} if config is None else config\n config[\"name\"] = module_name\n config[\"type\"] = modules_type\n config[\"path\"] = build_module_path(\"import\", config)\n config[\"install_path\"] = build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n module = import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n\n return loaded_modules\n\n def _setup_modules(self, modules):\n \"\"\"Call the setup function on the passed in modules.\"\"\"\n for module in modules:\n module[\"module\"].setup(self.opsdroid)\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n logging.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]):\n # TODO Allow for updating or reinstalling of modules\n logging.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n else:\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n git_clone(git_url, config[\"install_path\"], config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n git_clone(git_url, config[\"install_path\"],\n 
config[\"branch\"])\n else:\n logging.debug(\"Could not find local git repo \" + git_url)\n\n if os.path.isdir(config[\"install_path\"]):\n logging.debug(\"Installed \" + config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n logging.debug(\"Install of \" + config[\"name\"] + \" failed \")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n pip_install_deps(config[\"install_path\"] + \"/requirements.txt\")\n", "path": "opsdroid/loader.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport logging\n\nfrom opsdroid.loader import Loader\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.helper import set_logging_level\nfrom opsdroid.const import LOG_FILENAME\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file(\"./configuration.yaml\")\n if \"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\n opsdroid.exit()\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]}
| 2,705 | 414 |
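A sketch of the multi-location lookup added by the opsdroid patch above. The three candidate paths come from the patch; the `expanduser` call is an assumption added here so the `~/.opsdroid` entry resolves, since the patch itself checks the literal strings with `os.path.isfile`.

```python
import os


def first_existing_config(config_paths):
    """Return the first configuration file that exists, or None."""
    for possible_path in config_paths:
        # expanduser is an addition here so the "~" entry resolves;
        # the patch checks the paths exactly as given.
        candidate = os.path.expanduser(possible_path)
        if os.path.isfile(candidate):
            return candidate
    return None


config_path = first_existing_config([
    "./configuration.yaml",
    "~/.opsdroid/configuration.yaml",
    "/etc/opsdroid/configuration.yaml",
])
```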
gh_patches_debug_12217 | rasdani/github-patches | git_diff | pallets__click-1872 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`HelpFormatter.write_text()` is not using all the available line width
`HelpFormatter.write_text()` uses the function `wrap_text(text, width, initial_indent, ...)` internally. This function expects `width` to be the line width **including** the eventual indentation. `HelpFormatter.write_text()` gets this wrong and passes `self.width - self.current_indent` instead of just `self.width`.
</issue>
<code>
[start of src/click/formatting.py]
1 import typing as t
2 from contextlib import contextmanager
3 from gettext import gettext as _
4
5 from ._compat import term_len
6 from .parser import split_opt
7
8 # Can force a width. This is used by the test system
9 FORCED_WIDTH: t.Optional[int] = None
10
11
12 def measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]:
13 widths: t.Dict[int, int] = {}
14
15 for row in rows:
16 for idx, col in enumerate(row):
17 widths[idx] = max(widths.get(idx, 0), term_len(col))
18
19 return tuple(y for x, y in sorted(widths.items()))
20
21
22 def iter_rows(
23 rows: t.Iterable[t.Tuple[str, str]], col_count: int
24 ) -> t.Iterator[t.Tuple[str, ...]]:
25 for row in rows:
26 yield row + ("",) * (col_count - len(row))
27
28
29 def wrap_text(
30 text: str,
31 width: int = 78,
32 initial_indent: str = "",
33 subsequent_indent: str = "",
34 preserve_paragraphs: bool = False,
35 ) -> str:
36 """A helper function that intelligently wraps text. By default, it
37 assumes that it operates on a single paragraph of text but if the
38 `preserve_paragraphs` parameter is provided it will intelligently
39 handle paragraphs (defined by two empty lines).
40
41 If paragraphs are handled, a paragraph can be prefixed with an empty
42 line containing the ``\\b`` character (``\\x08``) to indicate that
43 no rewrapping should happen in that block.
44
45 :param text: the text that should be rewrapped.
46 :param width: the maximum width for the text.
47 :param initial_indent: the initial indent that should be placed on the
48 first line as a string.
49 :param subsequent_indent: the indent string that should be placed on
50 each consecutive line.
51 :param preserve_paragraphs: if this flag is set then the wrapping will
52 intelligently handle paragraphs.
53 """
54 from ._textwrap import TextWrapper
55
56 text = text.expandtabs()
57 wrapper = TextWrapper(
58 width,
59 initial_indent=initial_indent,
60 subsequent_indent=subsequent_indent,
61 replace_whitespace=False,
62 )
63 if not preserve_paragraphs:
64 return wrapper.fill(text)
65
66 p: t.List[t.Tuple[int, bool, str]] = []
67 buf: t.List[str] = []
68 indent = None
69
70 def _flush_par() -> None:
71 if not buf:
72 return
73 if buf[0].strip() == "\b":
74 p.append((indent or 0, True, "\n".join(buf[1:])))
75 else:
76 p.append((indent or 0, False, " ".join(buf)))
77 del buf[:]
78
79 for line in text.splitlines():
80 if not line:
81 _flush_par()
82 indent = None
83 else:
84 if indent is None:
85 orig_len = term_len(line)
86 line = line.lstrip()
87 indent = orig_len - term_len(line)
88 buf.append(line)
89 _flush_par()
90
91 rv = []
92 for indent, raw, text in p:
93 with wrapper.extra_indent(" " * indent):
94 if raw:
95 rv.append(wrapper.indent_only(text))
96 else:
97 rv.append(wrapper.fill(text))
98
99 return "\n\n".join(rv)
100
101
102 class HelpFormatter:
103 """This class helps with formatting text-based help pages. It's
104 usually just needed for very special internal cases, but it's also
105 exposed so that developers can write their own fancy outputs.
106
107 At present, it always writes into memory.
108
109 :param indent_increment: the additional increment for each level.
110 :param width: the width for the text. This defaults to the terminal
111 width clamped to a maximum of 78.
112 """
113
114 def __init__(
115 self,
116 indent_increment: int = 2,
117 width: t.Optional[int] = None,
118 max_width: t.Optional[int] = None,
119 ) -> None:
120 import shutil
121
122 self.indent_increment = indent_increment
123 if max_width is None:
124 max_width = 80
125 if width is None:
126 width = FORCED_WIDTH
127 if width is None:
128 width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)
129 self.width = width
130 self.current_indent = 0
131 self.buffer: t.List[str] = []
132
133 def write(self, string: str) -> None:
134 """Writes a unicode string into the internal buffer."""
135 self.buffer.append(string)
136
137 def indent(self) -> None:
138 """Increases the indentation."""
139 self.current_indent += self.indent_increment
140
141 def dedent(self) -> None:
142 """Decreases the indentation."""
143 self.current_indent -= self.indent_increment
144
145 def write_usage(
146 self, prog: str, args: str = "", prefix: t.Optional[str] = None
147 ) -> None:
148 """Writes a usage line into the buffer.
149
150 :param prog: the program name.
151 :param args: whitespace separated list of arguments.
152 :param prefix: The prefix for the first line. Defaults to
153 ``"Usage: "``.
154 """
155 if prefix is None:
156 prefix = f"{_('Usage:')} "
157
158 usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
159 text_width = self.width - self.current_indent
160
161 if text_width >= (term_len(usage_prefix) + 20):
162 # The arguments will fit to the right of the prefix.
163 indent = " " * term_len(usage_prefix)
164 self.write(
165 wrap_text(
166 args,
167 text_width,
168 initial_indent=usage_prefix,
169 subsequent_indent=indent,
170 )
171 )
172 else:
173 # The prefix is too long, put the arguments on the next line.
174 self.write(usage_prefix)
175 self.write("\n")
176 indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
177 self.write(
178 wrap_text(
179 args, text_width, initial_indent=indent, subsequent_indent=indent
180 )
181 )
182
183 self.write("\n")
184
185 def write_heading(self, heading: str) -> None:
186 """Writes a heading into the buffer."""
187 self.write(f"{'':>{self.current_indent}}{heading}:\n")
188
189 def write_paragraph(self) -> None:
190 """Writes a paragraph into the buffer."""
191 if self.buffer:
192 self.write("\n")
193
194 def write_text(self, text: str) -> None:
195 """Writes re-indented text into the buffer. This rewraps and
196 preserves paragraphs.
197 """
198 text_width = max(self.width - self.current_indent, 11)
199 indent = " " * self.current_indent
200 self.write(
201 wrap_text(
202 text,
203 text_width,
204 initial_indent=indent,
205 subsequent_indent=indent,
206 preserve_paragraphs=True,
207 )
208 )
209 self.write("\n")
210
211 def write_dl(
212 self,
213 rows: t.Sequence[t.Tuple[str, str]],
214 col_max: int = 30,
215 col_spacing: int = 2,
216 ) -> None:
217 """Writes a definition list into the buffer. This is how options
218 and commands are usually formatted.
219
220 :param rows: a list of two item tuples for the terms and values.
221 :param col_max: the maximum width of the first column.
222 :param col_spacing: the number of spaces between the first and
223 second column.
224 """
225 rows = list(rows)
226 widths = measure_table(rows)
227 if len(widths) != 2:
228 raise TypeError("Expected two columns for definition list")
229
230 first_col = min(widths[0], col_max) + col_spacing
231
232 for first, second in iter_rows(rows, len(widths)):
233 self.write(f"{'':>{self.current_indent}}{first}")
234 if not second:
235 self.write("\n")
236 continue
237 if term_len(first) <= first_col - col_spacing:
238 self.write(" " * (first_col - term_len(first)))
239 else:
240 self.write("\n")
241 self.write(" " * (first_col + self.current_indent))
242
243 text_width = max(self.width - first_col - 2, 10)
244 wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
245 lines = wrapped_text.splitlines()
246
247 if lines:
248 self.write(f"{lines[0]}\n")
249
250 for line in lines[1:]:
251 self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
252 else:
253 self.write("\n")
254
255 @contextmanager
256 def section(self, name: str) -> t.Iterator[None]:
257 """Helpful context manager that writes a paragraph, a heading,
258 and the indents.
259
260 :param name: the section name that is written as heading.
261 """
262 self.write_paragraph()
263 self.write_heading(name)
264 self.indent()
265 try:
266 yield
267 finally:
268 self.dedent()
269
270 @contextmanager
271 def indentation(self) -> t.Iterator[None]:
272 """A context manager that increases the indentation."""
273 self.indent()
274 try:
275 yield
276 finally:
277 self.dedent()
278
279 def getvalue(self) -> str:
280 """Returns the buffer contents."""
281 return "".join(self.buffer)
282
283
284 def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:
285 """Given a list of option strings this joins them in the most appropriate
286 way and returns them in the form ``(formatted_string,
287 any_prefix_is_slash)`` where the second item in the tuple is a flag that
288 indicates if any of the option prefixes was a slash.
289 """
290 rv = []
291 any_prefix_is_slash = False
292
293 for opt in options:
294 prefix = split_opt(opt)[0]
295
296 if prefix == "/":
297 any_prefix_is_slash = True
298
299 rv.append((len(prefix), opt))
300
301 rv.sort(key=lambda x: x[0])
302 return ", ".join(x[1] for x in rv), any_prefix_is_slash
303
[end of src/click/formatting.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/click/formatting.py b/src/click/formatting.py
--- a/src/click/formatting.py
+++ b/src/click/formatting.py
@@ -195,12 +195,11 @@
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
- text_width = max(self.width - self.current_indent, 11)
indent = " " * self.current_indent
self.write(
wrap_text(
text,
- text_width,
+ self.width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True,
|
{"golden_diff": "diff --git a/src/click/formatting.py b/src/click/formatting.py\n--- a/src/click/formatting.py\n+++ b/src/click/formatting.py\n@@ -195,12 +195,11 @@\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n- text_width = max(self.width - self.current_indent, 11)\n indent = \" \" * self.current_indent\n self.write(\n wrap_text(\n text,\n- text_width,\n+ self.width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True,\n", "issue": "`HelpFormatter.write_text()` is not using all the available line width\n`HelpFormatter.write_text()` uses the function `wrap_text(text, width, initial_indent, ...)` internally. This function expects `width` to be the line width **including** the eventual indentation. `HelpFormatter.write_text()` gets this wrong and passes `self.width - self.current_indent` instead of just `self.width`.\r\n\r\n\n", "before_files": [{"content": "import typing as t\nfrom contextlib import contextmanager\nfrom gettext import gettext as _\n\nfrom ._compat import term_len\nfrom .parser import split_opt\n\n# Can force a width. This is used by the test system\nFORCED_WIDTH: t.Optional[int] = None\n\n\ndef measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]:\n widths: t.Dict[int, int] = {}\n\n for row in rows:\n for idx, col in enumerate(row):\n widths[idx] = max(widths.get(idx, 0), term_len(col))\n\n return tuple(y for x, y in sorted(widths.items()))\n\n\ndef iter_rows(\n rows: t.Iterable[t.Tuple[str, str]], col_count: int\n) -> t.Iterator[t.Tuple[str, ...]]:\n for row in rows:\n yield row + (\"\",) * (col_count - len(row))\n\n\ndef wrap_text(\n text: str,\n width: int = 78,\n initial_indent: str = \"\",\n subsequent_indent: str = \"\",\n preserve_paragraphs: bool = False,\n) -> str:\n \"\"\"A helper function that intelligently wraps text. 
By default, it\n assumes that it operates on a single paragraph of text but if the\n `preserve_paragraphs` parameter is provided it will intelligently\n handle paragraphs (defined by two empty lines).\n\n If paragraphs are handled, a paragraph can be prefixed with an empty\n line containing the ``\\\\b`` character (``\\\\x08``) to indicate that\n no rewrapping should happen in that block.\n\n :param text: the text that should be rewrapped.\n :param width: the maximum width for the text.\n :param initial_indent: the initial indent that should be placed on the\n first line as a string.\n :param subsequent_indent: the indent string that should be placed on\n each consecutive line.\n :param preserve_paragraphs: if this flag is set then the wrapping will\n intelligently handle paragraphs.\n \"\"\"\n from ._textwrap import TextWrapper\n\n text = text.expandtabs()\n wrapper = TextWrapper(\n width,\n initial_indent=initial_indent,\n subsequent_indent=subsequent_indent,\n replace_whitespace=False,\n )\n if not preserve_paragraphs:\n return wrapper.fill(text)\n\n p: t.List[t.Tuple[int, bool, str]] = []\n buf: t.List[str] = []\n indent = None\n\n def _flush_par() -> None:\n if not buf:\n return\n if buf[0].strip() == \"\\b\":\n p.append((indent or 0, True, \"\\n\".join(buf[1:])))\n else:\n p.append((indent or 0, False, \" \".join(buf)))\n del buf[:]\n\n for line in text.splitlines():\n if not line:\n _flush_par()\n indent = None\n else:\n if indent is None:\n orig_len = term_len(line)\n line = line.lstrip()\n indent = orig_len - term_len(line)\n buf.append(line)\n _flush_par()\n\n rv = []\n for indent, raw, text in p:\n with wrapper.extra_indent(\" \" * indent):\n if raw:\n rv.append(wrapper.indent_only(text))\n else:\n rv.append(wrapper.fill(text))\n\n return \"\\n\\n\".join(rv)\n\n\nclass HelpFormatter:\n \"\"\"This class helps with formatting text-based help pages. It's\n usually just needed for very special internal cases, but it's also\n exposed so that developers can write their own fancy outputs.\n\n At present, it always writes into memory.\n\n :param indent_increment: the additional increment for each level.\n :param width: the width for the text. This defaults to the terminal\n width clamped to a maximum of 78.\n \"\"\"\n\n def __init__(\n self,\n indent_increment: int = 2,\n width: t.Optional[int] = None,\n max_width: t.Optional[int] = None,\n ) -> None:\n import shutil\n\n self.indent_increment = indent_increment\n if max_width is None:\n max_width = 80\n if width is None:\n width = FORCED_WIDTH\n if width is None:\n width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)\n self.width = width\n self.current_indent = 0\n self.buffer: t.List[str] = []\n\n def write(self, string: str) -> None:\n \"\"\"Writes a unicode string into the internal buffer.\"\"\"\n self.buffer.append(string)\n\n def indent(self) -> None:\n \"\"\"Increases the indentation.\"\"\"\n self.current_indent += self.indent_increment\n\n def dedent(self) -> None:\n \"\"\"Decreases the indentation.\"\"\"\n self.current_indent -= self.indent_increment\n\n def write_usage(\n self, prog: str, args: str = \"\", prefix: t.Optional[str] = None\n ) -> None:\n \"\"\"Writes a usage line into the buffer.\n\n :param prog: the program name.\n :param args: whitespace separated list of arguments.\n :param prefix: The prefix for the first line. 
Defaults to\n ``\"Usage: \"``.\n \"\"\"\n if prefix is None:\n prefix = f\"{_('Usage:')} \"\n\n usage_prefix = f\"{prefix:>{self.current_indent}}{prog} \"\n text_width = self.width - self.current_indent\n\n if text_width >= (term_len(usage_prefix) + 20):\n # The arguments will fit to the right of the prefix.\n indent = \" \" * term_len(usage_prefix)\n self.write(\n wrap_text(\n args,\n text_width,\n initial_indent=usage_prefix,\n subsequent_indent=indent,\n )\n )\n else:\n # The prefix is too long, put the arguments on the next line.\n self.write(usage_prefix)\n self.write(\"\\n\")\n indent = \" \" * (max(self.current_indent, term_len(prefix)) + 4)\n self.write(\n wrap_text(\n args, text_width, initial_indent=indent, subsequent_indent=indent\n )\n )\n\n self.write(\"\\n\")\n\n def write_heading(self, heading: str) -> None:\n \"\"\"Writes a heading into the buffer.\"\"\"\n self.write(f\"{'':>{self.current_indent}}{heading}:\\n\")\n\n def write_paragraph(self) -> None:\n \"\"\"Writes a paragraph into the buffer.\"\"\"\n if self.buffer:\n self.write(\"\\n\")\n\n def write_text(self, text: str) -> None:\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n text_width = max(self.width - self.current_indent, 11)\n indent = \" \" * self.current_indent\n self.write(\n wrap_text(\n text,\n text_width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True,\n )\n )\n self.write(\"\\n\")\n\n def write_dl(\n self,\n rows: t.Sequence[t.Tuple[str, str]],\n col_max: int = 30,\n col_spacing: int = 2,\n ) -> None:\n \"\"\"Writes a definition list into the buffer. This is how options\n and commands are usually formatted.\n\n :param rows: a list of two item tuples for the terms and values.\n :param col_max: the maximum width of the first column.\n :param col_spacing: the number of spaces between the first and\n second column.\n \"\"\"\n rows = list(rows)\n widths = measure_table(rows)\n if len(widths) != 2:\n raise TypeError(\"Expected two columns for definition list\")\n\n first_col = min(widths[0], col_max) + col_spacing\n\n for first, second in iter_rows(rows, len(widths)):\n self.write(f\"{'':>{self.current_indent}}{first}\")\n if not second:\n self.write(\"\\n\")\n continue\n if term_len(first) <= first_col - col_spacing:\n self.write(\" \" * (first_col - term_len(first)))\n else:\n self.write(\"\\n\")\n self.write(\" \" * (first_col + self.current_indent))\n\n text_width = max(self.width - first_col - 2, 10)\n wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)\n lines = wrapped_text.splitlines()\n\n if lines:\n self.write(f\"{lines[0]}\\n\")\n\n for line in lines[1:]:\n self.write(f\"{'':>{first_col + self.current_indent}}{line}\\n\")\n else:\n self.write(\"\\n\")\n\n @contextmanager\n def section(self, name: str) -> t.Iterator[None]:\n \"\"\"Helpful context manager that writes a paragraph, a heading,\n and the indents.\n\n :param name: the section name that is written as heading.\n \"\"\"\n self.write_paragraph()\n self.write_heading(name)\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n @contextmanager\n def indentation(self) -> t.Iterator[None]:\n \"\"\"A context manager that increases the indentation.\"\"\"\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n def getvalue(self) -> str:\n \"\"\"Returns the buffer contents.\"\"\"\n return \"\".join(self.buffer)\n\n\ndef join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:\n \"\"\"Given a list of option strings this joins them in 
the most appropriate\n way and returns them in the form ``(formatted_string,\n any_prefix_is_slash)`` where the second item in the tuple is a flag that\n indicates if any of the option prefixes was a slash.\n \"\"\"\n rv = []\n any_prefix_is_slash = False\n\n for opt in options:\n prefix = split_opt(opt)[0]\n\n if prefix == \"/\":\n any_prefix_is_slash = True\n\n rv.append((len(prefix), opt))\n\n rv.sort(key=lambda x: x[0])\n return \", \".join(x[1] for x in rv), any_prefix_is_slash\n", "path": "src/click/formatting.py"}]}
| 3,647 | 142 |
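A minimal sketch (not from the repository) of the width semantics behind the patch above: `wrap_text()` counts `initial_indent`/`subsequent_indent` against `width`, so a caller that also subtracts its own indent, as the old `write_text()` did, wraps to fewer columns than intended. The snippet only assumes the public API shown in the listing.

```python
from click.formatting import HelpFormatter, wrap_text

words = "lorem " * 20

# width is the total line width, indent included
wrapped = wrap_text(words, width=30, initial_indent="    ", subsequent_indent="    ")
assert all(len(line) <= 30 for line in wrapped.splitlines())

# with the fix, an indented formatter still wraps against the full 30 columns
formatter = HelpFormatter(width=30)
formatter.indent()
formatter.write_text(words)
print(formatter.getvalue())
```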
gh_patches_debug_16146
|
rasdani/github-patches
|
git_diff
|
Nitrate__Nitrate-166
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installing files under /etc yields SandboxViolation in virtualenv
In one of my environments installing a newer version of Nitrate yields:
```
remote: error: Setup script exited with error: SandboxViolation: open('/etc/httpd/conf.d/nitrate-httpd.conf', 'wb') {}
```
This is due to the change made in ff4ecc75, which added the `data_files` argument to `setup.py`. I propose installing these files under /etc via the RPM packages, not via pip. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nfrom setuptools import setup, find_packages\n\nimport tcms\n\n\ndef get_install_requires():\n    requires = []\n    links = []\n    with open('requirements/base.txt', 'r') as f:\n        for line in f:\n            dep_line = line.strip()\n            parts = dep_line.split('#egg=')\n            if len(parts) == 2:\n                links.append(dep_line)\n                requires.append(parts[1])\n            else:\n                requires.append(dep_line)\n        return requires, links\n\ninstall_requires, dependency_links = get_install_requires()\n\n\ndef get_long_description():\n    with open('README.rst', 'r') as f:\n        return f.read()\n\n\nsetup(\n    name='nitrate',\n    version=tcms.__version__,\n    description='Test Case Management System',\n    long_description=get_long_description(),\n    author='Nitrate Team',\n    maintainer='Chenxiong Qi',\n    maintainer_email='[email protected]',\n    url='https://github.com/Nitrate/Nitrate/',\n    license='GPLv2+',\n    keywords='test case',\n\n    install_requires=install_requires,\n    dependency_links=dependency_links,\n\n    packages=find_packages(),\n    include_package_data=True,\n    data_files=[\n        ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),\n        ('/etc/init.d', ['contrib/script/celeryd']),\n    ],\n\n    classifiers=[\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n        'Topic :: Software Development :: Quality Assurance',\n        'Topic :: Software Development :: Testing',\n    ],\n)\n", "path": "setup.py"}]}
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import os
4 from setuptools import setup, find_packages
5
6 import tcms
7
8
9 def get_install_requires():
10 requires = []
11 links = []
12 with open('requirements/base.txt', 'r') as f:
13 for line in f:
14 dep_line = line.strip()
15 parts = dep_line.split('#egg=')
16 if len(parts) == 2:
17 links.append(dep_line)
18 requires.append(parts[1])
19 else:
20 requires.append(dep_line)
21 return requires, links
22
23 install_requires, dependency_links = get_install_requires()
24
25
26 def get_long_description():
27 with open('README.rst', 'r') as f:
28 return f.read()
29
30
31 setup(
32 name='nitrate',
33 version=tcms.__version__,
34 description='Test Case Management System',
35 long_description=get_long_description(),
36 author='Nitrate Team',
37 maintainer='Chenxiong Qi',
38 maintainer_email='[email protected]',
39 url='https://github.com/Nitrate/Nitrate/',
40 license='GPLv2+',
41 keywords='test case',
42
43 install_requires=install_requires,
44 dependency_links=dependency_links,
45
46 packages=find_packages(),
47 include_package_data=True,
48 data_files=[
49 ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),
50 ('/etc/init.d', ['contrib/script/celeryd']),
51 ],
52
53 classifiers=[
54 'Intended Audience :: Developers',
55 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
56 'Programming Language :: Python :: 2',
57 'Programming Language :: Python :: 2.7',
58 'Topic :: Software Development :: Quality Assurance',
59 'Topic :: Software Development :: Testing',
60 ],
61 )
62
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-import os
from setuptools import setup, find_packages
import tcms
@@ -20,6 +19,7 @@
requires.append(dep_line)
return requires, links
+
install_requires, dependency_links = get_install_requires()
@@ -45,10 +45,6 @@
packages=find_packages(),
include_package_data=True,
- data_files=[
- ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),
- ('/etc/init.d', ['contrib/script/celeryd']),
- ],
classifiers=[
'Intended Audience :: Developers',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,5 @@\n # -*- coding: utf-8 -*-\n \n-import os\n from setuptools import setup, find_packages\n \n import tcms\n@@ -20,6 +19,7 @@\n requires.append(dep_line)\n return requires, links\n \n+\n install_requires, dependency_links = get_install_requires()\n \n \n@@ -45,10 +45,6 @@\n \n packages=find_packages(),\n include_package_data=True,\n- data_files=[\n- ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),\n- ('/etc/init.d', ['contrib/script/celeryd']),\n- ],\n \n classifiers=[\n 'Intended Audience :: Developers',\n", "issue": "Installing files under /etc yields SandboxViolation in virtualenv\nIn one of my environments installing a newer version of Nitrate yields:\r\n```\r\nremote: error: Setup script exited with error: SandboxViolation: open('/etc/httpd/conf.d/nitrate-httpd.conf', 'wb') {}\r\n```\r\n\r\nthis is due to the change made in ff4ecc75 adding the `data_files` attribute to `setup.py`. I propose installing these files under /etc via the RPM packages, not via pip. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nfrom setuptools import setup, find_packages\n\nimport tcms\n\n\ndef get_install_requires():\n requires = []\n links = []\n with open('requirements/base.txt', 'r') as f:\n for line in f:\n dep_line = line.strip()\n parts = dep_line.split('#egg=')\n if len(parts) == 2:\n links.append(dep_line)\n requires.append(parts[1])\n else:\n requires.append(dep_line)\n return requires, links\n\ninstall_requires, dependency_links = get_install_requires()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\nsetup(\n name='nitrate',\n version=tcms.__version__,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n\n install_requires=install_requires,\n dependency_links=dependency_links,\n\n packages=find_packages(),\n include_package_data=True,\n data_files=[\n ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),\n ('/etc/init.d', ['contrib/script/celeryd']),\n ],\n\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]}
| 1,133 | 171 |
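A hypothetical variant, shown only for contrast with the patch above (which simply deletes `data_files`): some projects gate system-level `data_files` behind an explicit opt-in, so that a plain `pip install` inside a virtualenv never attempts the `/etc` write that triggers the `SandboxViolation`. The `NITRATE_PACKAGE_SYSTEM_FILES` variable is an invented name used purely for illustration.

```python
import os

SYSTEM_FILES = [
    ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),
    ('/etc/init.d', ['contrib/script/celeryd']),
]

# Only distro/RPM builds that explicitly opt in would ship files under /etc;
# everyone else gets an empty data_files and pip never touches system paths.
data_files = SYSTEM_FILES if os.environ.get('NITRATE_PACKAGE_SYSTEM_FILES') else []
```

Passing `data_files=data_files` to `setup()` would then be a no-op for pip users, while an RPM spec could still opt in at build time.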
gh_patches_debug_34419
|
rasdani/github-patches
|
git_diff
|
intel__dffml-567
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: operations: model_predict example usage
We need a doctestable example for the `model_predict`/`dffml.model.predict` operation.
References: https://intel.github.io/dffml/master/plugins/dffml_operation.html#dffml-model-predict
</issue>
<code>
[start of dffml/operation/model.py]
1 from typing import Dict, Any
2
3 from ..record import Record
4 from ..base import config
5 from ..model import Model
6 from ..df.types import Definition
7 from ..df.base import op
8
9
10 @config
11 class ModelPredictConfig:
12 model: Model
13
14 def __post_init__(self):
15 if not isinstance(self.model, Model):
16 raise TypeError(
17 "model should be an instance of `dffml.model.model.Model`"
18 )
19
20
21 @op(
22 name="dffml.model.predict",
23 inputs={
24 "features": Definition(
25 name="record_features", primitive="Dict[str, Any]"
26 )
27 },
28 outputs={
29 "prediction": Definition(
30 name="model_predictions", primitive="Dict[str, Any]"
31 )
32 },
33 config_cls=ModelPredictConfig,
34 imp_enter={"model": (lambda self: self.config.model)},
35 ctx_enter={"mctx": (lambda self: self.parent.model())},
36 )
37 async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:
38 async def records():
39 yield Record("", data={"features": features})
40
41 async for record in self.mctx.predict(records()):
42 return {"prediction": record.predictions()}
43
[end of dffml/operation/model.py]
[start of docs/doctest_header.py]
1 # This file is used as a header in every file that is created to run each
2 # example when the doctests are run.
3 import os
4 import sys
5 import shutil
6 import atexit
7 import inspect
8 import asyncio
9 import tempfile
10 import builtins
11 import functools
12 from unittest import mock
13
14 # Create a temporary directory for test to run in
15 DOCTEST_TEMPDIR = tempfile.mkdtemp()
16 # Remove it when the test exits
17 atexit.register(functools.partial(shutil.rmtree, DOCTEST_TEMPDIR))
18 # Change the current working directory to the temporary directory
19 os.chdir(DOCTEST_TEMPDIR)
20
21 from dffml import *
22 from dffml.base import *
23 from dffml.record import *
24 from dffml.df.base import *
25 from dffml.df.types import *
26 from dffml.util.net import *
27 from dffml.df.memory import *
28 from dffml_model_scikit import *
29 from dffml.operation.io import *
30 from dffml.source.memory import *
31 from dffml.operation.output import *
32 from dffml.operation.dataflow import *
33 from dffml.operation.preprocess import *
34 from dffml.operation.mapping import *
35
36 # Used for mocking input() for AcceptUserInput operation.
37 mock.patch("builtins.input", return_value="Data flow is awesome").start()
38
[end of docs/doctest_header.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dffml/operation/model.py b/dffml/operation/model.py
--- a/dffml/operation/model.py
+++ b/dffml/operation/model.py
@@ -35,6 +35,62 @@
ctx_enter={"mctx": (lambda self: self.parent.model())},
)
async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Predict using dffml models.
+
+ Parameters
+ ++++++++++
+ features : dict
+ A dictionary contaning feature name and feature value.
+
+ Returns
+ +++++++
+ dict
+ A dictionary containing prediction.
+
+ Examples
+ ++++++++
+
+ The following example shows how to use model_predict.
+
+ >>> slr_model = SLRModel(
+ ... features=Features(DefFeature("Years", int, 1)),
+ ... predict=DefFeature("Salary", int, 1),
+ ... )
+ >>> dataflow = DataFlow(
+ ... operations={
+ ... "prediction_using_model": model_predict,
+ ... "get_single": GetSingle,
+ ... },
+ ... configs={"prediction_using_model": ModelPredictConfig(model=slr_model)},
+ ... )
+ >>> dataflow.seed.append(
+ ... Input(
+ ... value=[model_predict.op.outputs["prediction"].name],
+ ... definition=GetSingle.op.inputs["spec"],
+ ... )
+ ... )
+ >>>
+ >>> async def main():
+ ... await train(
+ ... slr_model,
+ ... {"Years": 0, "Salary": 10},
+ ... {"Years": 1, "Salary": 20},
+ ... {"Years": 2, "Salary": 30},
+ ... {"Years": 3, "Salary": 40},
+ ... )
+ ... inputs = [
+ ... Input(
+ ... value={"Years": 4}, definition=model_predict.op.inputs["features"],
+ ... )
+ ... ]
+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
+ ... print(results)
+ >>>
+ >>> asyncio.run(main())
+ {'model_predictions': {'Salary': {'confidence': 1.0, 'value': 50.0}}}
+ """
+
async def records():
yield Record("", data={"features": features})
diff --git a/docs/doctest_header.py b/docs/doctest_header.py
--- a/docs/doctest_header.py
+++ b/docs/doctest_header.py
@@ -25,9 +25,11 @@
from dffml.df.types import *
from dffml.util.net import *
from dffml.df.memory import *
+from dffml.model.slr import *
from dffml_model_scikit import *
from dffml.operation.io import *
from dffml.source.memory import *
+from dffml.operation.model import *
from dffml.operation.output import *
from dffml.operation.dataflow import *
from dffml.operation.preprocess import *
|
{"golden_diff": "diff --git a/dffml/operation/model.py b/dffml/operation/model.py\n--- a/dffml/operation/model.py\n+++ b/dffml/operation/model.py\n@@ -35,6 +35,62 @@\n ctx_enter={\"mctx\": (lambda self: self.parent.model())},\n )\n async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:\n+ \"\"\"\n+ Predict using dffml models.\n+\n+ Parameters\n+ ++++++++++\n+ features : dict\n+ A dictionary contaning feature name and feature value.\n+\n+ Returns\n+ +++++++\n+ dict\n+ A dictionary containing prediction.\n+\n+ Examples\n+ ++++++++\n+\n+ The following example shows how to use model_predict.\n+\n+ >>> slr_model = SLRModel(\n+ ... features=Features(DefFeature(\"Years\", int, 1)),\n+ ... predict=DefFeature(\"Salary\", int, 1),\n+ ... )\n+ >>> dataflow = DataFlow(\n+ ... operations={\n+ ... \"prediction_using_model\": model_predict,\n+ ... \"get_single\": GetSingle,\n+ ... },\n+ ... configs={\"prediction_using_model\": ModelPredictConfig(model=slr_model)},\n+ ... )\n+ >>> dataflow.seed.append(\n+ ... Input(\n+ ... value=[model_predict.op.outputs[\"prediction\"].name],\n+ ... definition=GetSingle.op.inputs[\"spec\"],\n+ ... )\n+ ... )\n+ >>>\n+ >>> async def main():\n+ ... await train(\n+ ... slr_model,\n+ ... {\"Years\": 0, \"Salary\": 10},\n+ ... {\"Years\": 1, \"Salary\": 20},\n+ ... {\"Years\": 2, \"Salary\": 30},\n+ ... {\"Years\": 3, \"Salary\": 40},\n+ ... )\n+ ... inputs = [\n+ ... Input(\n+ ... value={\"Years\": 4}, definition=model_predict.op.inputs[\"features\"],\n+ ... )\n+ ... ]\n+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n+ ... print(results)\n+ >>>\n+ >>> asyncio.run(main())\n+ {'model_predictions': {'Salary': {'confidence': 1.0, 'value': 50.0}}}\n+ \"\"\"\n+\n async def records():\n yield Record(\"\", data={\"features\": features})\n \ndiff --git a/docs/doctest_header.py b/docs/doctest_header.py\n--- a/docs/doctest_header.py\n+++ b/docs/doctest_header.py\n@@ -25,9 +25,11 @@\n from dffml.df.types import *\n from dffml.util.net import *\n from dffml.df.memory import *\n+from dffml.model.slr import *\n from dffml_model_scikit import *\n from dffml.operation.io import *\n from dffml.source.memory import *\n+from dffml.operation.model import *\n from dffml.operation.output import *\n from dffml.operation.dataflow import *\n from dffml.operation.preprocess import *\n", "issue": "docs: operations: model_predict example usage\nWe need a doctestable example for the `model_predict`/`dffml.model.predict` operation.\r\n\r\nReferences: https://intel.github.io/dffml/master/plugins/dffml_operation.html#dffml-model-predict\n", "before_files": [{"content": "from typing import Dict, Any\n\nfrom ..record import Record\nfrom ..base import config\nfrom ..model import Model\nfrom ..df.types import Definition\nfrom ..df.base import op\n\n\n@config\nclass ModelPredictConfig:\n model: Model\n\n def __post_init__(self):\n if not isinstance(self.model, Model):\n raise TypeError(\n \"model should be an instance of `dffml.model.model.Model`\"\n )\n\n\n@op(\n name=\"dffml.model.predict\",\n inputs={\n \"features\": Definition(\n name=\"record_features\", primitive=\"Dict[str, Any]\"\n )\n },\n outputs={\n \"prediction\": Definition(\n name=\"model_predictions\", primitive=\"Dict[str, Any]\"\n )\n },\n config_cls=ModelPredictConfig,\n imp_enter={\"model\": (lambda self: self.config.model)},\n ctx_enter={\"mctx\": (lambda self: self.parent.model())},\n)\nasync def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:\n async def records():\n yield 
Record(\"\", data={\"features\": features})\n\n async for record in self.mctx.predict(records()):\n return {\"prediction\": record.predictions()}\n", "path": "dffml/operation/model.py"}, {"content": "# This file is used as a header in every file that is created to run each\n# example when the doctests are run.\nimport os\nimport sys\nimport shutil\nimport atexit\nimport inspect\nimport asyncio\nimport tempfile\nimport builtins\nimport functools\nfrom unittest import mock\n\n# Create a temporary directory for test to run in\nDOCTEST_TEMPDIR = tempfile.mkdtemp()\n# Remove it when the test exits\natexit.register(functools.partial(shutil.rmtree, DOCTEST_TEMPDIR))\n# Change the current working directory to the temporary directory\nos.chdir(DOCTEST_TEMPDIR)\n\nfrom dffml import *\nfrom dffml.base import *\nfrom dffml.record import *\nfrom dffml.df.base import *\nfrom dffml.df.types import *\nfrom dffml.util.net import *\nfrom dffml.df.memory import *\nfrom dffml_model_scikit import *\nfrom dffml.operation.io import *\nfrom dffml.source.memory import *\nfrom dffml.operation.output import *\nfrom dffml.operation.dataflow import *\nfrom dffml.operation.preprocess import *\nfrom dffml.operation.mapping import *\n\n# Used for mocking input() for AcceptUserInput operation.\nmock.patch(\"builtins.input\", return_value=\"Data flow is awesome\").start()\n", "path": "docs/doctest_header.py"}]}
| 1,281 | 707 |
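The `docs/doctest_header.py` hunk is what makes the new docstring example runnable: the example relies on names such as `SLRModel`, `GetSingle` and `MemoryOrchestrator` that are star-imported in the header rather than in `dffml/operation/model.py` itself. Below is a hedged sketch of exercising the example with plain `doctest` outside the project's Sphinx setup; treat the exact import set and invocation as assumptions rather than the project's documented test command.

```python
import asyncio  # noqa: F401  (used inside the doctest)
import doctest

import dffml.operation.model as model_module
from dffml import *                    # noqa: F401,F403
from dffml.df.memory import *          # noqa: F401,F403
from dffml.df.types import *           # noqa: F401,F403
from dffml.model.slr import *          # noqa: F401,F403
from dffml.operation.model import *    # noqa: F401,F403
from dffml.operation.output import *   # noqa: F401,F403

# extraglobs supplies the doctest with the same namespace docs/doctest_header.py builds
print(doctest.testmod(model_module, extraglobs=dict(globals())))
```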
gh_patches_debug_13068
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-6846
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add notification bubble on Main Nav for super-librarians # pending MRs
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
Options are to add a bubble to the avatar and then an entry + bubble into the sub-navigation within the hamburger

Or to add a bubble directly to the black IA topbar which when clicked goes directly to /merges

```
.mr-notifications {
position: absolute;
z-index: 4;
background: #02598b;
color: white;
border-radius: 8px;
padding: 3px 7px;
font-size: 12px;
margin-left: 9px;
margin-top: 35px;
font-weight: bold;
}
```
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
</issue>
<code>
[start of openlibrary/core/edits.py]
1 import datetime
2 import json
3 from typing import Optional
4 import web
5
6 from infogami.utils.view import public
7
8 from openlibrary.i18n import gettext as _
9
10 from . import db
11
12
13 @public
14 def get_status_for_view(status_code: int) -> str:
15 """Returns localized status string that corresponds with the given status code."""
16 if status_code == CommunityEditsQueue.STATUS['DECLINED']:
17 return _('Declined')
18 if status_code == CommunityEditsQueue.STATUS['PENDING']:
19 return _('Pending')
20 if status_code == CommunityEditsQueue.STATUS['MERGED']:
21 return _('Merged')
22 return _('Unknown')
23
24
25 class CommunityEditsQueue:
26
27 """Schema
28 id: Primary identifier
29 submitter: username of person that made the request
30 reviewer: The username of the person who reviewed the request
31 url: URL of the merge request
32 status: Either "Pending", "Merged", or "Declined"
33 comment: Short note from reviewer (json blobs (can store timestamps, etc))
34 created: created timestamp
35 updated: update timestamp
36 """
37
38 STATUS = {
39 'DECLINED': 0,
40 'PENDING': 1,
41 'MERGED': 2,
42 }
43
44 MODES = {
45 'all': [STATUS['DECLINED'], STATUS['PENDING'], STATUS['MERGED']],
46 'open': [STATUS['PENDING']],
47 'closed': [STATUS['DECLINED'], STATUS['MERGED']],
48 }
49
50 @classmethod
51 def get_requests(
52 cls,
53 limit: int = 50,
54 page: int = 1,
55 mode: str = 'all',
56 order: str = None,
57 **kwargs,
58 ):
59 oldb = db.get_db()
60
61 query_kwargs = {
62 "limit": limit,
63 "offset": limit * (page - 1),
64 "vars": {**kwargs},
65 }
66
67 query_kwargs['where'] = cls.where_clause(mode, **kwargs)
68
69 if order:
70 query_kwargs['order'] = order
71 return oldb.select("community_edits_queue", **query_kwargs)
72
73 @classmethod
74 def get_counts_by_mode(cls, mode='all', **kwargs):
75 oldb = db.get_db()
76
77 query = 'SELECT count(*) from community_edits_queue'
78
79 where_clause = cls.where_clause(mode, **kwargs)
80 if where_clause:
81 query = f'{query} WHERE {where_clause}'
82 return oldb.query(query, vars=kwargs)[0]['count']
83
84 @classmethod
85 def where_clause(cls, mode, **kwargs):
86 wheres = []
87
88 if kwargs.get('reviewer') is not None:
89 wheres.append(
90 # if reviewer="" then get all unassigned MRs
91 "reviewer IS NULL"
92 if not kwargs.get('reviewer')
93 else "reviewer=$reviewer"
94 )
95 if "submitter" in kwargs:
96 wheres.append(
97 # If submitter not specified, default to any
98 "submitter IS NOT NULL"
99 if kwargs.get("submitter") is None
100 else "submitter=$submitter"
101 )
102 if "url" in kwargs:
103 wheres.append("url=$url")
104 if "id" in kwargs:
105 wheres.append("id=$id")
106
107 status_list = (
108 [f'status={status}' for status in cls.MODES[mode]] if mode != 'all' else []
109 )
110
111 where_clause = ''
112
113 if wheres:
114 where_clause = f'{" AND ".join(wheres)}'
115 if status_list:
116 status_query = f'({" OR ".join(status_list)})'
117 if where_clause:
118 where_clause = f'{where_clause} AND {status_query}'
119 else:
120 where_clause = status_query
121
122 return where_clause
123
124 @classmethod
125 def submit_work_merge_request(
126 cls,
127 work_ids: list[str],
128 submitter: str,
129 comment: str = None,
130 reviewer: str = None,
131 status: int = STATUS['PENDING'],
132 ):
133 """
134 Creates new work merge requests with the given work olids.
135
136 Precondition: OLIDs in work_ids list must be sanitized and normalized.
137 """
138 url = f"/works/merge?records={','.join(work_ids)}"
139 if not cls.exists(url):
140 return cls.submit_request(
141 url,
142 submitter=submitter,
143 comment=comment,
144 reviewer=reviewer,
145 status=status,
146 title=cls.get_work_merge_title(work_ids),
147 )
148
149 @staticmethod
150 def get_work_merge_title(olids):
151 title = None
152 for olid in olids:
153 book = web.ctx.site.get(f'/works/{olid}')
154 if book and book.title:
155 title = book.title
156 break
157 return title
158
159 @classmethod
160 def submit_author_merge_request(cls, author_ids, submitter, comment=None):
161 if not comment:
162 # some default note from submitter
163 pass
164 # XXX IDs should be santiized & normalized
165 url = f"/authors/merge?key={'&key='.join(author_ids)}"
166 cls.submit_request(url, submitter=submitter, comment=comment)
167
168 @classmethod
169 def submit_delete_request(cls, olid, submitter, comment=None):
170 if not comment:
171 # some default note from submitter
172 pass
173 url = f"{olid}/-/edit?m=delete"
174 cls.submit_request(cls, url, submitter=submitter, comment=comment)
175
176 @classmethod
177 def submit_request(
178 cls,
179 url: str,
180 submitter: str,
181 reviewer: str = None,
182 status: int = STATUS['PENDING'],
183 comment: str = None,
184 title: str = None,
185 ):
186 """
187 Inserts a new record into the table.
188
189 Preconditions: All data validations should be completed before calling this method.
190 """
191 oldb = db.get_db()
192
193 comments = [cls.create_comment(submitter, comment)] if comment else []
194 json_comment = json.dumps({"comments": comments})
195
196 return oldb.insert(
197 "community_edits_queue",
198 submitter=submitter,
199 reviewer=reviewer,
200 url=url,
201 status=status,
202 comments=json_comment,
203 title=title,
204 )
205
206 @classmethod
207 def assign_request(
208 cls, rid: int, reviewer: Optional[str]
209 ) -> dict[str, Optional[str]]:
210 """Changes assignees to the request with the given ID.
211
212 This method only modifies requests that are not closed.
213
214 If the given reviewer is the same as the request's reviewer, nothing is
215 modified
216 """
217 request = cls.find_by_id(rid)
218
219 if request['status'] not in cls.MODES['closed']:
220 if request['reviewer'] == reviewer:
221 return {
222 'status': 'error',
223 'error': f'{reviewer} is already assigned to this request',
224 }
225 oldb = db.get_db()
226
227 oldb.update(
228 "community_edits_queue",
229 where="id=$rid",
230 reviewer=reviewer,
231 status=cls.STATUS['PENDING'],
232 updated=datetime.datetime.utcnow(),
233 vars={"rid": rid},
234 )
235 return {
236 'reviewer': reviewer,
237 'newStatus': get_status_for_view(cls.STATUS['PENDING']),
238 }
239 return {'status': 'error', 'error': 'This request has already been closed'}
240
241 @classmethod
242 def unassign_request(cls, rid: int):
243 """
244 Changes status of given request to "Pending", and sets reviewer to None.
245 """
246 oldb = db.get_db()
247 oldb.update(
248 "community_edits_queue",
249 where="id=$rid",
250 status=cls.STATUS['PENDING'],
251 reviewer=None,
252 updated=datetime.datetime.utcnow(),
253 vars={"rid": rid},
254 )
255
256 @classmethod
257 def update_request_status(
258 cls, rid: int, status: int, reviewer: str, comment: str = None
259 ) -> int:
260 """
261 Changes the status of the request with the given rid.
262
263 If a comment is included, existing comments list for this request are fetched and
264 the new comment is appended.
265 """
266 oldb = db.get_db()
267
268 update_kwargs = {}
269
270 # XXX Trim whitespace from comment first
271 if comment:
272 comments = cls.get_comments(rid)
273 comments['comments'].append(cls.create_comment(reviewer, comment))
274 update_kwargs['comments'] = json.dumps(comments)
275
276 return oldb.update(
277 "community_edits_queue",
278 where="id=$rid",
279 status=status,
280 reviewer=reviewer,
281 updated=datetime.datetime.utcnow(),
282 vars={"rid": rid},
283 **update_kwargs,
284 )
285
286 @classmethod
287 def comment_request(cls, rid: int, username: str, comment: str) -> int:
288 oldb = db.get_db()
289
290 comments = cls.get_comments(rid)
291 comments['comments'].append(cls.create_comment(username, comment))
292
293 return oldb.update(
294 "community_edits_queue",
295 where="id=$rid",
296 comments=json.dumps(comments),
297 updated=datetime.datetime.utcnow(),
298 vars={"rid": rid},
299 )
300
301 @classmethod
302 def find_by_id(cls, rid: int):
303 """Returns the record with the given ID."""
304 return cls.get_requests(id=rid)[0] or None
305
306 @classmethod
307 def exists(cls, url: str) -> bool:
308 """Returns True if a request with the given URL exists in the table."""
309 return len(cls.get_requests(limit=1, url=url)) > 0
310
311 @classmethod
312 def get_comments(cls, rid: int):
313 """Fetches the comments for the given request, or an empty comments object."""
314 return cls.get_requests(id=rid)[0]['comments'] or {'comments': []}
315
316 @classmethod
317 def create_comment(cls, username: str, message: str) -> dict[str, str]:
318 """Creates and returns a new comment with the given name and message.
319 Timestamp set as current time.
320 """
321 return {
322 # isoformat to avoid to-json issues
323 "timestamp": datetime.datetime.utcnow().isoformat(),
324 "username": username,
325 "message": message,
326 # XXX It may be easier to update these comments if they had IDs
327 }
328
[end of openlibrary/core/edits.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openlibrary/core/edits.py b/openlibrary/core/edits.py
--- a/openlibrary/core/edits.py
+++ b/openlibrary/core/edits.py
@@ -6,6 +6,8 @@
from infogami.utils.view import public
from openlibrary.i18n import gettext as _
+from openlibrary.core import cache
+from openlibrary.utils import dateutil
from . import db
@@ -325,3 +327,12 @@
"message": message,
# XXX It may be easier to update these comments if they had IDs
}
+
+
+@public
+def cached_get_counts_by_mode(mode='all', **kwargs):
+ return cache.memcache_memoize(
+ CommunityEditsQueue.get_counts_by_mode,
+ f"librarian_queue_counts_{mode}",
+ timeout=dateutil.MINUTE_SECS,
+ )(mode, **kwargs)
|
{"golden_diff": "diff --git a/openlibrary/core/edits.py b/openlibrary/core/edits.py\n--- a/openlibrary/core/edits.py\n+++ b/openlibrary/core/edits.py\n@@ -6,6 +6,8 @@\n from infogami.utils.view import public\n \n from openlibrary.i18n import gettext as _\n+from openlibrary.core import cache\n+from openlibrary.utils import dateutil\n \n from . import db\n \n@@ -325,3 +327,12 @@\n \"message\": message,\n # XXX It may be easier to update these comments if they had IDs\n }\n+\n+\n+@public\n+def cached_get_counts_by_mode(mode='all', **kwargs):\n+ return cache.memcache_memoize(\n+ CommunityEditsQueue.get_counts_by_mode,\n+ f\"librarian_queue_counts_{mode}\",\n+ timeout=dateutil.MINUTE_SECS,\n+ )(mode, **kwargs)\n", "issue": "Add notification bubble on Main Nav for super-librarians # pending MRs\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\nOptions are to add a bubble to the avatar and then a entry + bubble into the sub-navigation within the hamburger\r\n\r\n\r\n\r\nOr to add a bubble directly to the black IA topbar which when clicked goes directly to /merges\r\n\r\n\r\n\r\n```\r\n.mr-notifications {\r\n position: absolute;\r\n z-index: 4;\r\n background: #02598b;\r\n color: white;\r\n border-radius: 8px;\r\n padding: 3px 7px;\r\n font-size: 12px;\r\n margin-left: 9px;\r\n margin-top: 35px;\r\n font-weight: bold;\r\n }\r\n```\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\nimport json\nfrom typing import Optional\nimport web\n\nfrom infogami.utils.view import public\n\nfrom openlibrary.i18n import gettext as _\n\nfrom . 
import db\n\n\n@public\ndef get_status_for_view(status_code: int) -> str:\n \"\"\"Returns localized status string that corresponds with the given status code.\"\"\"\n if status_code == CommunityEditsQueue.STATUS['DECLINED']:\n return _('Declined')\n if status_code == CommunityEditsQueue.STATUS['PENDING']:\n return _('Pending')\n if status_code == CommunityEditsQueue.STATUS['MERGED']:\n return _('Merged')\n return _('Unknown')\n\n\nclass CommunityEditsQueue:\n\n \"\"\"Schema\n id: Primary identifier\n submitter: username of person that made the request\n reviewer: The username of the person who reviewed the request\n url: URL of the merge request\n status: Either \"Pending\", \"Merged\", or \"Declined\"\n comment: Short note from reviewer (json blobs (can store timestamps, etc))\n created: created timestamp\n updated: update timestamp\n \"\"\"\n\n STATUS = {\n 'DECLINED': 0,\n 'PENDING': 1,\n 'MERGED': 2,\n }\n\n MODES = {\n 'all': [STATUS['DECLINED'], STATUS['PENDING'], STATUS['MERGED']],\n 'open': [STATUS['PENDING']],\n 'closed': [STATUS['DECLINED'], STATUS['MERGED']],\n }\n\n @classmethod\n def get_requests(\n cls,\n limit: int = 50,\n page: int = 1,\n mode: str = 'all',\n order: str = None,\n **kwargs,\n ):\n oldb = db.get_db()\n\n query_kwargs = {\n \"limit\": limit,\n \"offset\": limit * (page - 1),\n \"vars\": {**kwargs},\n }\n\n query_kwargs['where'] = cls.where_clause(mode, **kwargs)\n\n if order:\n query_kwargs['order'] = order\n return oldb.select(\"community_edits_queue\", **query_kwargs)\n\n @classmethod\n def get_counts_by_mode(cls, mode='all', **kwargs):\n oldb = db.get_db()\n\n query = 'SELECT count(*) from community_edits_queue'\n\n where_clause = cls.where_clause(mode, **kwargs)\n if where_clause:\n query = f'{query} WHERE {where_clause}'\n return oldb.query(query, vars=kwargs)[0]['count']\n\n @classmethod\n def where_clause(cls, mode, **kwargs):\n wheres = []\n\n if kwargs.get('reviewer') is not None:\n wheres.append(\n # if reviewer=\"\" then get all unassigned MRs\n \"reviewer IS NULL\"\n if not kwargs.get('reviewer')\n else \"reviewer=$reviewer\"\n )\n if \"submitter\" in kwargs:\n wheres.append(\n # If submitter not specified, default to any\n \"submitter IS NOT NULL\"\n if kwargs.get(\"submitter\") is None\n else \"submitter=$submitter\"\n )\n if \"url\" in kwargs:\n wheres.append(\"url=$url\")\n if \"id\" in kwargs:\n wheres.append(\"id=$id\")\n\n status_list = (\n [f'status={status}' for status in cls.MODES[mode]] if mode != 'all' else []\n )\n\n where_clause = ''\n\n if wheres:\n where_clause = f'{\" AND \".join(wheres)}'\n if status_list:\n status_query = f'({\" OR \".join(status_list)})'\n if where_clause:\n where_clause = f'{where_clause} AND {status_query}'\n else:\n where_clause = status_query\n\n return where_clause\n\n @classmethod\n def submit_work_merge_request(\n cls,\n work_ids: list[str],\n submitter: str,\n comment: str = None,\n reviewer: str = None,\n status: int = STATUS['PENDING'],\n ):\n \"\"\"\n Creates new work merge requests with the given work olids.\n\n Precondition: OLIDs in work_ids list must be sanitized and normalized.\n \"\"\"\n url = f\"/works/merge?records={','.join(work_ids)}\"\n if not cls.exists(url):\n return cls.submit_request(\n url,\n submitter=submitter,\n comment=comment,\n reviewer=reviewer,\n status=status,\n title=cls.get_work_merge_title(work_ids),\n )\n\n @staticmethod\n def get_work_merge_title(olids):\n title = None\n for olid in olids:\n book = web.ctx.site.get(f'/works/{olid}')\n if book and book.title:\n title = 
book.title\n break\n return title\n\n @classmethod\n def submit_author_merge_request(cls, author_ids, submitter, comment=None):\n if not comment:\n # some default note from submitter\n pass\n # XXX IDs should be santiized & normalized\n url = f\"/authors/merge?key={'&key='.join(author_ids)}\"\n cls.submit_request(url, submitter=submitter, comment=comment)\n\n @classmethod\n def submit_delete_request(cls, olid, submitter, comment=None):\n if not comment:\n # some default note from submitter\n pass\n url = f\"{olid}/-/edit?m=delete\"\n cls.submit_request(cls, url, submitter=submitter, comment=comment)\n\n @classmethod\n def submit_request(\n cls,\n url: str,\n submitter: str,\n reviewer: str = None,\n status: int = STATUS['PENDING'],\n comment: str = None,\n title: str = None,\n ):\n \"\"\"\n Inserts a new record into the table.\n\n Preconditions: All data validations should be completed before calling this method.\n \"\"\"\n oldb = db.get_db()\n\n comments = [cls.create_comment(submitter, comment)] if comment else []\n json_comment = json.dumps({\"comments\": comments})\n\n return oldb.insert(\n \"community_edits_queue\",\n submitter=submitter,\n reviewer=reviewer,\n url=url,\n status=status,\n comments=json_comment,\n title=title,\n )\n\n @classmethod\n def assign_request(\n cls, rid: int, reviewer: Optional[str]\n ) -> dict[str, Optional[str]]:\n \"\"\"Changes assignees to the request with the given ID.\n\n This method only modifies requests that are not closed.\n\n If the given reviewer is the same as the request's reviewer, nothing is\n modified\n \"\"\"\n request = cls.find_by_id(rid)\n\n if request['status'] not in cls.MODES['closed']:\n if request['reviewer'] == reviewer:\n return {\n 'status': 'error',\n 'error': f'{reviewer} is already assigned to this request',\n }\n oldb = db.get_db()\n\n oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n reviewer=reviewer,\n status=cls.STATUS['PENDING'],\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n return {\n 'reviewer': reviewer,\n 'newStatus': get_status_for_view(cls.STATUS['PENDING']),\n }\n return {'status': 'error', 'error': 'This request has already been closed'}\n\n @classmethod\n def unassign_request(cls, rid: int):\n \"\"\"\n Changes status of given request to \"Pending\", and sets reviewer to None.\n \"\"\"\n oldb = db.get_db()\n oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n status=cls.STATUS['PENDING'],\n reviewer=None,\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n\n @classmethod\n def update_request_status(\n cls, rid: int, status: int, reviewer: str, comment: str = None\n ) -> int:\n \"\"\"\n Changes the status of the request with the given rid.\n\n If a comment is included, existing comments list for this request are fetched and\n the new comment is appended.\n \"\"\"\n oldb = db.get_db()\n\n update_kwargs = {}\n\n # XXX Trim whitespace from comment first\n if comment:\n comments = cls.get_comments(rid)\n comments['comments'].append(cls.create_comment(reviewer, comment))\n update_kwargs['comments'] = json.dumps(comments)\n\n return oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n status=status,\n reviewer=reviewer,\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n **update_kwargs,\n )\n\n @classmethod\n def comment_request(cls, rid: int, username: str, comment: str) -> int:\n oldb = db.get_db()\n\n comments = cls.get_comments(rid)\n comments['comments'].append(cls.create_comment(username, comment))\n\n return oldb.update(\n 
\"community_edits_queue\",\n where=\"id=$rid\",\n comments=json.dumps(comments),\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n\n @classmethod\n def find_by_id(cls, rid: int):\n \"\"\"Returns the record with the given ID.\"\"\"\n return cls.get_requests(id=rid)[0] or None\n\n @classmethod\n def exists(cls, url: str) -> bool:\n \"\"\"Returns True if a request with the given URL exists in the table.\"\"\"\n return len(cls.get_requests(limit=1, url=url)) > 0\n\n @classmethod\n def get_comments(cls, rid: int):\n \"\"\"Fetches the comments for the given request, or an empty comments object.\"\"\"\n return cls.get_requests(id=rid)[0]['comments'] or {'comments': []}\n\n @classmethod\n def create_comment(cls, username: str, message: str) -> dict[str, str]:\n \"\"\"Creates and returns a new comment with the given name and message.\n Timestamp set as current time.\n \"\"\"\n return {\n # isoformat to avoid to-json issues\n \"timestamp\": datetime.datetime.utcnow().isoformat(),\n \"username\": username,\n \"message\": message,\n # XXX It may be easier to update these comments if they had IDs\n }\n", "path": "openlibrary/core/edits.py"}]}
| 4,064 | 201 |
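A hedged sketch of how the cached helper added above might drive the notification bubble. It assumes a configured Open Library environment (database plus memcache) and reuses the existing `reviewer=""` convention from `where_clause()` for "unassigned" requests; in practice the `@public` decorator exposes the call to the site templates, so the count would normally be rendered there rather than in a standalone script.

```python
from openlibrary.core.edits import cached_get_counts_by_mode

# Count open, unassigned community edit requests for the topbar bubble.
open_unassigned = cached_get_counts_by_mode(mode='open', reviewer='')
if open_unassigned:
    print(f'{open_unassigned} merge requests awaiting review')
```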
gh_patches_debug_20979
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1399
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On a Windows system, in either win7 or win2012R2, performing a pre-commit reports an error
All:
It looks like pre-commit is using virtualenv.
I used pre-commit on a Python project.
Is there any way to solve this problem?
The following log is excerpted from pre-commit.log
**Log**
```
An unexpected error has occurred: CalledProcessError: command: ('C:\\Users\\Administrator\\.cache\\pre-commit\\repop9_0qne0\\py_env-default\\Scripts\\pip.EXE', 'install', '.')
return code: 1
expected return code: 0
stdout:
Looking in indexes: http://pypi.douban.com/simple
Processing c:\users\administrator\.cache\pre-commit\repop9_0qne0
Collecting six
Downloading http://pypi.doubanio.com/packages/65/eb/1f97cb97bfc2390a276969c6fae16075da282f5058082d4cb10c6c5c1dba/six-1.14.0-py2.py3-none-any.whl (10 kB)
Collecting pip-tools==3.6.1
Downloading http://pypi.doubanio.com/packages/06/96/89872db07ae70770fba97205b0737c17ef013d0d1c790899c16bb8bac419/pip_tools-3.6.1-py2.py3-none-any.whl (35 kB)
Collecting pip==19.1
Downloading http://pypi.doubanio.com/packages/f9/fb/863012b13912709c13cf5cfdbfb304fa6c727659d6290438e1a88df9d848/pip-19.1-py2.py3-none-any.whl (1.4 MB)
Collecting click>=6
Downloading http://pypi.doubanio.com/packages/dd/c0/4d8f43a9b16e289f36478422031b8a63b54b6ac3b1ba605d602f10dd54d6/click-7.1.1-py2.py3-none-any.whl (82 kB)
Building wheels for collected packages: pip-tools-compile
Building wheel for pip-tools-compile (setup.py): started
Building wheel for pip-tools-compile (setup.py): finished with status 'done'
Created wheel for pip-tools-compile: filename=pip_tools_compile-1.0-py3-none-any.whl size=17905 sha256=30321f831b5ac147be919304dee138139d055f2bdb52a5511317bc718b29b76d
Stored in directory: C:\Users\ADMINI~1\AppData\Local\Temp\pip-ephem-wheel-cache-b67alovp\wheels\f1\34\3c\bed42474e4aeb415aa0bfd1e28124cde97604fa12005eed65b
Successfully built pip-tools-compile
Installing collected packages: six, click, pip-tools, pip, pip-tools-compile
Attempting uninstall: pip
Found existing installation: pip 20.0.2
Uninstalling pip-20.0.2:
Successfully uninstalled pip-20.0.2
stderr:
  ERROR: Could not install packages due to an EnvironmentError: [WinError 5] 拒绝访问 (Access is denied): 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\pip-uninstall-8wsztgaa\\pip.exe'
Consider using the `--user` option or check the permissions.
```
```
Traceback (most recent call last):
File "c:\program files\python37\lib\site-packages\pre_commit\error_handler.py", line 56, in error_handler
yield
File "c:\program files\python37\lib\site-packages\pre_commit\main.py", line 372, in main
args=args.rest[1:],
File "c:\program files\python37\lib\site-packages\pre_commit\commands\hook_impl.py", line 187, in hook_impl
return retv | run(config, store, ns)
File "c:\program files\python37\lib\site-packages\pre_commit\commands\run.py", line 355, in run
install_hook_envs(hooks, store)
File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 200, in install_hook_envs
_hook_install(hook)
File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 83, in _hook_install
hook.prefix, hook.language_version, hook.additional_dependencies,
File "c:\program files\python37\lib\site-packages\pre_commit\languages\python.py", line 197, in install_environment
prefix, ('pip', 'install', '.') + additional_dependencies,
File "c:\program files\python37\lib\site-packages\pre_commit\languages\helpers.py", line 25, in run_setup_cmd
cmd_output_b(*cmd, cwd=prefix.prefix_dir)
File "c:\program files\python37\lib\site-packages\pre_commit\util.py", line 156, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: <unprintable CalledProcessError object>
```
</issue>
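The log shows the hook environment's `pip.EXE` launcher trying to uninstall and replace its own `pip.exe` while Windows still holds the file open, which is what produces the `[WinError 5]` access-denied failure. Invoking pip as a module keeps `python.exe` as the running process, so the launcher can be swapped out freely. A minimal sketch of the idea (hypothetical helper, not the actual pre-commit internals):

```python
# Sketch only: compose the install command so the interpreter, not the
# pip.exe console script, performs the install inside the hook env.
import subprocess
import sys


def install_into_env(additional_dependencies=()):
    # pre-commit relies on its PATH patching so that plain 'python' resolves
    # to the hook env; sys.executable is used here only to keep the sketch
    # runnable on its own.
    cmd = (sys.executable, '-mpip', 'install', '.', *additional_dependencies)
    return subprocess.run(cmd, check=False).returncode
```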
<code>
[start of pre_commit/languages/python.py]
1 import contextlib
2 import functools
3 import os
4 import sys
5 from typing import Callable
6 from typing import ContextManager
7 from typing import Generator
8 from typing import Optional
9 from typing import Sequence
10 from typing import Tuple
11
12 import pre_commit.constants as C
13 from pre_commit.envcontext import envcontext
14 from pre_commit.envcontext import PatchesT
15 from pre_commit.envcontext import UNSET
16 from pre_commit.envcontext import Var
17 from pre_commit.hook import Hook
18 from pre_commit.languages import helpers
19 from pre_commit.parse_shebang import find_executable
20 from pre_commit.prefix import Prefix
21 from pre_commit.util import CalledProcessError
22 from pre_commit.util import clean_path_on_failure
23 from pre_commit.util import cmd_output
24 from pre_commit.util import cmd_output_b
25
26 ENVIRONMENT_DIR = 'py_env'
27
28
29 def bin_dir(venv: str) -> str:
30 """On windows there's a different directory for the virtualenv"""
31 bin_part = 'Scripts' if os.name == 'nt' else 'bin'
32 return os.path.join(venv, bin_part)
33
34
35 def get_env_patch(venv: str) -> PatchesT:
36 return (
37 ('PYTHONHOME', UNSET),
38 ('VIRTUAL_ENV', venv),
39 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
40 )
41
42
43 def _find_by_py_launcher(
44 version: str,
45 ) -> Optional[str]: # pragma: no cover (windows only)
46 if version.startswith('python'):
47 num = version[len('python'):]
48 try:
49 cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')
50 return cmd_output(*cmd)[1].strip()
51 except CalledProcessError:
52 pass
53 return None
54
55
56 def _find_by_sys_executable() -> Optional[str]:
57 def _norm(path: str) -> Optional[str]:
58 _, exe = os.path.split(path.lower())
59 exe, _, _ = exe.partition('.exe')
60 if exe not in {'python', 'pythonw'} and find_executable(exe):
61 return exe
62 return None
63
64 # On linux, I see these common sys.executables:
65 #
66 # system `python`: /usr/bin/python -> python2.7
67 # system `python2`: /usr/bin/python2 -> python2.7
68 # virtualenv v: v/bin/python (will not return from this loop)
69 # virtualenv v -ppython2: v/bin/python -> python2
70 # virtualenv v -ppython2.7: v/bin/python -> python2.7
71 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
72 for path in (sys.executable, os.path.realpath(sys.executable)):
73 exe = _norm(path)
74 if exe:
75 return exe
76 return None
77
78
79 @functools.lru_cache(maxsize=1)
80 def get_default_version() -> str: # pragma: no cover (platform dependent)
81 # First attempt from `sys.executable` (or the realpath)
82 exe = _find_by_sys_executable()
83 if exe:
84 return exe
85
86 # Next try the `pythonX.X` executable
87 exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'
88 if find_executable(exe):
89 return exe
90
91 if _find_by_py_launcher(exe):
92 return exe
93
94 # Give a best-effort try for windows
95 default_folder_name = exe.replace('.', '')
96 if os.path.exists(fr'C:\{default_folder_name}\python.exe'):
97 return exe
98
99 # We tried!
100 return C.DEFAULT
101
102
103 def _sys_executable_matches(version: str) -> bool:
104 if version == 'python':
105 return True
106 elif not version.startswith('python'):
107 return False
108
109 try:
110 info = tuple(int(p) for p in version[len('python'):].split('.'))
111 except ValueError:
112 return False
113
114 return sys.version_info[:len(info)] == info
115
116
117 def norm_version(version: str) -> str:
118 # first see if our current executable is appropriate
119 if _sys_executable_matches(version):
120 return sys.executable
121
122 if os.name == 'nt': # pragma: no cover (windows)
123 version_exec = _find_by_py_launcher(version)
124 if version_exec:
125 return version_exec
126
127 # Try looking up by name
128 version_exec = find_executable(version)
129 if version_exec and version_exec != version:
130 return version_exec
131
132 # If it is in the form pythonx.x search in the default
133 # place on windows
134 if version.startswith('python'):
135 default_folder_name = version.replace('.', '')
136 return fr'C:\{default_folder_name}\python.exe'
137
138 # Otherwise assume it is a path
139 return os.path.expanduser(version)
140
141
142 def py_interface(
143 _dir: str,
144 _make_venv: Callable[[str, str], None],
145 ) -> Tuple[
146 Callable[[Prefix, str], ContextManager[None]],
147 Callable[[Prefix, str], bool],
148 Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],
149 Callable[[Prefix, str, Sequence[str]], None],
150 ]:
151 @contextlib.contextmanager
152 def in_env(
153 prefix: Prefix,
154 language_version: str,
155 ) -> Generator[None, None, None]:
156 envdir = prefix.path(helpers.environment_dir(_dir, language_version))
157 with envcontext(get_env_patch(envdir)):
158 yield
159
160 def healthy(prefix: Prefix, language_version: str) -> bool:
161 envdir = helpers.environment_dir(_dir, language_version)
162 exe_name = 'python.exe' if sys.platform == 'win32' else 'python'
163 py_exe = prefix.path(bin_dir(envdir), exe_name)
164 with in_env(prefix, language_version):
165 retcode, _, _ = cmd_output_b(
166 py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref',
167 cwd='/',
168 retcode=None,
169 )
170 return retcode == 0
171
172 def run_hook(
173 hook: Hook,
174 file_args: Sequence[str],
175 color: bool,
176 ) -> Tuple[int, bytes]:
177 with in_env(hook.prefix, hook.language_version):
178 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
179
180 def install_environment(
181 prefix: Prefix,
182 version: str,
183 additional_dependencies: Sequence[str],
184 ) -> None:
185 additional_dependencies = tuple(additional_dependencies)
186 directory = helpers.environment_dir(_dir, version)
187
188 env_dir = prefix.path(directory)
189 with clean_path_on_failure(env_dir):
190 if version != C.DEFAULT:
191 python = norm_version(version)
192 else:
193 python = os.path.realpath(sys.executable)
194 _make_venv(env_dir, python)
195 with in_env(prefix, version):
196 helpers.run_setup_cmd(
197 prefix, ('pip', 'install', '.') + additional_dependencies,
198 )
199
200 return in_env, healthy, run_hook, install_environment
201
202
203 def make_venv(envdir: str, python: str) -> None:
204 env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')
205 cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)
206 cmd_output_b(*cmd, env=env, cwd='/')
207
208
209 _interface = py_interface(ENVIRONMENT_DIR, make_venv)
210 in_env, healthy, run_hook, install_environment = _interface
211
[end of pre_commit/languages/python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -182,8 +182,8 @@
version: str,
additional_dependencies: Sequence[str],
) -> None:
- additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(_dir, version)
+ install = ('python', '-mpip', 'install', '.', *additional_dependencies)
env_dir = prefix.path(directory)
with clean_path_on_failure(env_dir):
@@ -193,9 +193,7 @@
python = os.path.realpath(sys.executable)
_make_venv(env_dir, python)
with in_env(prefix, version):
- helpers.run_setup_cmd(
- prefix, ('pip', 'install', '.') + additional_dependencies,
- )
+ helpers.run_setup_cmd(prefix, install)
return in_env, healthy, run_hook, install_environment
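The rewritten command also drops the explicit `tuple(additional_dependencies)` conversion, since star-unpacking inside a tuple literal accepts any sequence. A quick sketch with made-up dependency names:

```python
additional_dependencies = ['flake8', 'flake8-bugbear']  # arbitrary examples
install = ('python', '-mpip', 'install', '.', *additional_dependencies)
print(install)
# ('python', '-mpip', 'install', '.', 'flake8', 'flake8-bugbear')
```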
|
{"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -182,8 +182,8 @@\n version: str,\n additional_dependencies: Sequence[str],\n ) -> None:\n- additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n+ install = ('python', '-mpip', 'install', '.', *additional_dependencies)\n \n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n@@ -193,9 +193,7 @@\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n with in_env(prefix, version):\n- helpers.run_setup_cmd(\n- prefix, ('pip', 'install', '.') + additional_dependencies,\n- )\n+ helpers.run_setup_cmd(prefix, install)\n \n return in_env, healthy, run_hook, install_environment\n", "issue": "On a Windows system, in either win7 or win2012R2, performing a pre-commit reports an error\n All:\r\nIt looks like the pre-commit is using virtualenv\r\n\r\nI used pre-commit on a Python project. \r\n\r\nIs there any way to solve this problem?\r\n\r\n\r\nThe following log is excerpted from pre-committee.log \r\n**Log**\r\n```\r\nAn unexpected error has occurred: CalledProcessError: command: ('C:\\\\Users\\\\Administrator\\\\.cache\\\\pre-commit\\\\repop9_0qne0\\\\py_env-default\\\\Scripts\\\\pip.EXE', 'install', '.')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout:\r\n Looking in indexes: http://pypi.douban.com/simple\r\n Processing c:\\users\\administrator\\.cache\\pre-commit\\repop9_0qne0\r\n Collecting six\r\n Downloading http://pypi.doubanio.com/packages/65/eb/1f97cb97bfc2390a276969c6fae16075da282f5058082d4cb10c6c5c1dba/six-1.14.0-py2.py3-none-any.whl (10 kB)\r\n Collecting pip-tools==3.6.1\r\n Downloading http://pypi.doubanio.com/packages/06/96/89872db07ae70770fba97205b0737c17ef013d0d1c790899c16bb8bac419/pip_tools-3.6.1-py2.py3-none-any.whl (35 kB)\r\n Collecting pip==19.1\r\n Downloading http://pypi.doubanio.com/packages/f9/fb/863012b13912709c13cf5cfdbfb304fa6c727659d6290438e1a88df9d848/pip-19.1-py2.py3-none-any.whl (1.4 MB)\r\n Collecting click>=6\r\n Downloading http://pypi.doubanio.com/packages/dd/c0/4d8f43a9b16e289f36478422031b8a63b54b6ac3b1ba605d602f10dd54d6/click-7.1.1-py2.py3-none-any.whl (82 kB)\r\n Building wheels for collected packages: pip-tools-compile\r\n Building wheel for pip-tools-compile (setup.py): started\r\n Building wheel for pip-tools-compile (setup.py): finished with status 'done'\r\n Created wheel for pip-tools-compile: filename=pip_tools_compile-1.0-py3-none-any.whl size=17905 sha256=30321f831b5ac147be919304dee138139d055f2bdb52a5511317bc718b29b76d\r\n Stored in directory: C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\pip-ephem-wheel-cache-b67alovp\\wheels\\f1\\34\\3c\\bed42474e4aeb415aa0bfd1e28124cde97604fa12005eed65b\r\n Successfully built pip-tools-compile\r\n Installing collected packages: six, click, pip-tools, pip, pip-tools-compile\r\n Attempting uninstall: pip\r\n Found existing installation: pip 20.0.2\r\n Uninstalling pip-20.0.2:\r\n Successfully uninstalled pip-20.0.2\r\n \r\nstderr:\r\n ERROR: Could not install packages due to an EnvironmentError: [WinError 5] \u62d2\u7edd\u8bbf\u95ee\u3002: 'C:\\\\Users\\\\ADMINI~1\\\\AppData\\\\Local\\\\Temp\\\\pip-uninstall-8wsztgaa\\\\pip.exe'\r\n Consider using the `--user` option or check the permissions.\r\n \r\n \r\n``` \r\n \r\n```\r\nTraceback (most recent call last):\r\n File \"c:\\program 
files\\python37\\lib\\site-packages\\pre_commit\\error_handler.py\", line 56, in error_handler\r\n yield\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\main.py\", line 372, in main\r\n args=args.rest[1:],\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\commands\\hook_impl.py\", line 187, in hook_impl\r\n return retv | run(config, store, ns)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 355, in run\r\n install_hook_envs(hooks, store)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\repository.py\", line 200, in install_hook_envs\r\n _hook_install(hook)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\repository.py\", line 83, in _hook_install\r\n hook.prefix, hook.language_version, hook.additional_dependencies,\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\languages\\python.py\", line 197, in install_environment\r\n prefix, ('pip', 'install', '.') + additional_dependencies,\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\languages\\helpers.py\", line 25, in run_setup_cmd\r\n cmd_output_b(*cmd, cwd=prefix.prefix_dir)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\util.py\", line 156, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: <unprintable CalledProcessError object>\r\n\r\n```\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Callable\nfrom typing import ContextManager\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n try:\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n return cmd_output(*cmd)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v 
-ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef py_interface(\n _dir: str,\n _make_venv: Callable[[str, str], None],\n) -> Tuple[\n Callable[[Prefix, str], ContextManager[None]],\n Callable[[Prefix, str], bool],\n Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],\n Callable[[Prefix, str, Sequence[str]], None],\n]:\n @contextlib.contextmanager\n def in_env(\n prefix: Prefix,\n language_version: str,\n ) -> Generator[None, None, None]:\n envdir = prefix.path(helpers.environment_dir(_dir, language_version))\n with envcontext(get_env_patch(envdir)):\n yield\n\n def healthy(prefix: Prefix, language_version: str) -> bool:\n envdir = helpers.environment_dir(_dir, language_version)\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output_b(\n py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref',\n cwd='/',\n retcode=None,\n )\n return retcode == 0\n\n def run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n ) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n\n def install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n ) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n\n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n if version != C.DEFAULT:\n python = norm_version(version)\n else:\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n 
with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('pip', 'install', '.') + additional_dependencies,\n )\n\n return in_env, healthy, run_hook, install_environment\n\n\ndef make_venv(envdir: str, python: str) -> None:\n env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')\n cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n cmd_output_b(*cmd, env=env, cwd='/')\n\n\n_interface = py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python.py"}]}
| 4,086 | 217 |
gh_patches_debug_4622
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1083
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate from bumpversion to bump2version
# Description
@dguest has brought to my attention that [`bumpversion` is no longer maintained](https://github.com/peritus/bumpversion) (apparently as of November 2019). Given this, we should probably take the project's advice
> 🎬 If you want to start using `bumpversion`, you're best advised to install one of the maintained forks, e.g. ➡ @ c4urself's [`bump2version`](https://github.com/c4urself/bump2version/#installation).
given that it seems that [transferring ownership and maintainers is taking a very long time/might not happen](https://github.com/c4urself/bump2version/issues/86).
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes
12 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted(set(['matplotlib']))
23 extras_require['lint'] = sorted(set(['pyflakes', 'black']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'pytest~=6.0',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'jupyter',
43 'uproot~=3.3',
44 'graphviz',
45 'jsonpatch',
46 ]
47 )
48 )
49 extras_require['docs'] = sorted(
50 set(
51 [
52 'sphinx>=3.1.2',
53 'sphinxcontrib-bibtex',
54 'sphinx-click',
55 'sphinx_rtd_theme',
56 'nbsphinx',
57 'ipywidgets',
58 'sphinx-issues',
59 'sphinx-copybutton>0.2.9',
60 ]
61 )
62 )
63 extras_require['develop'] = sorted(
64 set(
65 extras_require['docs']
66 + extras_require['lint']
67 + extras_require['test']
68 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
69 )
70 )
71 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
72
73
74 setup(
75 extras_require=extras_require,
76 use_scm_version=lambda: {'local_scheme': lambda version: ''},
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,7 +65,7 @@
extras_require['docs']
+ extras_require['lint']
+ extras_require['test']
- + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
+ + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']
)
)
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
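Because every extra is normalised with `sorted(set(...))` and `complete` is rebuilt as the union of all extras, the swap only needs to touch the `develop` list. A reduced sketch of that composition pattern (package lists shortened and partly made up):

```python
extras_require = {
    'lint': ['pyflakes', 'black'],
}
extras_require['develop'] = sorted(
    set(extras_require['lint'] + ['nbdime', 'bump2version', 'pre-commit'])
)
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
print(extras_require['complete'])
# ['black', 'bump2version', 'nbdime', 'pre-commit', 'pyflakes']
```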
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,7 @@\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n- + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n+ + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n )\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n", "issue": "Migrate from bumpversion to bump2version\n# Description\r\n\r\n@dguest has brought to my attention that [`bumpversion` is no longer maintained](https://github.com/peritus/bumpversion) (as of apparently November 2019). Given this we should probably take the project's advice\r\n\r\n> \ud83c\udfac If you want to start using `bumpversion`, you're best advised to install one of the maintained forks, e.g. \u27a1 @ c4urself's [`bump2version`](https://github.com/c4urself/bump2version/#installation).\r\n\r\ngiven that it seems that [transferring ownership and maintainers is taking a very long time/might not happen](https://github.com/c4urself/bump2version/issues/86).\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,431 | 132 |
gh_patches_debug_14290
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2759
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add min/max fields to Histogram in otlp metrics exporter
From proto https://github.com/open-telemetry/opentelemetry-proto/pull/279
</issue>
<code>
[start of exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from logging import getLogger
15 from os import environ
16 from typing import Optional, Sequence
17 from grpc import ChannelCredentials, Compression
18 from opentelemetry.exporter.otlp.proto.grpc.exporter import (
19 OTLPExporterMixin,
20 get_resource_data,
21 )
22 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (
23 ExportMetricsServiceRequest,
24 )
25 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (
26 MetricsServiceStub,
27 )
28 from opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope
29 from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2
30 from opentelemetry.sdk.environment_variables import (
31 OTEL_EXPORTER_OTLP_METRICS_INSECURE,
32 )
33 from opentelemetry.sdk.metrics.export import (
34 Gauge,
35 Histogram,
36 Metric,
37 Sum,
38 )
39
40 from opentelemetry.sdk.metrics.export import (
41 MetricExporter,
42 MetricExportResult,
43 MetricsData,
44 )
45
46 _logger = getLogger(__name__)
47
48
49 class OTLPMetricExporter(
50 MetricExporter,
51 OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],
52 ):
53 _result = MetricExportResult
54 _stub = MetricsServiceStub
55
56 def __init__(
57 self,
58 endpoint: Optional[str] = None,
59 insecure: Optional[bool] = None,
60 credentials: Optional[ChannelCredentials] = None,
61 headers: Optional[Sequence] = None,
62 timeout: Optional[int] = None,
63 compression: Optional[Compression] = None,
64 ):
65
66 if insecure is None:
67 insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)
68 if insecure is not None:
69 insecure = insecure.lower() == "true"
70
71 super().__init__(
72 **{
73 "endpoint": endpoint,
74 "insecure": insecure,
75 "credentials": credentials,
76 "headers": headers,
77 "timeout": timeout,
78 "compression": compression,
79 }
80 )
81
82 def _translate_data(
83 self, data: MetricsData
84 ) -> ExportMetricsServiceRequest:
85
86 resource_metrics_dict = {}
87
88 for resource_metrics in data.resource_metrics:
89
90 resource = resource_metrics.resource
91
92 # It is safe to assume that each entry in data.resource_metrics is
93 # associated with an unique resource.
94 scope_metrics_dict = {}
95
96 resource_metrics_dict[resource] = scope_metrics_dict
97
98 for scope_metrics in resource_metrics.scope_metrics:
99
100 instrumentation_scope = scope_metrics.scope
101
102 # The SDK groups metrics in instrumentation scopes already so
103 # there is no need to check for existing instrumentation scopes
104 # here.
105 pb2_scope_metrics = pb2.ScopeMetrics(
106 scope=InstrumentationScope(
107 name=instrumentation_scope.name,
108 version=instrumentation_scope.version,
109 )
110 )
111
112 scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics
113
114 for metric in scope_metrics.metrics:
115 pb2_metric = pb2.Metric(
116 name=metric.name,
117 description=metric.description,
118 unit=metric.unit,
119 )
120
121 if isinstance(metric.data, Gauge):
122 for data_point in metric.data.data_points:
123 pt = pb2.NumberDataPoint(
124 attributes=self._translate_attributes(
125 data_point.attributes
126 ),
127 time_unix_nano=data_point.time_unix_nano,
128 )
129 if isinstance(data_point.value, int):
130 pt.as_int = data_point.value
131 else:
132 pt.as_double = data_point.value
133 pb2_metric.gauge.data_points.append(pt)
134
135 elif isinstance(metric.data, Histogram):
136 for data_point in metric.data.data_points:
137 pt = pb2.HistogramDataPoint(
138 attributes=self._translate_attributes(
139 data_point.attributes
140 ),
141 time_unix_nano=data_point.time_unix_nano,
142 start_time_unix_nano=(
143 data_point.start_time_unix_nano
144 ),
145 count=data_point.count,
146 sum=data_point.sum,
147 bucket_counts=data_point.bucket_counts,
148 explicit_bounds=data_point.explicit_bounds,
149 )
150 pb2_metric.histogram.aggregation_temporality = (
151 metric.data.aggregation_temporality
152 )
153 pb2_metric.histogram.data_points.append(pt)
154
155 elif isinstance(metric.data, Sum):
156 for data_point in metric.data.data_points:
157 pt = pb2.NumberDataPoint(
158 attributes=self._translate_attributes(
159 data_point.attributes
160 ),
161 start_time_unix_nano=(
162 data_point.start_time_unix_nano
163 ),
164 time_unix_nano=data_point.time_unix_nano,
165 )
166 if isinstance(data_point.value, int):
167 pt.as_int = data_point.value
168 else:
169 pt.as_double = data_point.value
170 # note that because sum is a message type, the
171 # fields must be set individually rather than
172 # instantiating a pb2.Sum and setting it once
173 pb2_metric.sum.aggregation_temporality = (
174 metric.data.aggregation_temporality
175 )
176 pb2_metric.sum.is_monotonic = (
177 metric.data.is_monotonic
178 )
179 pb2_metric.sum.data_points.append(pt)
180 else:
181 _logger.warn(
182 "unsupported datapoint type %s", metric.point
183 )
184 continue
185
186 pb2_scope_metrics.metrics.append(pb2_metric)
187
188 return ExportMetricsServiceRequest(
189 resource_metrics=get_resource_data(
190 resource_metrics_dict,
191 pb2.ResourceMetrics,
192 "metrics",
193 )
194 )
195
196 def export(
197 self,
198 metrics_data: MetricsData,
199 timeout_millis: float = 10_000,
200 **kwargs,
201 ) -> MetricExportResult:
202 # TODO(#2663): OTLPExporterMixin should pass timeout to gRPC
203 return self._export(metrics_data)
204
205 def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
206 pass
207
[end of exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py
@@ -146,6 +146,8 @@
sum=data_point.sum,
bucket_counts=data_point.bucket_counts,
explicit_bounds=data_point.explicit_bounds,
+ max=data_point.max,
+ min=data_point.min,
)
pb2_metric.histogram.aggregation_temporality = (
metric.data.aggregation_temporality
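With the updated proto, `HistogramDataPoint` carries the aggregated `min` and `max` alongside `count` and `sum`, and the exporter only has to copy them across from the SDK data point. A stand-in sketch of that copy step (plain Python objects in place of the real SDK and protobuf classes):

```python
from types import SimpleNamespace

# Hypothetical SDK histogram data point; the field names mirror those used above.
data_point = SimpleNamespace(
    count=3, sum=6.0, min=1.0, max=3.0,
    bucket_counts=[1, 2, 0], explicit_bounds=[2.0, 4.0],
)

pt = {  # stands in for pb2.HistogramDataPoint(...)
    'count': data_point.count,
    'sum': data_point.sum,
    'min': data_point.min,
    'max': data_point.max,
    'bucket_counts': data_point.bucket_counts,
    'explicit_bounds': data_point.explicit_bounds,
}
print(pt)
```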
|
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py\n@@ -146,6 +146,8 @@\n sum=data_point.sum,\n bucket_counts=data_point.bucket_counts,\n explicit_bounds=data_point.explicit_bounds,\n+ max=data_point.max,\n+ min=data_point.min,\n )\n pb2_metric.histogram.aggregation_temporality = (\n metric.data.aggregation_temporality\n", "issue": "Add min/max fields to Histogram in otlp metrics exporter\nFrom proto https://github.com/open-telemetry/opentelemetry-proto/pull/279\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Optional, Sequence\nfrom grpc import ChannelCredentials, Compression\nfrom opentelemetry.exporter.otlp.proto.grpc.exporter import (\n OTLPExporterMixin,\n get_resource_data,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (\n ExportMetricsServiceRequest,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (\n MetricsServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope\nfrom opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_METRICS_INSECURE,\n)\nfrom opentelemetry.sdk.metrics.export import (\n Gauge,\n Histogram,\n Metric,\n Sum,\n)\n\nfrom opentelemetry.sdk.metrics.export import (\n MetricExporter,\n MetricExportResult,\n MetricsData,\n)\n\n_logger = getLogger(__name__)\n\n\nclass OTLPMetricExporter(\n MetricExporter,\n OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],\n):\n _result = MetricExportResult\n _stub = MetricsServiceStub\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n\n if insecure is None:\n insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)\n if insecure is not None:\n insecure = insecure.lower() == \"true\"\n\n super().__init__(\n **{\n \"endpoint\": endpoint,\n \"insecure\": insecure,\n \"credentials\": credentials,\n \"headers\": headers,\n \"timeout\": timeout,\n \"compression\": compression,\n }\n )\n\n def _translate_data(\n self, data: MetricsData\n ) -> ExportMetricsServiceRequest:\n\n resource_metrics_dict = {}\n\n for resource_metrics in data.resource_metrics:\n\n resource = resource_metrics.resource\n\n # It is safe to assume 
that each entry in data.resource_metrics is\n # associated with an unique resource.\n scope_metrics_dict = {}\n\n resource_metrics_dict[resource] = scope_metrics_dict\n\n for scope_metrics in resource_metrics.scope_metrics:\n\n instrumentation_scope = scope_metrics.scope\n\n # The SDK groups metrics in instrumentation scopes already so\n # there is no need to check for existing instrumentation scopes\n # here.\n pb2_scope_metrics = pb2.ScopeMetrics(\n scope=InstrumentationScope(\n name=instrumentation_scope.name,\n version=instrumentation_scope.version,\n )\n )\n\n scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics\n\n for metric in scope_metrics.metrics:\n pb2_metric = pb2.Metric(\n name=metric.name,\n description=metric.description,\n unit=metric.unit,\n )\n\n if isinstance(metric.data, Gauge):\n for data_point in metric.data.data_points:\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n time_unix_nano=data_point.time_unix_nano,\n )\n if isinstance(data_point.value, int):\n pt.as_int = data_point.value\n else:\n pt.as_double = data_point.value\n pb2_metric.gauge.data_points.append(pt)\n\n elif isinstance(metric.data, Histogram):\n for data_point in metric.data.data_points:\n pt = pb2.HistogramDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n time_unix_nano=data_point.time_unix_nano,\n start_time_unix_nano=(\n data_point.start_time_unix_nano\n ),\n count=data_point.count,\n sum=data_point.sum,\n bucket_counts=data_point.bucket_counts,\n explicit_bounds=data_point.explicit_bounds,\n )\n pb2_metric.histogram.aggregation_temporality = (\n metric.data.aggregation_temporality\n )\n pb2_metric.histogram.data_points.append(pt)\n\n elif isinstance(metric.data, Sum):\n for data_point in metric.data.data_points:\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n start_time_unix_nano=(\n data_point.start_time_unix_nano\n ),\n time_unix_nano=data_point.time_unix_nano,\n )\n if isinstance(data_point.value, int):\n pt.as_int = data_point.value\n else:\n pt.as_double = data_point.value\n # note that because sum is a message type, the\n # fields must be set individually rather than\n # instantiating a pb2.Sum and setting it once\n pb2_metric.sum.aggregation_temporality = (\n metric.data.aggregation_temporality\n )\n pb2_metric.sum.is_monotonic = (\n metric.data.is_monotonic\n )\n pb2_metric.sum.data_points.append(pt)\n else:\n _logger.warn(\n \"unsupported datapoint type %s\", metric.point\n )\n continue\n\n pb2_scope_metrics.metrics.append(pb2_metric)\n\n return ExportMetricsServiceRequest(\n resource_metrics=get_resource_data(\n resource_metrics_dict,\n pb2.ResourceMetrics,\n \"metrics\",\n )\n )\n\n def export(\n self,\n metrics_data: MetricsData,\n timeout_millis: float = 10_000,\n **kwargs,\n ) -> MetricExportResult:\n # TODO(#2663): OTLPExporterMixin should pass timeout to gRPC\n return self._export(metrics_data)\n\n def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py"}]}
| 2,512 | 218 |
gh_patches_debug_12004
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1432
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Opsdroid logger returning errors
# Description
Hi, I have been using `opsdroid` for a few days now, and while it is a good framework, I have been having a lot of trouble. Most recently I have been getting a error `formatter refrenced before use`. I have linted my configuration.yaml and none of my python files have errors. The error message below only shows errors in opsdroid library files. Even so, I am probably doing something wrong. Any help is greatly appreciated!
## Steps to Reproduce
I just linted and built my config. Neither of those actions returned errors.
## Expected Functionality
My bot should have run on Telegram and in bash.
## Experienced Functionality
```bash
Traceback (most recent call last):
File "/home/gideongrinberg/.local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/cli/start.py", line 38, in start
configure_logging(config)
File "/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/logging.py", line 93, in configure_logging
console_handler.setFormatter(formatter)
UnboundLocalError: local variable 'formatter' referenced before assignment
```
## Versions
- **Opsdroid version:** 0.17.1
- **Python version:** Python 3.6.9
- **OS/Docker version:** Ubuntu 18.04 on Window Subsystem Linux (Windows 10)
## Configuration File
My config.yaml is too large to include, but this is the only line I've changed from the example (other than adding tokens):
```yaml
recycle-nlp:
path: '~/opdroid_recycle/skill-recycle-nlp'
```
Again, that file returns no errors when I run `opsdroid config -f [PATH] lint` or `opsdroid config -f [PATH] build`.
Additionally, the python file:
```python
from opsdroid.skill import Skill
from opsdroid.matchers import match_luisai_intent
class RecycleNLP(Skill):  # class and method names must be valid Python identifiers (no hyphens)
    @match_luisai_intent('recycle')
    async def recycle_nlp(self, message):
        if message.luisai["topScoringIntent"]["intent"] == "recycle":
            await message.respond(str(message.luisai))
```
My directory structure (/home for WSL, not windows):
```
| /home
|____ opsdroid_recycle
|
|_____ config.yaml
|_____skill-recycle-nlp
|
|____ __init__.py
|______ README.md
|______ LICENSE
|
|___.local/lib/python3.6/site-packages
```
## Additional Details
Interestingly, my bot worked fine with the example config
Any help is much appreciated!
</issue>
<code>
[start of opsdroid/logging.py]
1 """Class for Filter logs and logging logic."""
2
3 import os
4 import logging
5 import contextlib
6
7 from logging.handlers import RotatingFileHandler
8 from opsdroid.const import DEFAULT_LOG_FILENAME, __version__
9
10 _LOGGER = logging.getLogger(__name__)
11
12
13 class ParsingFilter(logging.Filter):
14 """Class that filters logs."""
15
16 def __init__(self, config, *parse_list):
17 """Create object to implement filtering."""
18 super(ParsingFilter, self).__init__()
19 self.config = config["logging"]
20 try:
21 if (
22 self.config["filter"]["whitelist"]
23 and self.config["filter"]["blacklist"]
24 ):
25 _LOGGER.warning(
26 _(
27 "Both whitelist and blacklist filters found in configuration. "
28 "Only one can be used at a time - only the whitelist filter will be used."
29 )
30 )
31 self.parse_list = [
32 logging.Filter(name) for name in parse_list[0]["whitelist"]
33 ]
34 except KeyError:
35 self.parse_list = parse_list[0].get("whitelist") or parse_list[0].get(
36 "blacklist"
37 )
38
39 self.parse_list = [logging.Filter(name) for name in self.parse_list]
40
41 def filter(self, record):
42 """Apply filter to the log message.
43
44 This is a subset of Logger.filter, this method applies the logger
45 filters and returns a bool. If the value is true the record will
46 be passed to the handlers and the log shown. If the value is
47 false it will be ignored.
48
49 Args:
50 record: a log record containing the log message and the
51 name of the log - example: opsdroid.core.
52
53 Returns:
54 Boolean: If True - pass the log to handler.
55
56 """
57
58 if self.config["filter"].get("whitelist"):
59 return any(name.filter(record) for name in self.parse_list)
60 return not any(name.filter(record) for name in self.parse_list)
61
62
63 def configure_logging(config):
64 """Configure the root logger based on user config."""
65 rootlogger = logging.getLogger()
66 logging_config = config or {}
67
68 while rootlogger.handlers:
69 rootlogger.handlers.pop()
70
71 try:
72 if config["logging"]["path"]:
73 logfile_path = os.path.expanduser(config["logging"]["path"])
74 else:
75 logfile_path = config["logging"]["path"]
76 except KeyError:
77 logfile_path = DEFAULT_LOG_FILENAME
78
79 try:
80 log_level = get_logging_level(config["logging"]["level"])
81 except KeyError:
82 log_level = logging.INFO
83
84 rootlogger.setLevel(log_level)
85
86 try:
87 if config["logging"]["extended"]:
88 formatter = logging.Formatter(
89 "%(levelname)s %(name)s.%(funcName)s(): %(message)s"
90 )
91 except KeyError:
92 formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s")
93
94 console_handler = logging.StreamHandler()
95 console_handler.setLevel(log_level)
96 console_handler.setFormatter(formatter)
97
98 with contextlib.suppress(KeyError):
99 console_handler.addFilter(ParsingFilter(config, config["logging"]["filter"]))
100
101 rootlogger.addHandler(console_handler)
102
103 with contextlib.suppress(KeyError):
104 if not config["logging"]["console"]:
105 console_handler.setLevel(logging.CRITICAL)
106
107 if logfile_path:
108 logdir = os.path.dirname(os.path.realpath(logfile_path))
109 if not os.path.isdir(logdir):
110 os.makedirs(logdir)
111
112 file_handler = RotatingFileHandler(
113 logfile_path, maxBytes=logging_config.get("file-size", 50e6)
114 )
115
116 file_handler.setLevel(log_level)
117 file_handler.setFormatter(formatter)
118
119 with contextlib.suppress(KeyError):
120 file_handler.addFilter(ParsingFilter(config, config["logging"]["filter"]))
121
122 rootlogger.addHandler(file_handler)
123 _LOGGER.info("=" * 40)
124 _LOGGER.info(_("Started opsdroid %s."), __version__)
125
126
127 def get_logging_level(logging_level):
128 """Get the logger level based on the user configuration.
129
130 Args:
131 logging_level: logging level from config file
132
133 Returns:
134 logging LEVEL ->
135 CRITICAL = 50
136 FATAL = CRITICAL
137 ERROR = 40
138 WARNING = 30
139 WARN = WARNING
140 INFO = 20
141 DEBUG = 10
142 NOTSET = 0
143
144 """
145 if logging_level == "critical":
146 return logging.CRITICAL
147
148 if logging_level == "error":
149 return logging.ERROR
150 if logging_level == "warning":
151 return logging.WARNING
152
153 if logging_level == "debug":
154 return logging.DEBUG
155
156 return logging.INFO
157
[end of opsdroid/logging.py]
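For reference, the whitelist branch of `ParsingFilter` above reduces to the stdlib `logging.Filter` prefix match on the logger name; a quick sketch of that behaviour in isolation:

```python
import logging

record = logging.LogRecord(
    "opsdroid.core", logging.INFO, __file__, 0, "hello", None, None
)
whitelist = [logging.Filter("opsdroid.core"), logging.Filter("opsdroid.logging")]
blacklist = [logging.Filter("aiohttp")]

print(any(f.filter(record) for f in whitelist))      # True  -> record is shown
print(not any(f.filter(record) for f in blacklist))  # True  -> record is shown
```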
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/logging.py b/opsdroid/logging.py
--- a/opsdroid/logging.py
+++ b/opsdroid/logging.py
@@ -83,13 +83,13 @@
rootlogger.setLevel(log_level)
- try:
+ formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s")
+
+ with contextlib.suppress(KeyError):
if config["logging"]["extended"]:
formatter = logging.Formatter(
"%(levelname)s %(name)s.%(funcName)s(): %(message)s"
)
- except KeyError:
- formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s")
console_handler = logging.StreamHandler()
console_handler.setLevel(log_level)
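The original `try`/`except KeyError` block only assigned `formatter` when `extended` was truthy or when the key was missing entirely; a present-but-false value (for example `extended: false` in the YAML) skipped both branches and left the name unbound, producing the reported `UnboundLocalError`. A minimal reproduction of that pattern:

```python
def configure(config):
    try:
        if config["logging"]["extended"]:
            formatter = "extended"
    except KeyError:
        formatter = "plain"
    return formatter


configure({"logging": {"extended": False}})
# UnboundLocalError: local variable 'formatter' referenced before assignment
```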
|
{"golden_diff": "diff --git a/opsdroid/logging.py b/opsdroid/logging.py\n--- a/opsdroid/logging.py\n+++ b/opsdroid/logging.py\n@@ -83,13 +83,13 @@\n \n rootlogger.setLevel(log_level)\n \n- try:\n+ formatter = logging.Formatter(\"%(levelname)s %(name)s: %(message)s\")\n+\n+ with contextlib.suppress(KeyError):\n if config[\"logging\"][\"extended\"]:\n formatter = logging.Formatter(\n \"%(levelname)s %(name)s.%(funcName)s(): %(message)s\"\n )\n- except KeyError:\n- formatter = logging.Formatter(\"%(levelname)s %(name)s: %(message)s\")\n \n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n", "issue": "Opsdroid logger returning errors\n# Description\r\nHi, I have been using `opsdroid` for a few days now, and while it is a good framework, I have been having a lot of trouble. Most recently I have been getting a error `formatter refrenced before use`. I have linted my configuration.yaml and none of my python files have errors. The error message below only shows errors in opsdroid library files. Even so, I am probably doing something wrong. Any help is greatly appreciated!\r\n\r\n## Steps to Reproduce\r\nI just linted and built my config. Neither of those actions returned errors.\r\n\r\n## Expected Functionality\r\nMy bot should have run on Telegram and in bash.\r\n\r\n## Experienced Functionality\r\n```bash\r\nTraceback (most recent call last):\r\n File \"/home/gideongrinberg/.local/bin/opsdroid\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/cli/start.py\", line 38, in start\r\n configure_logging(config)\r\n File \"/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/logging.py\", line 93, in configure_logging\r\n console_handler.setFormatter(formatter)\r\nUnboundLocalError: local variable 'formatter' referenced before assignment\r\n```\r\n## Versions\r\n- **Opsdroid version:** 0.17.1\r\n- **Python version:** Python 3.6.9\r\n- **OS/Docker version:** Ubuntu 18.04 on Window Subsystem Linux (Windows 10)\r\n\r\n## Configuration File\r\nMy config.yaml is to large to include, but this is the only line I've change from the example (other than adding tokens)\r\n```yaml\r\n\r\n recycle-nlp:\r\n path: '~/opdroid_recycle/skill-recycle-nlp'\r\n```\r\nAgain, that file returns no errors when I run `opsdroid config -f [PATH] lint` or `opsdroid config -f [PATH] build`.\r\n\r\nAdditionally, the python file:\r\n```python\r\nfrom opsdroid.skill import Skill\r\nfrom opsdroid.matchers import match_luisai_intent\r\n\r\nclass recycle-nlp(Skill):\r\n@match_luisai_intent('recycle')\r\n async def recycle-nlp(self, message):\r\n if message.luisai[\"topScoringIntent\"][\"intent\"]==\"recycle\":\r\n await message.respond(str(message.luisai))\r\n```\r\nMy directory structure (/home for WSL, not windows):\r\n```\r\n| /home\r\n |____ opsdroid_recycle\r\n |\r\n |_____ 
config.yaml\r\n |_____skill-recycle-nlp\r\n |\r\n |____ __init__.py\r\n |______ README.md\r\n |______ LICENSE\r\n |\r\n |___.local/lib/python3.6/site-packages\r\n```\r\n## Additional Details\r\nInterestingly, my bot worked fine with the example config\r\n\r\nAny help is much appreciated! \r\n\n", "before_files": [{"content": "\"\"\"Class for Filter logs and logging logic.\"\"\"\n\nimport os\nimport logging\nimport contextlib\n\nfrom logging.handlers import RotatingFileHandler\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, __version__\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ParsingFilter(logging.Filter):\n \"\"\"Class that filters logs.\"\"\"\n\n def __init__(self, config, *parse_list):\n \"\"\"Create object to implement filtering.\"\"\"\n super(ParsingFilter, self).__init__()\n self.config = config[\"logging\"]\n try:\n if (\n self.config[\"filter\"][\"whitelist\"]\n and self.config[\"filter\"][\"blacklist\"]\n ):\n _LOGGER.warning(\n _(\n \"Both whitelist and blacklist filters found in configuration. \"\n \"Only one can be used at a time - only the whitelist filter will be used.\"\n )\n )\n self.parse_list = [\n logging.Filter(name) for name in parse_list[0][\"whitelist\"]\n ]\n except KeyError:\n self.parse_list = parse_list[0].get(\"whitelist\") or parse_list[0].get(\n \"blacklist\"\n )\n\n self.parse_list = [logging.Filter(name) for name in self.parse_list]\n\n def filter(self, record):\n \"\"\"Apply filter to the log message.\n\n This is a subset of Logger.filter, this method applies the logger\n filters and returns a bool. If the value is true the record will\n be passed to the handlers and the log shown. If the value is\n false it will be ignored.\n\n Args:\n record: a log record containing the log message and the\n name of the log - example: opsdroid.core.\n\n Returns:\n Boolean: If True - pass the log to handler.\n\n \"\"\"\n\n if self.config[\"filter\"].get(\"whitelist\"):\n return any(name.filter(record) for name in self.parse_list)\n return not any(name.filter(record) for name in self.parse_list)\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n logging_config = config or {}\n\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n\n try:\n if config[\"logging\"][\"extended\"]:\n formatter = logging.Formatter(\n \"%(levelname)s %(name)s.%(funcName)s(): %(message)s\"\n )\n except KeyError:\n formatter = logging.Formatter(\"%(levelname)s %(name)s: %(message)s\")\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n\n with contextlib.suppress(KeyError):\n console_handler.addFilter(ParsingFilter(config, config[\"logging\"][\"filter\"]))\n\n rootlogger.addHandler(console_handler)\n\n with contextlib.suppress(KeyError):\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n file_handler = RotatingFileHandler(\n logfile_path, maxBytes=logging_config.get(\"file-size\", 
50e6)\n )\n\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n\n with contextlib.suppress(KeyError):\n file_handler.addFilter(ParsingFilter(config, config[\"logging\"][\"filter\"]))\n\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"Started opsdroid %s.\"), __version__)\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\n\n Args:\n logging_level: logging level from config file\n\n Returns:\n logging LEVEL ->\n CRITICAL = 50\n FATAL = CRITICAL\n ERROR = 40\n WARNING = 30\n WARN = WARNING\n INFO = 20\n DEBUG = 10\n NOTSET = 0\n\n \"\"\"\n if logging_level == \"critical\":\n return logging.CRITICAL\n\n if logging_level == \"error\":\n return logging.ERROR\n if logging_level == \"warning\":\n return logging.WARNING\n\n if logging_level == \"debug\":\n return logging.DEBUG\n\n return logging.INFO\n", "path": "opsdroid/logging.py"}]}
| 2,702 | 165 |
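The accepted opsdroid patch in the row above swaps a `try/except KeyError` for a default `logging.Formatter` that is only overridden when the optional `logging.extended` flag is present, via `contextlib.suppress`. Below is a minimal standalone sketch of that default-then-override pattern — standard library only; `build_formatter` is an illustrative name, not a function in opsdroid:

```python
import contextlib
import logging

def build_formatter(config: dict) -> logging.Formatter:
    """Illustrative helper (not part of opsdroid) showing the patched pattern."""
    # Start from the default format, then upgrade only if the optional
    # "extended" flag exists and is truthy; a missing key is ignored
    # instead of raising KeyError.
    formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s")
    with contextlib.suppress(KeyError):
        if config["logging"]["extended"]:
            formatter = logging.Formatter(
                "%(levelname)s %(name)s.%(funcName)s(): %(message)s"
            )
    return formatter

# No "logging" section at all -> falls back to the default format.
record = logging.LogRecord("demo", logging.INFO, __file__, 1, "hello", None, None)
print(build_formatter({}).format(record))  # INFO demo: hello
print(build_formatter({"logging": {"extended": True}}).format(record))
```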
gh_patches_debug_3312
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-5422
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] MCDCNNClassifier.fit runs only 1 training epoch
Despite n_epochs being set to 200, only one training epoch will be executed here:
```python
from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier
from keras.losses import binary_crossentropy
model = MCDCNNClassifier(n_epochs=200, batch_size=64, loss=binary_crossentropy, random_state=42, metrics=['binary_accuracy'])
model.fit(x_train, y_train)
```
The problem is that model.n_epochs is not passed to self.model_.fit in MCDCNNClassifier._fit (mcdcnn.py):
```python
def _fit(self, X, y):
...
self.history = self.model_.fit(
X,
y_onehot,
epochs=self.n_epochs, **#<<<-----------------THIS LINS IS MISSING #**
batch_size=self.batch_size,
verbose=self.verbose,
callbacks=self.callbacks_,
)
return self
```
</issue>
<code>
[start of sktime/classification/deep_learning/mcdcnn.py]
1 """Multi Channel Deep Convolutional Neural Classifier (MCDCNN)."""
2
3 __author__ = [
4 "JamesLarge",
5 ]
6
7 from copy import deepcopy
8
9 import numpy as np
10 from sklearn.utils import check_random_state
11
12 from sktime.classification.deep_learning.base import BaseDeepClassifier
13 from sktime.networks.mcdcnn import MCDCNNNetwork
14 from sktime.utils.validation._dependencies import _check_dl_dependencies
15
16
17 class MCDCNNClassifier(BaseDeepClassifier):
18 """Multi Channel Deep Convolutional Neural Classifier, as described in [1]_.
19
20 Parameters
21 ----------
22 n_epochs : int, optional (default=120)
23 The number of epochs to train the model.
24 batch_size : int, optional (default=16)
25 The number of samples per gradient update.
26 kernel_size : int, optional (default=5)
27 The size of kernel in Conv1D layer.
28 pool_size : int, optional (default=2)
29 The size of kernel in (Max) Pool layer.
30 filter_sizes : tuple, optional (default=(8, 8))
31 The sizes of filter for Conv1D layer corresponding
32 to each Conv1D in the block.
33 dense_units : int, optional (default=732)
34 The number of output units of the final Dense
35 layer of this Network. This is NOT the final layer
36 but the penultimate layer.
37 conv_padding : str or None, optional (default="same")
38 The type of padding to be applied to convolutional
39 layers.
40 pool_padding : str or None, optional (default="same")
41 The type of padding to be applied to pooling layers.
42 loss : str, optional (default="categorical_crossentropy")
43 The name of the loss function to be used during training,
44 should be supported by keras.
45 activation : str, optional (default="sigmoid")
46 The activation function to apply at the output. It should be
47 "software" if response variable has more than two types.
48 use_bias : bool, optional (default=True)
49 Whether bias should be included in the output layer.
50 metrics : None or string, optional (default=None)
51 The string which will be used during model compilation. If left as None,
52 then "accuracy" is passed to `model.compile()`.
53 optimizer: None or keras.optimizers.Optimizer instance, optional (default=None)
54 The optimizer that is used for model compiltation. If left as None,
55 then `keras.optimizers.SGD` is used with the following parameters -
56 `learning_rate=0.01, momentum=0.9, weight_decay=0.0005`.
57 callbacks : None or list of keras.callbacks.Callback, optinal (default=None)
58 The callback(s) to use during training.
59 random_state : int, optional (default=0)
60 The seed to any random action.
61
62 Notes
63 -----
64 Adapted from the implementation of Fawaz et. al
65 https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcdcnn.py
66
67 References
68 ----------
69 .. [1] Zheng et. al, Time series classification using multi-channels deep
70 convolutional neural networks, International Conference on
71 Web-Age Information Management, Pages 298-310, year 2014, organization: Springer.
72
73 Examples
74 --------
75 >>> from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier
76 >>> from sktime.datasets import load_unit_test
77 >>> X_train, y_tain = load_unit_test(split="train")
78 >>> mcdcnn = MCDCNNClassifier() # doctest: +SKIP
79 >>> mcdcnn.fit(X_train, y_train) # doctest: +SKIP
80 MCDCNNClassifier(...)
81 """
82
83 _tags = {"python_dependencies": "tensorflow"}
84
85 def __init__(
86 self,
87 n_epochs=120,
88 batch_size=16,
89 kernel_size=5,
90 pool_size=2,
91 filter_sizes=(8, 8),
92 dense_units=732,
93 conv_padding="same",
94 pool_padding="same",
95 loss="categorical_crossentropy",
96 activation="sigmoid",
97 use_bias=True,
98 callbacks=None,
99 metrics=None,
100 optimizer=None,
101 verbose=False,
102 random_state=0,
103 ):
104 _check_dl_dependencies(severity="error")
105 super().__init__()
106
107 self.n_epochs = n_epochs
108 self.batch_size = batch_size
109 self.kernel_size = kernel_size
110 self.pool_size = pool_size
111 self.filter_sizes = filter_sizes
112 self.dense_units = dense_units
113 self.conv_padding = conv_padding
114 self.pool_padding = pool_padding
115 self.loss = loss
116 self.activation = activation
117 self.use_bias = use_bias
118 self.callbacks = callbacks
119 self.metrics = metrics
120 self.optimizer = optimizer
121 self.verbose = verbose
122 self.random_state = random_state
123 self.history = None
124 self._network = MCDCNNNetwork(
125 kernel_size=self.kernel_size,
126 pool_size=self.pool_size,
127 filter_sizes=self.filter_sizes,
128 dense_units=self.dense_units,
129 conv_padding=self.conv_padding,
130 pool_padding=self.pool_padding,
131 random_state=self.random_state,
132 )
133
134 def build_model(self, input_shape, n_classes, **kwargs):
135 """Construct a compiled, un-trained, keras model that is ready for training.
136
137 In sktime, time series are stored in numpy arrays of shape (d,m), where d
138 is the number of dimensions, m is the series length. Keras/tensorflow assume
139 data is in shape (m,d). This method also assumes (m,d). Transpose should
140 happen in fit.
141
142 Parameters
143 ----------
144 input_shape : tuple
145 The shape of the data fed into the input layer, should be (m,d)
146 n_classes: int
147 The number of classes, which becomes the size of the output layer
148
149 Returns
150 -------
151 output : a compiled Keras Model
152 """
153 import tensorflow as tf
154 from tensorflow import keras
155
156 tf.random.set_seed(self.random_state)
157
158 metrics = ["accuracy"] if self.metrics is None else self.metrics
159
160 input_layers, output_layer = self._network.build_network(input_shape, **kwargs)
161
162 output_layer = keras.layers.Dense(
163 units=n_classes,
164 activation=self.activation,
165 use_bias=self.use_bias,
166 )(output_layer)
167
168 self.optimizer_ = (
169 keras.optimizers.SGD(
170 learning_rate=0.01,
171 momentum=0.9,
172 weight_decay=0.0005,
173 )
174 if self.optimizer is None
175 else self.optimizer
176 )
177
178 model = keras.models.Model(inputs=input_layers, outputs=output_layer)
179
180 model.compile(
181 loss=self.loss,
182 optimizer=self.optimizer_,
183 metrics=metrics,
184 )
185
186 return model
187
188 def _fit(self, X, y):
189 """Fit the classifier on the training set (X, y).
190
191 Parameters
192 ----------
193 X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))
194 The training input samples.
195 y : np.ndarray of shape n
196 The training data class labels.
197
198 Returns
199 -------
200 self : object
201 """
202 y_onehot = self.convert_y_to_keras(y)
203 X = X.transpose(0, 2, 1)
204 self.input_shape = X.shape[1:]
205 X = self._network._prepare_input(X)
206
207 check_random_state(self.random_state)
208
209 self.model_ = self.build_model(self.input_shape, self.n_classes_)
210 self.callbacks_ = deepcopy(self.callbacks)
211
212 if self.verbose:
213 self.model_.summary()
214
215 self.history = self.model_.fit(
216 X,
217 y_onehot,
218 batch_size=self.batch_size,
219 verbose=self.verbose,
220 callbacks=self.callbacks_,
221 )
222
223 return self
224
225 def _predict_proba(self, X, **kwargs):
226 """Find probability estimates for each class for all cases in X.
227
228 Parameters
229 ----------
230 X : an np.ndarray of shape = (n_instances, n_dimensions, series_length)
231 The training input samples.
232
233 Returns
234 -------
235 output : array of shape = [n_instances, n_classes] of probabilities
236 """
237 self.check_is_fitted()
238 X = X.transpose([0, 2, 1])
239 X = self._network._prepare_input(X)
240
241 probs = self.model_.predict(X, self.batch_size, **kwargs)
242
243 if probs.shape[1] == 1:
244 probs = np.hstack([1 - probs, probs])
245 probs = probs / probs.sum(axis=1, keepdims=1)
246 return probs
247
[end of sktime/classification/deep_learning/mcdcnn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sktime/classification/deep_learning/mcdcnn.py b/sktime/classification/deep_learning/mcdcnn.py
--- a/sktime/classification/deep_learning/mcdcnn.py
+++ b/sktime/classification/deep_learning/mcdcnn.py
@@ -215,6 +215,7 @@
self.history = self.model_.fit(
X,
y_onehot,
+ epochs=self.n_epochs,
batch_size=self.batch_size,
verbose=self.verbose,
callbacks=self.callbacks_,
|
{"golden_diff": "diff --git a/sktime/classification/deep_learning/mcdcnn.py b/sktime/classification/deep_learning/mcdcnn.py\n--- a/sktime/classification/deep_learning/mcdcnn.py\n+++ b/sktime/classification/deep_learning/mcdcnn.py\n@@ -215,6 +215,7 @@\n self.history = self.model_.fit(\r\n X,\r\n y_onehot,\r\n+ epochs=self.n_epochs,\r\n batch_size=self.batch_size,\r\n verbose=self.verbose,\r\n callbacks=self.callbacks_,\n", "issue": "[BUG] MCDCNNClassifier.fit runs only 1 training epoch\nDespite n_epochs being set to 200, only one training epoch will be executed here:\r\n\r\n```python\r\nfrom sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier\r\nfrom keras.losses import binary_crossentropy\r\n\r\nmodel = MCDCNNClassifier(n_epochs=200, batch_size=64, loss=binary_crossentropy, random_state=42, metrics=['binary_accuracy'])\r\nmodel.fit(x_train, y_train)\r\n```\r\n\r\nThe problem is that model.n_epochs is not passed to self.model_.fit in MCDCNNClassifier._fit (mcdcnn.py):\r\n\r\n```python\r\n def _fit(self, X, y):\r\n ...\r\n\r\n self.history = self.model_.fit(\r\n X,\r\n y_onehot,\r\n epochs=self.n_epochs, **#<<<-----------------THIS LINS IS MISSING #**\r\n batch_size=self.batch_size,\r\n verbose=self.verbose,\r\n callbacks=self.callbacks_,\r\n )\r\n\r\n return self\r\n```\n", "before_files": [{"content": "\"\"\"Multi Channel Deep Convolutional Neural Classifier (MCDCNN).\"\"\"\r\n\r\n__author__ = [\r\n \"JamesLarge\",\r\n]\r\n\r\nfrom copy import deepcopy\r\n\r\nimport numpy as np\r\nfrom sklearn.utils import check_random_state\r\n\r\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\r\nfrom sktime.networks.mcdcnn import MCDCNNNetwork\r\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\r\n\r\n\r\nclass MCDCNNClassifier(BaseDeepClassifier):\r\n \"\"\"Multi Channel Deep Convolutional Neural Classifier, as described in [1]_.\r\n\r\n Parameters\r\n ----------\r\n n_epochs : int, optional (default=120)\r\n The number of epochs to train the model.\r\n batch_size : int, optional (default=16)\r\n The number of samples per gradient update.\r\n kernel_size : int, optional (default=5)\r\n The size of kernel in Conv1D layer.\r\n pool_size : int, optional (default=2)\r\n The size of kernel in (Max) Pool layer.\r\n filter_sizes : tuple, optional (default=(8, 8))\r\n The sizes of filter for Conv1D layer corresponding\r\n to each Conv1D in the block.\r\n dense_units : int, optional (default=732)\r\n The number of output units of the final Dense\r\n layer of this Network. This is NOT the final layer\r\n but the penultimate layer.\r\n conv_padding : str or None, optional (default=\"same\")\r\n The type of padding to be applied to convolutional\r\n layers.\r\n pool_padding : str or None, optional (default=\"same\")\r\n The type of padding to be applied to pooling layers.\r\n loss : str, optional (default=\"categorical_crossentropy\")\r\n The name of the loss function to be used during training,\r\n should be supported by keras.\r\n activation : str, optional (default=\"sigmoid\")\r\n The activation function to apply at the output. It should be\r\n \"software\" if response variable has more than two types.\r\n use_bias : bool, optional (default=True)\r\n Whether bias should be included in the output layer.\r\n metrics : None or string, optional (default=None)\r\n The string which will be used during model compilation. 
If left as None,\r\n then \"accuracy\" is passed to `model.compile()`.\r\n optimizer: None or keras.optimizers.Optimizer instance, optional (default=None)\r\n The optimizer that is used for model compiltation. If left as None,\r\n then `keras.optimizers.SGD` is used with the following parameters -\r\n `learning_rate=0.01, momentum=0.9, weight_decay=0.0005`.\r\n callbacks : None or list of keras.callbacks.Callback, optinal (default=None)\r\n The callback(s) to use during training.\r\n random_state : int, optional (default=0)\r\n The seed to any random action.\r\n\r\n Notes\r\n -----\r\n Adapted from the implementation of Fawaz et. al\r\n https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcdcnn.py\r\n\r\n References\r\n ----------\r\n .. [1] Zheng et. al, Time series classification using multi-channels deep\r\n convolutional neural networks, International Conference on\r\n Web-Age Information Management, Pages 298-310, year 2014, organization: Springer.\r\n\r\n Examples\r\n --------\r\n >>> from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier\r\n >>> from sktime.datasets import load_unit_test\r\n >>> X_train, y_tain = load_unit_test(split=\"train\")\r\n >>> mcdcnn = MCDCNNClassifier() # doctest: +SKIP\r\n >>> mcdcnn.fit(X_train, y_train) # doctest: +SKIP\r\n MCDCNNClassifier(...)\r\n \"\"\"\r\n\r\n _tags = {\"python_dependencies\": \"tensorflow\"}\r\n\r\n def __init__(\r\n self,\r\n n_epochs=120,\r\n batch_size=16,\r\n kernel_size=5,\r\n pool_size=2,\r\n filter_sizes=(8, 8),\r\n dense_units=732,\r\n conv_padding=\"same\",\r\n pool_padding=\"same\",\r\n loss=\"categorical_crossentropy\",\r\n activation=\"sigmoid\",\r\n use_bias=True,\r\n callbacks=None,\r\n metrics=None,\r\n optimizer=None,\r\n verbose=False,\r\n random_state=0,\r\n ):\r\n _check_dl_dependencies(severity=\"error\")\r\n super().__init__()\r\n\r\n self.n_epochs = n_epochs\r\n self.batch_size = batch_size\r\n self.kernel_size = kernel_size\r\n self.pool_size = pool_size\r\n self.filter_sizes = filter_sizes\r\n self.dense_units = dense_units\r\n self.conv_padding = conv_padding\r\n self.pool_padding = pool_padding\r\n self.loss = loss\r\n self.activation = activation\r\n self.use_bias = use_bias\r\n self.callbacks = callbacks\r\n self.metrics = metrics\r\n self.optimizer = optimizer\r\n self.verbose = verbose\r\n self.random_state = random_state\r\n self.history = None\r\n self._network = MCDCNNNetwork(\r\n kernel_size=self.kernel_size,\r\n pool_size=self.pool_size,\r\n filter_sizes=self.filter_sizes,\r\n dense_units=self.dense_units,\r\n conv_padding=self.conv_padding,\r\n pool_padding=self.pool_padding,\r\n random_state=self.random_state,\r\n )\r\n\r\n def build_model(self, input_shape, n_classes, **kwargs):\r\n \"\"\"Construct a compiled, un-trained, keras model that is ready for training.\r\n\r\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\r\n is the number of dimensions, m is the series length. Keras/tensorflow assume\r\n data is in shape (m,d). This method also assumes (m,d). 
Transpose should\r\n happen in fit.\r\n\r\n Parameters\r\n ----------\r\n input_shape : tuple\r\n The shape of the data fed into the input layer, should be (m,d)\r\n n_classes: int\r\n The number of classes, which becomes the size of the output layer\r\n\r\n Returns\r\n -------\r\n output : a compiled Keras Model\r\n \"\"\"\r\n import tensorflow as tf\r\n from tensorflow import keras\r\n\r\n tf.random.set_seed(self.random_state)\r\n\r\n metrics = [\"accuracy\"] if self.metrics is None else self.metrics\r\n\r\n input_layers, output_layer = self._network.build_network(input_shape, **kwargs)\r\n\r\n output_layer = keras.layers.Dense(\r\n units=n_classes,\r\n activation=self.activation,\r\n use_bias=self.use_bias,\r\n )(output_layer)\r\n\r\n self.optimizer_ = (\r\n keras.optimizers.SGD(\r\n learning_rate=0.01,\r\n momentum=0.9,\r\n weight_decay=0.0005,\r\n )\r\n if self.optimizer is None\r\n else self.optimizer\r\n )\r\n\r\n model = keras.models.Model(inputs=input_layers, outputs=output_layer)\r\n\r\n model.compile(\r\n loss=self.loss,\r\n optimizer=self.optimizer_,\r\n metrics=metrics,\r\n )\r\n\r\n return model\r\n\r\n def _fit(self, X, y):\r\n \"\"\"Fit the classifier on the training set (X, y).\r\n\r\n Parameters\r\n ----------\r\n X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))\r\n The training input samples.\r\n y : np.ndarray of shape n\r\n The training data class labels.\r\n\r\n Returns\r\n -------\r\n self : object\r\n \"\"\"\r\n y_onehot = self.convert_y_to_keras(y)\r\n X = X.transpose(0, 2, 1)\r\n self.input_shape = X.shape[1:]\r\n X = self._network._prepare_input(X)\r\n\r\n check_random_state(self.random_state)\r\n\r\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\r\n self.callbacks_ = deepcopy(self.callbacks)\r\n\r\n if self.verbose:\r\n self.model_.summary()\r\n\r\n self.history = self.model_.fit(\r\n X,\r\n y_onehot,\r\n batch_size=self.batch_size,\r\n verbose=self.verbose,\r\n callbacks=self.callbacks_,\r\n )\r\n\r\n return self\r\n\r\n def _predict_proba(self, X, **kwargs):\r\n \"\"\"Find probability estimates for each class for all cases in X.\r\n\r\n Parameters\r\n ----------\r\n X : an np.ndarray of shape = (n_instances, n_dimensions, series_length)\r\n The training input samples.\r\n\r\n Returns\r\n -------\r\n output : array of shape = [n_instances, n_classes] of probabilities\r\n \"\"\"\r\n self.check_is_fitted()\r\n X = X.transpose([0, 2, 1])\r\n X = self._network._prepare_input(X)\r\n\r\n probs = self.model_.predict(X, self.batch_size, **kwargs)\r\n\r\n if probs.shape[1] == 1:\r\n probs = np.hstack([1 - probs, probs])\r\n probs = probs / probs.sum(axis=1, keepdims=1)\r\n return probs\r\n", "path": "sktime/classification/deep_learning/mcdcnn.py"}]}
| 3,306 | 115 |
gh_patches_debug_19873
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-2067
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Size limits on logo, favicon, image uploads
Sometimes people upload really big images for small things like the logo or the favicon. We should impose some kind of size limit or automatic resizing.
</issue>
<code>
[start of CTFd/forms/setup.py]
1 from wtforms import (
2 FileField,
3 HiddenField,
4 PasswordField,
5 RadioField,
6 SelectField,
7 StringField,
8 TextAreaField,
9 )
10 from wtforms.fields.html5 import EmailField
11 from wtforms.validators import InputRequired
12
13 from CTFd.constants.themes import DEFAULT_THEME
14 from CTFd.forms import BaseForm
15 from CTFd.forms.fields import SubmitField
16 from CTFd.utils.config import get_themes
17
18
19 class SetupForm(BaseForm):
20 ctf_name = StringField(
21 "Event Name", description="The name of your CTF event/workshop"
22 )
23 ctf_description = TextAreaField(
24 "Event Description", description="Description for the CTF"
25 )
26 user_mode = RadioField(
27 "User Mode",
28 choices=[("teams", "Team Mode"), ("users", "User Mode")],
29 default="teams",
30 description="Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)",
31 validators=[InputRequired()],
32 )
33
34 name = StringField(
35 "Admin Username",
36 description="Your username for the administration account",
37 validators=[InputRequired()],
38 )
39 email = EmailField(
40 "Admin Email",
41 description="Your email address for the administration account",
42 validators=[InputRequired()],
43 )
44 password = PasswordField(
45 "Admin Password",
46 description="Your password for the administration account",
47 validators=[InputRequired()],
48 )
49
50 ctf_logo = FileField(
51 "Logo",
52 description="Logo to use for the website instead of a CTF name. Used as the home page button.",
53 )
54 ctf_banner = FileField("Banner", description="Banner to use for the homepage.")
55 ctf_small_icon = FileField(
56 "Small Icon",
57 description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.",
58 )
59 ctf_theme = SelectField(
60 "Theme",
61 description="CTFd Theme to use",
62 choices=list(zip(get_themes(), get_themes())),
63 default=DEFAULT_THEME,
64 validators=[InputRequired()],
65 )
66 theme_color = HiddenField(
67 "Theme Color",
68 description="Color used by theme to control aesthetics. Requires theme support. Optional.",
69 )
70
71 start = StringField(
72 "Start Time", description="Time when your CTF is scheduled to start. Optional."
73 )
74 end = StringField(
75 "End Time", description="Time when your CTF is scheduled to end. Optional."
76 )
77 submit = SubmitField("Finish")
78
[end of CTFd/forms/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/forms/setup.py b/CTFd/forms/setup.py
--- a/CTFd/forms/setup.py
+++ b/CTFd/forms/setup.py
@@ -49,16 +49,18 @@
ctf_logo = FileField(
"Logo",
- description="Logo to use for the website instead of a CTF name. Used as the home page button.",
+ description="Logo to use for the website instead of a CTF name. Used as the home page button. Optional.",
+ )
+ ctf_banner = FileField(
+ "Banner", description="Banner to use for the homepage. Optional."
)
- ctf_banner = FileField("Banner", description="Banner to use for the homepage.")
ctf_small_icon = FileField(
"Small Icon",
- description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.",
+ description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px. Optional.",
)
ctf_theme = SelectField(
"Theme",
- description="CTFd Theme to use",
+ description="CTFd Theme to use. Can be changed later.",
choices=list(zip(get_themes(), get_themes())),
default=DEFAULT_THEME,
validators=[InputRequired()],
|
{"golden_diff": "diff --git a/CTFd/forms/setup.py b/CTFd/forms/setup.py\n--- a/CTFd/forms/setup.py\n+++ b/CTFd/forms/setup.py\n@@ -49,16 +49,18 @@\n \n ctf_logo = FileField(\n \"Logo\",\n- description=\"Logo to use for the website instead of a CTF name. Used as the home page button.\",\n+ description=\"Logo to use for the website instead of a CTF name. Used as the home page button. Optional.\",\n+ )\n+ ctf_banner = FileField(\n+ \"Banner\", description=\"Banner to use for the homepage. Optional.\"\n )\n- ctf_banner = FileField(\"Banner\", description=\"Banner to use for the homepage.\")\n ctf_small_icon = FileField(\n \"Small Icon\",\n- description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.\",\n+ description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px. Optional.\",\n )\n ctf_theme = SelectField(\n \"Theme\",\n- description=\"CTFd Theme to use\",\n+ description=\"CTFd Theme to use. Can be changed later.\",\n choices=list(zip(get_themes(), get_themes())),\n default=DEFAULT_THEME,\n validators=[InputRequired()],\n", "issue": "Size limits on logo, favicon, image uploads\nSometimes people upload really big images for small things like the logo or the favicon. We should impose some kind of size limit or automatic resizing. \n", "before_files": [{"content": "from wtforms import (\n FileField,\n HiddenField,\n PasswordField,\n RadioField,\n SelectField,\n StringField,\n TextAreaField,\n)\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import InputRequired\n\nfrom CTFd.constants.themes import DEFAULT_THEME\nfrom CTFd.forms import BaseForm\nfrom CTFd.forms.fields import SubmitField\nfrom CTFd.utils.config import get_themes\n\n\nclass SetupForm(BaseForm):\n ctf_name = StringField(\n \"Event Name\", description=\"The name of your CTF event/workshop\"\n )\n ctf_description = TextAreaField(\n \"Event Description\", description=\"Description for the CTF\"\n )\n user_mode = RadioField(\n \"User Mode\",\n choices=[(\"teams\", \"Team Mode\"), (\"users\", \"User Mode\")],\n default=\"teams\",\n description=\"Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)\",\n validators=[InputRequired()],\n )\n\n name = StringField(\n \"Admin Username\",\n description=\"Your username for the administration account\",\n validators=[InputRequired()],\n )\n email = EmailField(\n \"Admin Email\",\n description=\"Your email address for the administration account\",\n validators=[InputRequired()],\n )\n password = PasswordField(\n \"Admin Password\",\n description=\"Your password for the administration account\",\n validators=[InputRequired()],\n )\n\n ctf_logo = FileField(\n \"Logo\",\n description=\"Logo to use for the website instead of a CTF name. Used as the home page button.\",\n )\n ctf_banner = FileField(\"Banner\", description=\"Banner to use for the homepage.\")\n ctf_small_icon = FileField(\n \"Small Icon\",\n description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.\",\n )\n ctf_theme = SelectField(\n \"Theme\",\n description=\"CTFd Theme to use\",\n choices=list(zip(get_themes(), get_themes())),\n default=DEFAULT_THEME,\n validators=[InputRequired()],\n )\n theme_color = HiddenField(\n \"Theme Color\",\n description=\"Color used by theme to control aesthetics. Requires theme support. Optional.\",\n )\n\n start = StringField(\n \"Start Time\", description=\"Time when your CTF is scheduled to start. 
Optional.\"\n    )\n    end = StringField(\n        \"End Time\", description=\"Time when your CTF is scheduled to end. Optional.\"\n    )\n    submit = SubmitField(\"Finish\")\n", "path": "CTFd/forms/setup.py"}]}
| 1,265 | 294 |
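The CTFd patch above only softens the setup-form descriptions (marking the logo, banner, and favicon as optional); it does not add the size limit or automatic resizing the issue asks for. A rough, hypothetical check of that kind might look like the following — a standalone Pillow sketch, not CTFd's actual upload path; the limit constant and function name are invented for illustration:

```python
import io

from PIL import Image   # assumes Pillow is available

MAX_UPLOAD_BYTES = 512 * 1024   # illustrative cap, not a real CTFd constant
FAVICON_SIZE = (32, 32)         # "Must be 32x32px" per the form description

def validate_favicon(data: bytes) -> None:
    """Reject oversized files and non-32x32 PNGs before they are stored."""
    if len(data) > MAX_UPLOAD_BYTES:
        raise ValueError("favicon upload is too large")
    img = Image.open(io.BytesIO(data))
    if img.format != "PNG":
        raise ValueError("favicon must be a PNG")
    if img.size != FAVICON_SIZE:
        raise ValueError(f"favicon must be {FAVICON_SIZE[0]}x{FAVICON_SIZE[1]}px")
```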