| Column | Type | Values in preview |
| --- | --- | --- |
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.71k-18.9k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 465-23.6k |
| num_tokens_prompt | int64 | 556-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
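The rows below are raw preview samples: each record's cells (problem_id, source, task_type, in_source_id, prompt, golden_diff, verification_info, and the two token counts) are printed one after another, separated by `|`. A minimal sketch for reading a record programmatically with the Hugging Face `datasets` library follows; the `train` split name and the assumption that `verification_info` parses as JSON are inferred from the preview, not stated by it.

```python
import json

from datasets import load_dataset

# Load the dataset shown in the preview; "train" is an assumed split name.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])       # e.g. "gh_patches_debug_17349"
print(row["in_source_id"])     # upstream issue/PR identifier
print(row["prompt"][:300])     # issue statement + partial code + patch-format instructions
print(row["golden_diff"])      # reference patch in `git apply` format

# verification_info appears in the preview as escaped JSON holding the
# golden_diff, the issue text, and the pre-patch file contents (before_files).
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```

Each prompt pairs a quoted GitHub issue with the relevant source file and asks for a single patch in `git apply` format, while golden_diff holds the reference fix and num_tokens_prompt / num_tokens_diff record their token counts.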
gh_patches_debug_17349 | rasdani/github-patches | git_diff | conan-io__conan-center-index-19060 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] fast-cdr/1.1.0
### Package Name/Version
fast-cdr/1.1.0
### Changelog
https://github.com/eProsima/Fast-CDR/releases/tag/v1.1.0
### Context about the new update
The Conan Center Bot detects the updatable recipe in #3470.
Open branch [qchateau/conan-center-index/ccb-fast-cdr-1.1.0](https://github.com/qchateau/conan-center-index/tree/ccb-fast-cdr-1.1.0)
</issue>
<code>
[start of recipes/fast-cdr/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.build import check_min_cppstd
4 from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
5 from conan.tools.files import collect_libs, copy, get, rm, rmdir, save
6 from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
7 import os
8 import textwrap
9
10 required_conan_version = ">=1.54.0"
11
12
13 class FastCDRConan(ConanFile):
14 name = "fast-cdr"
15 license = "Apache-2.0"
16 homepage = "https://github.com/eProsima/Fast-CDR"
17 url = "https://github.com/conan-io/conan-center-index"
18 description = "eProsima FastCDR library for serialization"
19 topics = ("dds", "middleware", "serialization")
20
21 package_type = "library"
22 settings = "os", "arch", "compiler", "build_type"
23 options = {
24 "shared": [True, False],
25 "fPIC": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 }
31
32 def config_options(self):
33 if self.settings.os == "Windows":
34 del self.options.fPIC
35
36 def configure(self):
37 if self.options.shared:
38 self.options.rm_safe("fPIC")
39
40 def layout(self):
41 cmake_layout(self, src_folder="src")
42
43 def validate(self):
44 if self.settings.compiler.get_safe("cppstd"):
45 check_min_cppstd(self, 11)
46 if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self):
47 # This combination leads to an fast-cdr error when linking
48 # linking dynamic '*.dll' and static MT runtime
49 # see https://github.com/eProsima/Fast-CDR/blob/v1.0.21/include/fastcdr/eProsima_auto_link.h#L37
50 # (2021-05-31)
51 raise ConanInvalidConfiguration("Mixing a dll eprosima library with a static runtime is a bad idea")
52
53 def source(self):
54 get(self, **self.conan_data["sources"][self.version], strip_root=True)
55
56 def generate(self):
57 tc = CMakeToolchain(self)
58 tc.variables["BUILD_STATIC"] = not self.options.shared
59 tc.generate()
60
61 def build(self):
62 cmake = CMake(self)
63 cmake.configure()
64 cmake.build()
65
66 def package(self):
67 copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
68 cmake = CMake(self)
69 cmake.install()
70 rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
71 rmdir(self, os.path.join(self.package_folder, "share"))
72 rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
73 rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
74
75 # TODO: to remove in conan v2 once cmake_find_package_* generators removed
76 self._create_cmake_module_alias_targets(
77 os.path.join(self.package_folder, self._module_file_rel_path),
78 {"fastcdr": "fastcdr::fastcdr"}
79 )
80
81 def _create_cmake_module_alias_targets(self, module_file, targets):
82 content = ""
83 for alias, aliased in targets.items():
84 content += textwrap.dedent(f"""\
85 if(TARGET {aliased} AND NOT TARGET {alias})
86 add_library({alias} INTERFACE IMPORTED)
87 set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
88 endif()
89 """)
90 save(self, module_file, content)
91
92 @property
93 def _module_file_rel_path(self):
94 return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")
95
96 def package_info(self):
97 self.cpp_info.set_property("cmake_file_name", "fastcdr")
98 self.cpp_info.set_property("cmake_target_name", "fastcdr")
99 self.cpp_info.libs = collect_libs(self)
100 if self.settings.os == "Windows" and self.options.shared:
101 self.cpp_info.defines.append("FASTCDR_DYN_LINK")
102
103 # TODO: to remove in conan v2 once cmake_find_package_* generators removed
104 self.cpp_info.names["cmake_find_package"] = "fastcdr"
105 self.cpp_info.names["cmake_find_package_multi"] = "fastcdr"
106 self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
107 self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
108
[end of recipes/fast-cdr/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/fast-cdr/all/conanfile.py b/recipes/fast-cdr/all/conanfile.py
--- a/recipes/fast-cdr/all/conanfile.py
+++ b/recipes/fast-cdr/all/conanfile.py
@@ -4,6 +4,7 @@
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import collect_libs, copy, get, rm, rmdir, save
from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
+from conan.tools.scm import Version
import os
import textwrap
@@ -40,6 +41,10 @@
def layout(self):
cmake_layout(self, src_folder="src")
+ def build_requirements(self):
+ if Version(self.version) >= "1.1.0":
+ self.tool_requires("cmake/[>=3.16.3 <4]")
+
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 11)
| {"golden_diff": "diff --git a/recipes/fast-cdr/all/conanfile.py b/recipes/fast-cdr/all/conanfile.py\n--- a/recipes/fast-cdr/all/conanfile.py\n+++ b/recipes/fast-cdr/all/conanfile.py\n@@ -4,6 +4,7 @@\n from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\n from conan.tools.files import collect_libs, copy, get, rm, rmdir, save\n from conan.tools.microsoft import is_msvc, is_msvc_static_runtime\n+from conan.tools.scm import Version\n import os\n import textwrap\n \n@@ -40,6 +41,10 @@\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n \n+ def build_requirements(self):\n+ if Version(self.version) >= \"1.1.0\":\n+ self.tool_requires(\"cmake/[>=3.16.3 <4]\")\n+\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n", "issue": "[request] fast-cdr/1.1.0\n### Package Name/Version\n\nfast-cdr/1.1.0\n\n### Changelog\n\nhttps://github.com/eProsima/Fast-CDR/releases/tag/v1.1.0\n\n### Context about the new update\n\nThe Conan Center Bot detects the updatable recipe in #3470.\r\nOpen branch [qchateau/conan-center-index/ccb-fast-cdr-1.1.0](https://github.com/qchateau/conan-center-index/tree/ccb-fast-cdr-1.1.0)\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import collect_libs, copy, get, rm, rmdir, save\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.54.0\"\n\n\nclass FastCDRConan(ConanFile):\n name = \"fast-cdr\"\n license = \"Apache-2.0\"\n homepage = \"https://github.com/eProsima/Fast-CDR\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"eProsima FastCDR library for serialization\"\n topics = (\"dds\", \"middleware\", \"serialization\")\n\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self):\n # This combination leads to an fast-cdr error when linking\n # linking dynamic '*.dll' and static MT runtime\n # see https://github.com/eProsima/Fast-CDR/blob/v1.0.21/include/fastcdr/eProsima_auto_link.h#L37\n # (2021-05-31)\n raise ConanInvalidConfiguration(\"Mixing a dll eprosima library with a static runtime is a bad idea\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_STATIC\"] = not self.options.shared\n tc.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.pdb\", 
os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"fastcdr\": \"fastcdr::fastcdr\"}\n )\n\n def _create_cmake_module_alias_targets(self, module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(f\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\")\n save(self, module_file, content)\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\"lib\", \"cmake\", f\"conan-official-{self.name}-targets.cmake\")\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"fastcdr\")\n self.cpp_info.set_property(\"cmake_target_name\", \"fastcdr\")\n self.cpp_info.libs = collect_libs(self)\n if self.settings.os == \"Windows\" and self.options.shared:\n self.cpp_info.defines.append(\"FASTCDR_DYN_LINK\")\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self.cpp_info.names[\"cmake_find_package\"] = \"fastcdr\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"fastcdr\"\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n", "path": "recipes/fast-cdr/all/conanfile.py"}]} | 1,912 | 232 |
gh_patches_debug_1457 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-539 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use list markup for lists of elements
Part of the BITV-Test: "1.3.1b HTML-Strukturelemente für Listen"
----
- [x] list of blueprints
- [x] list of projects
unsure:
- [ ] list of questions in poll contents
- [ ] list of choices in poll contents
</issue>
<code>
[start of apps/contrib/templatetags/contrib_tags.py]
1 from django import template
2 from django.template.loader import render_to_string
3
4 register = template.Library()
5
6
7 @register.assignment_tag
8 def include_template_string(template, **kwargs):
9 rendered_template = render_to_string(template, kwargs)
10 return str(rendered_template)
11
12
13 @register.assignment_tag
14 def combined_url_parameter(request_query_dict, **kwargs):
15 combined_query_dict = request_query_dict.copy()
16 for key in kwargs:
17 combined_query_dict.setlist(key, [kwargs[key]])
18 encoded_parameter = '?' + combined_query_dict.urlencode()
19 return encoded_parameter
20
21
22 @register.assignment_tag
23 def filter_has_perm(perm, user, objects):
24 """Filter a list of objects based on user permissions."""
25 if not hasattr(user, 'has_perm'):
26 # If the swapped user model does not support permissions, all objects
27 # will be returned. This is taken from rules.templatetags.has_perm.
28 return objects
29 else:
30 return (obj for obj in objects if user.has_perm(perm, obj))
31
[end of apps/contrib/templatetags/contrib_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py
--- a/apps/contrib/templatetags/contrib_tags.py
+++ b/apps/contrib/templatetags/contrib_tags.py
@@ -27,4 +27,4 @@
# will be returned. This is taken from rules.templatetags.has_perm.
return objects
else:
- return (obj for obj in objects if user.has_perm(perm, obj))
+ return [obj for obj in objects if user.has_perm(perm, obj)]
| {"golden_diff": "diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py\n--- a/apps/contrib/templatetags/contrib_tags.py\n+++ b/apps/contrib/templatetags/contrib_tags.py\n@@ -27,4 +27,4 @@\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n- return (obj for obj in objects if user.has_perm(perm, obj))\n+ return [obj for obj in objects if user.has_perm(perm, obj)]\n", "issue": "use list markup for lists of elements\nPart of the BITV-Test: \"1.3.1b HTML-Strukturelemente f\u00fcr Listen\"\r\n----\r\n- [x] list of blueprints\r\n- [x] list of projects\r\n\r\nunsure:\r\n- [ ] list of questions in poll contents\r\n- [ ] list of choices in poll contents \n", "before_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return (obj for obj in objects if user.has_perm(perm, obj))\n", "path": "apps/contrib/templatetags/contrib_tags.py"}]} | 892 | 134 |
gh_patches_debug_11284 | rasdani/github-patches | git_diff | encode__starlette-867 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Invalid cookie name leads to exception
When handling a request with an invalid cookie name (does not conform to RFC2109) starlette raises an exception. i.e iam/cookiename
This is because Starlette uses Python's stdlib cookie library, which is very strict.
I do understand the strictness, but in real life scenarios you receive such malformed cookies and I want to handle those requests.
My suggestion for a solution would be to catch those exceptions and ignore the invalid cookie.
** EDIT **
I just realized stdlib is used for the whole cookie header, hence can't ignore only one cookie.
I'll create a PR for ignoring the whole cookie on such case, but maybe we should create our own Cookie/Morsel class and override the methods to ignore such error in the inbound case?
</issue>
<code>
[start of starlette/requests.py]
1 import asyncio
2 import http.cookies
3 import json
4 import typing
5 from collections.abc import Mapping
6
7 from starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State
8 from starlette.formparsers import FormParser, MultiPartParser
9 from starlette.types import Message, Receive, Scope, Send
10
11 try:
12 from multipart.multipart import parse_options_header
13 except ImportError: # pragma: nocover
14 parse_options_header = None
15
16
17 SERVER_PUSH_HEADERS_TO_COPY = {
18 "accept",
19 "accept-encoding",
20 "accept-language",
21 "cache-control",
22 "user-agent",
23 }
24
25
26 class ClientDisconnect(Exception):
27 pass
28
29
30 class HTTPConnection(Mapping):
31 """
32 A base class for incoming HTTP connections, that is used to provide
33 any functionality that is common to both `Request` and `WebSocket`.
34 """
35
36 def __init__(self, scope: Scope, receive: Receive = None) -> None:
37 assert scope["type"] in ("http", "websocket")
38 self.scope = scope
39
40 def __getitem__(self, key: str) -> str:
41 return self.scope[key]
42
43 def __iter__(self) -> typing.Iterator[str]:
44 return iter(self.scope)
45
46 def __len__(self) -> int:
47 return len(self.scope)
48
49 @property
50 def app(self) -> typing.Any:
51 return self.scope["app"]
52
53 @property
54 def url(self) -> URL:
55 if not hasattr(self, "_url"):
56 self._url = URL(scope=self.scope)
57 return self._url
58
59 @property
60 def base_url(self) -> URL:
61 if not hasattr(self, "_base_url"):
62 base_url_scope = dict(self.scope)
63 base_url_scope["path"] = "/"
64 base_url_scope["query_string"] = b""
65 base_url_scope["root_path"] = base_url_scope.get(
66 "app_root_path", base_url_scope.get("root_path", "")
67 )
68 self._base_url = URL(scope=base_url_scope)
69 return self._base_url
70
71 @property
72 def headers(self) -> Headers:
73 if not hasattr(self, "_headers"):
74 self._headers = Headers(scope=self.scope)
75 return self._headers
76
77 @property
78 def query_params(self) -> QueryParams:
79 if not hasattr(self, "_query_params"):
80 self._query_params = QueryParams(self.scope["query_string"])
81 return self._query_params
82
83 @property
84 def path_params(self) -> dict:
85 return self.scope.get("path_params", {})
86
87 @property
88 def cookies(self) -> typing.Dict[str, str]:
89 if not hasattr(self, "_cookies"):
90 cookies = {}
91 cookie_header = self.headers.get("cookie")
92 if cookie_header:
93 cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie
94 cookie.load(cookie_header)
95 for key, morsel in cookie.items():
96 cookies[key] = morsel.value
97 self._cookies = cookies
98 return self._cookies
99
100 @property
101 def client(self) -> Address:
102 host, port = self.scope.get("client") or (None, None)
103 return Address(host=host, port=port)
104
105 @property
106 def session(self) -> dict:
107 assert (
108 "session" in self.scope
109 ), "SessionMiddleware must be installed to access request.session"
110 return self.scope["session"]
111
112 @property
113 def auth(self) -> typing.Any:
114 assert (
115 "auth" in self.scope
116 ), "AuthenticationMiddleware must be installed to access request.auth"
117 return self.scope["auth"]
118
119 @property
120 def user(self) -> typing.Any:
121 assert (
122 "user" in self.scope
123 ), "AuthenticationMiddleware must be installed to access request.user"
124 return self.scope["user"]
125
126 @property
127 def state(self) -> State:
128 if not hasattr(self, "_state"):
129 # Ensure 'state' has an empty dict if it's not already populated.
130 self.scope.setdefault("state", {})
131 # Create a state instance with a reference to the dict in which it should store info
132 self._state = State(self.scope["state"])
133 return self._state
134
135 def url_for(self, name: str, **path_params: typing.Any) -> str:
136 router = self.scope["router"]
137 url_path = router.url_path_for(name, **path_params)
138 return url_path.make_absolute_url(base_url=self.base_url)
139
140
141 async def empty_receive() -> Message:
142 raise RuntimeError("Receive channel has not been made available")
143
144
145 async def empty_send(message: Message) -> None:
146 raise RuntimeError("Send channel has not been made available")
147
148
149 class Request(HTTPConnection):
150 def __init__(
151 self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send
152 ):
153 super().__init__(scope)
154 assert scope["type"] == "http"
155 self._receive = receive
156 self._send = send
157 self._stream_consumed = False
158 self._is_disconnected = False
159
160 @property
161 def method(self) -> str:
162 return self.scope["method"]
163
164 @property
165 def receive(self) -> Receive:
166 return self._receive
167
168 async def stream(self) -> typing.AsyncGenerator[bytes, None]:
169 if hasattr(self, "_body"):
170 yield self._body
171 yield b""
172 return
173
174 if self._stream_consumed:
175 raise RuntimeError("Stream consumed")
176
177 self._stream_consumed = True
178 while True:
179 message = await self._receive()
180 if message["type"] == "http.request":
181 body = message.get("body", b"")
182 if body:
183 yield body
184 if not message.get("more_body", False):
185 break
186 elif message["type"] == "http.disconnect":
187 self._is_disconnected = True
188 raise ClientDisconnect()
189 yield b""
190
191 async def body(self) -> bytes:
192 if not hasattr(self, "_body"):
193 chunks = []
194 async for chunk in self.stream():
195 chunks.append(chunk)
196 self._body = b"".join(chunks)
197 return self._body
198
199 async def json(self) -> typing.Any:
200 if not hasattr(self, "_json"):
201 body = await self.body()
202 self._json = json.loads(body)
203 return self._json
204
205 async def form(self) -> FormData:
206 if not hasattr(self, "_form"):
207 assert (
208 parse_options_header is not None
209 ), "The `python-multipart` library must be installed to use form parsing."
210 content_type_header = self.headers.get("Content-Type")
211 content_type, options = parse_options_header(content_type_header)
212 if content_type == b"multipart/form-data":
213 multipart_parser = MultiPartParser(self.headers, self.stream())
214 self._form = await multipart_parser.parse()
215 elif content_type == b"application/x-www-form-urlencoded":
216 form_parser = FormParser(self.headers, self.stream())
217 self._form = await form_parser.parse()
218 else:
219 self._form = FormData()
220 return self._form
221
222 async def close(self) -> None:
223 if hasattr(self, "_form"):
224 await self._form.close()
225
226 async def is_disconnected(self) -> bool:
227 if not self._is_disconnected:
228 try:
229 message = await asyncio.wait_for(self._receive(), timeout=0.0000001)
230 except asyncio.TimeoutError:
231 message = {}
232
233 if message.get("type") == "http.disconnect":
234 self._is_disconnected = True
235
236 return self._is_disconnected
237
238 async def send_push_promise(self, path: str) -> None:
239 if "http.response.push" in self.scope.get("extensions", {}):
240 raw_headers = []
241 for name in SERVER_PUSH_HEADERS_TO_COPY:
242 for value in self.headers.getlist(name):
243 raw_headers.append(
244 (name.encode("latin-1"), value.encode("latin-1"))
245 )
246 await self._send(
247 {"type": "http.response.push", "path": path, "headers": raw_headers}
248 )
249
[end of starlette/requests.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/requests.py b/starlette/requests.py
--- a/starlette/requests.py
+++ b/starlette/requests.py
@@ -91,7 +91,10 @@
cookie_header = self.headers.get("cookie")
if cookie_header:
cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie
- cookie.load(cookie_header)
+ try:
+ cookie.load(cookie_header)
+ except http.cookies.CookieError:
+ pass
for key, morsel in cookie.items():
cookies[key] = morsel.value
self._cookies = cookies
| {"golden_diff": "diff --git a/starlette/requests.py b/starlette/requests.py\n--- a/starlette/requests.py\n+++ b/starlette/requests.py\n@@ -91,7 +91,10 @@\n cookie_header = self.headers.get(\"cookie\")\n if cookie_header:\n cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie\n- cookie.load(cookie_header)\n+ try:\n+ cookie.load(cookie_header)\n+ except http.cookies.CookieError:\n+ pass\n for key, morsel in cookie.items():\n cookies[key] = morsel.value\n self._cookies = cookies\n", "issue": "[bug] Invalid cookie name leads to exception\nWhen handling a request with an invalid cookie name (does not conform to RFC2109) starlette raises an exception. i.e iam/cookiename\r\nThis is because Starlette uses Python's stdlib cookie library, which is very strict.\r\nI do understand the strictness, but in real life scenarios you receive such malformed cookies and I want to handle those requests.\r\nMy suggestion for a solution would be to catch those exceptions and ignore the invalid cookie. \r\n** EDIT **\r\nI just realized stdlib is used for the whole cookie header, hence can't ignore only one cookie.\r\nI'll create a PR for ignoring the whole cookie on such case, but maybe we should create our own Cookie/Morsel class and override the methods to ignore such error in the inbound case?\n", "before_files": [{"content": "import asyncio\nimport http.cookies\nimport json\nimport typing\nfrom collections.abc import Mapping\n\nfrom starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State\nfrom starlette.formparsers import FormParser, MultiPartParser\nfrom starlette.types import Message, Receive, Scope, Send\n\ntry:\n from multipart.multipart import parse_options_header\nexcept ImportError: # pragma: nocover\n parse_options_header = None\n\n\nSERVER_PUSH_HEADERS_TO_COPY = {\n \"accept\",\n \"accept-encoding\",\n \"accept-language\",\n \"cache-control\",\n \"user-agent\",\n}\n\n\nclass ClientDisconnect(Exception):\n pass\n\n\nclass HTTPConnection(Mapping):\n \"\"\"\n A base class for incoming HTTP connections, that is used to provide\n any functionality that is common to both `Request` and `WebSocket`.\n \"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = None) -> None:\n assert scope[\"type\"] in (\"http\", \"websocket\")\n self.scope = scope\n\n def __getitem__(self, key: str) -> str:\n return self.scope[key]\n\n def __iter__(self) -> typing.Iterator[str]:\n return iter(self.scope)\n\n def __len__(self) -> int:\n return len(self.scope)\n\n @property\n def app(self) -> typing.Any:\n return self.scope[\"app\"]\n\n @property\n def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n self._url = URL(scope=self.scope)\n return self._url\n\n @property\n def base_url(self) -> URL:\n if not hasattr(self, \"_base_url\"):\n base_url_scope = dict(self.scope)\n base_url_scope[\"path\"] = \"/\"\n base_url_scope[\"query_string\"] = b\"\"\n base_url_scope[\"root_path\"] = base_url_scope.get(\n \"app_root_path\", base_url_scope.get(\"root_path\", \"\")\n )\n self._base_url = URL(scope=base_url_scope)\n return self._base_url\n\n @property\n def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(scope=self.scope)\n return self._headers\n\n @property\n def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n self._query_params = QueryParams(self.scope[\"query_string\"])\n return self._query_params\n\n @property\n def path_params(self) -> dict:\n return self.scope.get(\"path_params\", {})\n\n @property\n def 
cookies(self) -> typing.Dict[str, str]:\n if not hasattr(self, \"_cookies\"):\n cookies = {}\n cookie_header = self.headers.get(\"cookie\")\n if cookie_header:\n cookie = http.cookies.SimpleCookie() # type: http.cookies.BaseCookie\n cookie.load(cookie_header)\n for key, morsel in cookie.items():\n cookies[key] = morsel.value\n self._cookies = cookies\n return self._cookies\n\n @property\n def client(self) -> Address:\n host, port = self.scope.get(\"client\") or (None, None)\n return Address(host=host, port=port)\n\n @property\n def session(self) -> dict:\n assert (\n \"session\" in self.scope\n ), \"SessionMiddleware must be installed to access request.session\"\n return self.scope[\"session\"]\n\n @property\n def auth(self) -> typing.Any:\n assert (\n \"auth\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.auth\"\n return self.scope[\"auth\"]\n\n @property\n def user(self) -> typing.Any:\n assert (\n \"user\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.user\"\n return self.scope[\"user\"]\n\n @property\n def state(self) -> State:\n if not hasattr(self, \"_state\"):\n # Ensure 'state' has an empty dict if it's not already populated.\n self.scope.setdefault(\"state\", {})\n # Create a state instance with a reference to the dict in which it should store info\n self._state = State(self.scope[\"state\"])\n return self._state\n\n def url_for(self, name: str, **path_params: typing.Any) -> str:\n router = self.scope[\"router\"]\n url_path = router.url_path_for(name, **path_params)\n return url_path.make_absolute_url(base_url=self.base_url)\n\n\nasync def empty_receive() -> Message:\n raise RuntimeError(\"Receive channel has not been made available\")\n\n\nasync def empty_send(message: Message) -> None:\n raise RuntimeError(\"Send channel has not been made available\")\n\n\nclass Request(HTTPConnection):\n def __init__(\n self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send\n ):\n super().__init__(scope)\n assert scope[\"type\"] == \"http\"\n self._receive = receive\n self._send = send\n self._stream_consumed = False\n self._is_disconnected = False\n\n @property\n def method(self) -> str:\n return self.scope[\"method\"]\n\n @property\n def receive(self) -> Receive:\n return self._receive\n\n async def stream(self) -> typing.AsyncGenerator[bytes, None]:\n if hasattr(self, \"_body\"):\n yield self._body\n yield b\"\"\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n elif message[\"type\"] == \"http.disconnect\":\n self._is_disconnected = True\n raise ClientDisconnect()\n yield b\"\"\n\n async def body(self) -> bytes:\n if not hasattr(self, \"_body\"):\n chunks = []\n async for chunk in self.stream():\n chunks.append(chunk)\n self._body = b\"\".join(chunks)\n return self._body\n\n async def json(self) -> typing.Any:\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n\n async def form(self) -> FormData:\n if not hasattr(self, \"_form\"):\n assert (\n parse_options_header is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n content_type_header = self.headers.get(\"Content-Type\")\n content_type, options = 
parse_options_header(content_type_header)\n if content_type == b\"multipart/form-data\":\n multipart_parser = MultiPartParser(self.headers, self.stream())\n self._form = await multipart_parser.parse()\n elif content_type == b\"application/x-www-form-urlencoded\":\n form_parser = FormParser(self.headers, self.stream())\n self._form = await form_parser.parse()\n else:\n self._form = FormData()\n return self._form\n\n async def close(self) -> None:\n if hasattr(self, \"_form\"):\n await self._form.close()\n\n async def is_disconnected(self) -> bool:\n if not self._is_disconnected:\n try:\n message = await asyncio.wait_for(self._receive(), timeout=0.0000001)\n except asyncio.TimeoutError:\n message = {}\n\n if message.get(\"type\") == \"http.disconnect\":\n self._is_disconnected = True\n\n return self._is_disconnected\n\n async def send_push_promise(self, path: str) -> None:\n if \"http.response.push\" in self.scope.get(\"extensions\", {}):\n raw_headers = []\n for name in SERVER_PUSH_HEADERS_TO_COPY:\n for value in self.headers.getlist(name):\n raw_headers.append(\n (name.encode(\"latin-1\"), value.encode(\"latin-1\"))\n )\n await self._send(\n {\"type\": \"http.response.push\", \"path\": path, \"headers\": raw_headers}\n )\n", "path": "starlette/requests.py"}]} | 3,101 | 135 |
gh_patches_debug_24475 | rasdani/github-patches | git_diff | pyca__cryptography-5022 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement env var for OpenSSL 1.0.1 support
As part of #4923 we need to add `CRYPTOGRAPHY_ALLOW_OPENSSL_101` and update CI to test using that.
</issue>
<code>
[start of src/cryptography/hazmat/bindings/openssl/binding.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import collections
8 import threading
9 import types
10 import warnings
11
12 import cryptography
13 from cryptography import utils
14 from cryptography.exceptions import InternalError
15 from cryptography.hazmat.bindings._openssl import ffi, lib
16 from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
17
18 _OpenSSLErrorWithText = collections.namedtuple(
19 "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"]
20 )
21
22
23 class _OpenSSLError(object):
24 def __init__(self, code, lib, func, reason):
25 self._code = code
26 self._lib = lib
27 self._func = func
28 self._reason = reason
29
30 def _lib_reason_match(self, lib, reason):
31 return lib == self.lib and reason == self.reason
32
33 code = utils.read_only_property("_code")
34 lib = utils.read_only_property("_lib")
35 func = utils.read_only_property("_func")
36 reason = utils.read_only_property("_reason")
37
38
39 def _consume_errors(lib):
40 errors = []
41 while True:
42 code = lib.ERR_get_error()
43 if code == 0:
44 break
45
46 err_lib = lib.ERR_GET_LIB(code)
47 err_func = lib.ERR_GET_FUNC(code)
48 err_reason = lib.ERR_GET_REASON(code)
49
50 errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))
51
52 return errors
53
54
55 def _openssl_assert(lib, ok):
56 if not ok:
57 errors = _consume_errors(lib)
58 errors_with_text = []
59 for err in errors:
60 buf = ffi.new("char[]", 256)
61 lib.ERR_error_string_n(err.code, buf, len(buf))
62 err_text_reason = ffi.string(buf)
63
64 errors_with_text.append(
65 _OpenSSLErrorWithText(
66 err.code, err.lib, err.func, err.reason, err_text_reason
67 )
68 )
69
70 raise InternalError(
71 "Unknown OpenSSL error. This error is commonly encountered when "
72 "another library is not cleaning up the OpenSSL error stack. If "
73 "you are using cryptography with another library that uses "
74 "OpenSSL try disabling it before reporting a bug. Otherwise "
75 "please file an issue at https://github.com/pyca/cryptography/"
76 "issues with information on how to reproduce "
77 "this. ({0!r})".format(errors_with_text),
78 errors_with_text
79 )
80
81
82 def build_conditional_library(lib, conditional_names):
83 conditional_lib = types.ModuleType("lib")
84 conditional_lib._original_lib = lib
85 excluded_names = set()
86 for condition, names_cb in conditional_names.items():
87 if not getattr(lib, condition):
88 excluded_names.update(names_cb())
89
90 for attr in dir(lib):
91 if attr not in excluded_names:
92 setattr(conditional_lib, attr, getattr(lib, attr))
93
94 return conditional_lib
95
96
97 class Binding(object):
98 """
99 OpenSSL API wrapper.
100 """
101 lib = None
102 ffi = ffi
103 _lib_loaded = False
104 _init_lock = threading.Lock()
105 _lock_init_lock = threading.Lock()
106
107 def __init__(self):
108 self._ensure_ffi_initialized()
109
110 @classmethod
111 def _register_osrandom_engine(cls):
112 # Clear any errors extant in the queue before we start. In many
113 # scenarios other things may be interacting with OpenSSL in the same
114 # process space and it has proven untenable to assume that they will
115 # reliably clear the error queue. Once we clear it here we will
116 # error on any subsequent unexpected item in the stack.
117 cls.lib.ERR_clear_error()
118 if cls.lib.Cryptography_HAS_ENGINE:
119 result = cls.lib.Cryptography_add_osrandom_engine()
120 _openssl_assert(cls.lib, result in (1, 2))
121
122 @classmethod
123 def _ensure_ffi_initialized(cls):
124 with cls._init_lock:
125 if not cls._lib_loaded:
126 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)
127 cls._lib_loaded = True
128 # initialize the SSL library
129 cls.lib.SSL_library_init()
130 # adds all ciphers/digests for EVP
131 cls.lib.OpenSSL_add_all_algorithms()
132 # loads error strings for libcrypto and libssl functions
133 cls.lib.SSL_load_error_strings()
134 cls._register_osrandom_engine()
135
136 @classmethod
137 def init_static_locks(cls):
138 with cls._lock_init_lock:
139 cls._ensure_ffi_initialized()
140 # Use Python's implementation if available, importing _ssl triggers
141 # the setup for this.
142 __import__("_ssl")
143
144 if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or
145 cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):
146 return
147
148 # If nothing else has setup a locking callback already, we set up
149 # our own
150 res = lib.Cryptography_setup_ssl_threads()
151 _openssl_assert(cls.lib, res == 1)
152
153
154 def _verify_openssl_version(lib):
155 if (
156 lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
157 not lib.CRYPTOGRAPHY_IS_LIBRESSL
158 ):
159 warnings.warn(
160 "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
161 "project, please upgrade. The next version of cryptography will "
162 "drop support for it.",
163 utils.CryptographyDeprecationWarning
164 )
165
166
167 def _verify_package_version(version):
168 # Occasionally we run into situations where the version of the Python
169 # package does not match the version of the shared object that is loaded.
170 # This may occur in environments where multiple versions of cryptography
171 # are installed and available in the python path. To avoid errors cropping
172 # up later this code checks that the currently imported package and the
173 # shared object that were loaded have the same version and raise an
174 # ImportError if they do not
175 so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)
176 if version.encode("ascii") != so_package_version:
177 raise ImportError(
178 "The version of cryptography does not match the loaded "
179 "shared object. This can happen if you have multiple copies of "
180 "cryptography installed in your Python path. Please try creating "
181 "a new virtual environment to resolve this issue. "
182 "Loaded python version: {}, shared object version: {}".format(
183 version, so_package_version
184 )
185 )
186
187
188 _verify_package_version(cryptography.__version__)
189
190 # OpenSSL is not thread safe until the locks are initialized. We call this
191 # method in module scope so that it executes with the import lock. On
192 # Pythons < 3.4 this import lock is a global lock, which can prevent a race
193 # condition registering the OpenSSL locks. On Python 3.4+ the import lock
194 # is per module so this approach will not work.
195 Binding.init_static_locks()
196
197 _verify_openssl_version(Binding.lib)
198
[end of src/cryptography/hazmat/bindings/openssl/binding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py
--- a/src/cryptography/hazmat/bindings/openssl/binding.py
+++ b/src/cryptography/hazmat/bindings/openssl/binding.py
@@ -5,6 +5,7 @@
from __future__ import absolute_import, division, print_function
import collections
+import os
import threading
import types
import warnings
@@ -156,12 +157,19 @@
lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
not lib.CRYPTOGRAPHY_IS_LIBRESSL
):
- warnings.warn(
- "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
- "project, please upgrade. The next version of cryptography will "
- "drop support for it.",
- utils.CryptographyDeprecationWarning
- )
+ if os.environ.get("CRYPTOGRAPHY_ALLOW_OPENSSL_101"):
+ warnings.warn(
+ "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
+ "project, please upgrade. The next version of cryptography "
+ "will completely remove support for it.",
+ utils.CryptographyDeprecationWarning
+ )
+ else:
+ raise RuntimeError(
+ "You are linking against OpenSSL 1.0.1, which is no longer "
+ "supported by the OpenSSL project. You need to upgrade to a "
+ "newer version of OpenSSL."
+ )
def _verify_package_version(version):
| {"golden_diff": "diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -5,6 +5,7 @@\n from __future__ import absolute_import, division, print_function\n \n import collections\n+import os\n import threading\n import types\n import warnings\n@@ -156,12 +157,19 @@\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n- warnings.warn(\n- \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n- \"project, please upgrade. The next version of cryptography will \"\n- \"drop support for it.\",\n- utils.CryptographyDeprecationWarning\n- )\n+ if os.environ.get(\"CRYPTOGRAPHY_ALLOW_OPENSSL_101\"):\n+ warnings.warn(\n+ \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n+ \"project, please upgrade. The next version of cryptography \"\n+ \"will completely remove support for it.\",\n+ utils.CryptographyDeprecationWarning\n+ )\n+ else:\n+ raise RuntimeError(\n+ \"You are linking against OpenSSL 1.0.1, which is no longer \"\n+ \"supported by the OpenSSL project. You need to upgrade to a \"\n+ \"newer version of OpenSSL.\"\n+ )\n \n \n def _verify_package_version(version):\n", "issue": "Implement env var for OpenSSL 1.0.1 support\nAs part of #4923 we need to add `CRYPTOGRAPHY_ALLOW_OPENSSL_101` and update CI to test using that.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport threading\nimport types\nimport warnings\n\nimport cryptography\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. 
If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. ({0!r})\".format(errors_with_text),\n errors_with_text\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n if cls.lib.Cryptography_HAS_ENGINE:\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or\n cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\ndef _verify_openssl_version(lib):\n if (\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n warnings.warn(\n \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n \"project, please upgrade. The next version of cryptography will \"\n \"drop support for it.\",\n utils.CryptographyDeprecationWarning\n )\n\n\ndef _verify_package_version(version):\n # Occasionally we run into situations where the version of the Python\n # package does not match the version of the shared object that is loaded.\n # This may occur in environments where multiple versions of cryptography\n # are installed and available in the python path. 
To avoid errors cropping\n # up later this code checks that the currently imported package and the\n # shared object that were loaded have the same version and raise an\n # ImportError if they do not\n so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)\n if version.encode(\"ascii\") != so_package_version:\n raise ImportError(\n \"The version of cryptography does not match the loaded \"\n \"shared object. This can happen if you have multiple copies of \"\n \"cryptography installed in your Python path. Please try creating \"\n \"a new virtual environment to resolve this issue. \"\n \"Loaded python version: {}, shared object version: {}\".format(\n version, so_package_version\n )\n )\n\n\n_verify_package_version(cryptography.__version__)\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\n_verify_openssl_version(Binding.lib)\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}]} | 2,629 | 353 |
gh_patches_debug_15037 | rasdani/github-patches | git_diff | azavea__raster-vision-1464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch from Gitter to another tool for usage questions
## 🚀 Feature
Currently we use [Gitter](https://gitter.im/azavea/raster-vision) for usage questions, but it's not well designed for this use case. I would like to switch to [Github Discussions](https://github.com/azavea/raster-vision/discussions/landing), [Stack Overflow](https://stackoverflow.com/), or [Discourse](https://www.discourse.org/).
## Motivation
Gitter is designed more as a chat system, and we need something where people can make posts that can be open/closed, and later searchable to avoid repeat questions.
## Pitch
We should spend a small amount of time comparing the different options, make a decision, and then implement the change. Github Discussions seems like the most straightforward option.
</issue>
<code>
[start of docs/conf.py]
1 # flake8: noqa
2
3 from pallets_sphinx_themes import ProjectLink, get_version
4
5 # -*- coding: utf-8 -*-
6 #
7 # Configuration file for the Sphinx documentation builder.
8 #
9 # This file does only contain a selection of the most common options. For a
10 # full list see the documentation:
11 # http://www.sphinx-doc.org/en/stable/config
12
13 # -- Path setup --------------------------------------------------------------
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 # import os
20 # import sys
21 # sys.path.insert(0, os.path.abspath('.'))
22
23 # -- Project information -----------------------------------------------------
24
25 project = 'Raster Vision'
26 copyright = '2018, Azavea'
27 author = 'Azavea'
28
29 # The short X.Y version
30 version = '0.13'
31 # The full version, including alpha/beta/rc tags
32 release = '0.13.1'
33
34 # -- General configuration ---------------------------------------------------
35
36 # If your documentation needs a minimal Sphinx version, state it here.
37 #
38 # needs_sphinx = '1.0'
39
40 # Add any Sphinx extension module names here, as strings. They can be
41 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
42 # ones.
43 extensions = [
44 'sphinx.ext.autodoc',
45 'sphinx.ext.intersphinx',
46 'pallets_sphinx_themes',
47 'sphinx.ext.napoleon',
48 ]
49
50 # https://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
51 import sys
52 from unittest.mock import MagicMock
53
54
55 class Mock(MagicMock):
56 @classmethod
57 def __getattr__(cls, name):
58 return MagicMock()
59
60
61 MOCK_MODULES = ['pyproj', 'h5py', 'osgeo', 'mask_to_polygons']
62 sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
63
64 autodoc_mock_imports = ['torch', 'torchvision', 'pycocotools']
65
66 intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
67
68 # Add any paths that contain templates here, relative to this directory.
69 templates_path = ['_templates']
70
71 # The suffix(es) of source filenames.
72 # You can specify multiple suffix as a list of string:
73 #
74 # source_suffix = ['.rst', '.md']
75 source_suffix = '.rst'
76
77 # The master toctree document.
78 master_doc = 'index'
79
80 # The language for content autogenerated by Sphinx. Refer to documentation
81 # for a list of supported languages.
82 #
83 # This is also used if you do content translation via gettext catalogs.
84 # Usually you set "language" from the command line for these cases.
85 language = None
86
87 # List of patterns, relative to source directory, that match files and
88 # directories to ignore when looking for source files.
89 # This pattern also affects html_static_path and html_extra_path .
90 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']
91
92 # The name of the Pygments (syntax highlighting) style to use.
93 # pygments_style = 'sphinx'
94
95 # HTML -----------------------------------------------------------------
96
97 html_theme = 'click'
98 html_theme_options = {'index_sidebar_logo': False}
99 html_context = {
100 'project_links': [
101 ProjectLink('Quickstart', 'quickstart.html'),
102 ProjectLink('Documentation TOC', 'index.html#documentation'),
103 ProjectLink('Examples', 'examples.html'),
104 ProjectLink('Config API Reference', 'index.html#api'),
105 ProjectLink('AWS Batch Setup', 'cloudformation.html'),
106 ProjectLink('Project Website', 'https://rastervision.io/'),
107 ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),
108 ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),
109 ProjectLink('Gitter Channel',
110 'https://gitter.im/azavea/raster-vision'),
111 ProjectLink('Issue Tracker',
112 'https://github.com/azavea/raster-vision/issues/'),
113 ProjectLink('CHANGELOG', 'changelog.html'),
114 ProjectLink('Azavea', 'https://www.azavea.com/'),
115 ],
116 'css_files': [
117 '_static/rastervision.css',
118 'https://media.readthedocs.org/css/badge_only.css'
119 ]
120 }
121 html_sidebars = {
122 'index': ['project.html', 'versions.html', 'searchbox.html'],
123 '**': [
124 'project.html', 'localtoc.html', 'relations.html', 'versions.html',
125 'searchbox.html'
126 ],
127 }
128 singlehtml_sidebars = {
129 'index': ['project.html', 'versions.html', 'localtoc.html']
130 }
131 html_static_path = ['_static']
132 html_favicon = 'img/raster-vision-icon.png'
133 html_logo = 'img/raster-vision-logo.png'
134 html_title = 'Raster Vision Documentation ({})'.format(version)
135 html_show_sourcelink = False
136 html_domain_indices = False
137 html_experimental_html5_writer = True
138
139 # -- Options for HTMLHelp output ---------------------------------------------
140
141 # Output file base name for HTML help builder.
142 htmlhelp_basename = 'RasterVisiondoc'
143
144 # -- Options for LaTeX output ------------------------------------------------
145
146 latex_elements = {
147 # The paper size ('letterpaper' or 'a4paper').
148 #
149 # 'papersize': 'letterpaper',
150
151 # The font size ('10pt', '11pt' or '12pt').
152 #
153 # 'pointsize': '10pt',
154
155 # Additional stuff for the LaTeX preamble.
156 #
157 # 'preamble': '',
158
159 # Latex figure (float) alignment
160 #
161 # 'figure_align': 'htbp',
162 }
163
164 # Grouping the document tree into LaTeX files. List of tuples
165 # (source start file, target name, title,
166 # author, documentclass [howto, manual, or own class]).
167 latex_documents = [
168 (master_doc, 'RasterVision.tex', 'Raster Vision Documentation', 'Azavea',
169 'manual'),
170 ]
171
172 # -- Options for manual page output ------------------------------------------
173
174 # One entry per manual page. List of tuples
175 # (source start file, name, description, authors, manual section).
176 man_pages = [(master_doc, 'RasterVisoin-{}.tex', html_title, [author],
177 'manual')]
178
179 # -- Options for Texinfo output ----------------------------------------------
180
181 # Grouping the document tree into Texinfo files. List of tuples
182 # (source start file, target name, title, author,
183 # dir menu entry, description, category)
184 texinfo_documents = [
185 (master_doc, 'RasterVision', 'Raster Vision Documentation', author,
186 'RasterVision', 'One line description of project.', 'Miscellaneous'),
187 ]
188
189 # -- Extension configuration -------------------------------------------------
190
191 programoutput_prompt_template = '> {command}\n{output}'
192
193 # -- Options for todo extension ----------------------------------------------
194
195 # If true, `todo` and `todoList` produce output, else they produce nothing.
196 todo_include_todos = True
197
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -106,8 +106,8 @@
ProjectLink('Project Website', 'https://rastervision.io/'),
ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),
ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),
- ProjectLink('Gitter Channel',
- 'https://gitter.im/azavea/raster-vision'),
+ ProjectLink('Discussion Forum',
+ 'https://github.com/azavea/raster-vision/discussions'),
ProjectLink('Issue Tracker',
'https://github.com/azavea/raster-vision/issues/'),
ProjectLink('CHANGELOG', 'changelog.html'),
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -106,8 +106,8 @@\n ProjectLink('Project Website', 'https://rastervision.io/'),\n ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),\n ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),\n- ProjectLink('Gitter Channel',\n- 'https://gitter.im/azavea/raster-vision'),\n+ ProjectLink('Discussion Forum',\n+ 'https://github.com/azavea/raster-vision/discussions'),\n ProjectLink('Issue Tracker',\n 'https://github.com/azavea/raster-vision/issues/'),\n ProjectLink('CHANGELOG', 'changelog.html'),\n", "issue": "Switch from Gitter to another tool for usage questions\n## \ud83d\ude80 Feature\r\n\r\nCurrently we use [Gitter](https://gitter.im/azavea/raster-vision) for usage questions, but it's not well designed for this use case. I would like to switch to [Github Discussions](https://github.com/azavea/raster-vision/discussions/landing), [Stack Overflow](https://stackoverflow.com/), or [Discourse](https://www.discourse.org/). \r\n\r\n## Motivation\r\n\r\nGitter is designed more as a chat system, and we need something where people can make posts that can be open/closed, and later searchable to avoid repeat questions.\r\n\r\n## Pitch\r\n\r\nWe should spend a small amount of time comparing the different options, make a decision, and then implement the change. Github Discussions seems like the most straightforward option.\r\n\r\n\n", "before_files": [{"content": "# flake8: noqa\n\nfrom pallets_sphinx_themes import ProjectLink, get_version\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Raster Vision'\ncopyright = '2018, Azavea'\nauthor = 'Azavea'\n\n# The short X.Y version\nversion = '0.13'\n# The full version, including alpha/beta/rc tags\nrelease = '0.13.1'\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'pallets_sphinx_themes',\n 'sphinx.ext.napoleon',\n]\n\n# https://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules\nimport sys\nfrom unittest.mock import MagicMock\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return MagicMock()\n\n\nMOCK_MODULES = ['pyproj', 'h5py', 'osgeo', 'mask_to_polygons']\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\nautodoc_mock_imports = ['torch', 'torchvision', 'pycocotools']\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'sphinx'\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = 'click'\nhtml_theme_options = {'index_sidebar_logo': False}\nhtml_context = {\n 'project_links': [\n ProjectLink('Quickstart', 'quickstart.html'),\n ProjectLink('Documentation TOC', 'index.html#documentation'),\n ProjectLink('Examples', 'examples.html'),\n ProjectLink('Config API Reference', 'index.html#api'),\n ProjectLink('AWS Batch Setup', 'cloudformation.html'),\n ProjectLink('Project Website', 'https://rastervision.io/'),\n ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),\n ProjectLink('GitHub Repo', 'https://github.com/azavea/raster-vision'),\n ProjectLink('Gitter Channel',\n 'https://gitter.im/azavea/raster-vision'),\n ProjectLink('Issue Tracker',\n 'https://github.com/azavea/raster-vision/issues/'),\n ProjectLink('CHANGELOG', 'changelog.html'),\n ProjectLink('Azavea', 'https://www.azavea.com/'),\n ],\n 'css_files': [\n '_static/rastervision.css',\n 'https://media.readthedocs.org/css/badge_only.css'\n ]\n}\nhtml_sidebars = {\n 'index': ['project.html', 'versions.html', 'searchbox.html'],\n '**': [\n 'project.html', 'localtoc.html', 'relations.html', 'versions.html',\n 'searchbox.html'\n ],\n}\nsinglehtml_sidebars = {\n 'index': ['project.html', 'versions.html', 'localtoc.html']\n}\nhtml_static_path = ['_static']\nhtml_favicon = 'img/raster-vision-icon.png'\nhtml_logo = 'img/raster-vision-logo.png'\nhtml_title = 'Raster Vision Documentation ({})'.format(version)\nhtml_show_sourcelink = False\nhtml_domain_indices = False\nhtml_experimental_html5_writer = True\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'RasterVisiondoc'\n\n# -- Options for LaTeX output 
------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'RasterVision.tex', 'Raster Vision Documentation', 'Azavea',\n 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'RasterVisoin-{}.tex', html_title, [author],\n 'manual')]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'RasterVision', 'Raster Vision Documentation', author,\n 'RasterVision', 'One line description of project.', 'Miscellaneous'),\n]\n\n# -- Extension configuration -------------------------------------------------\n\nprogramoutput_prompt_template = '> {command}\\n{output}'\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}]} | 2,739 | 185 |
gh_patches_debug_17267 | rasdani/github-patches | git_diff | pulp__pulpcore-239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix bug where Last-Modified header was being updated on duplicate package uploads
Fixes a bug where the Last-Modified header of a package stored in django-storages was being updated on duplicate uploads.
Closes #5149
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 with open('README.md') as f:
4 long_description = f.read()
5
6 requirements = [
7 'coreapi',
8 'Django~=2.2', # LTS version, switch only if we have a compelling reason to
9 'django-filter',
10 'djangorestframework',
11 'djangorestframework-queryfields',
12 'drf-nested-routers',
13 'drf-yasg',
14 'gunicorn',
15 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
16 'PyYAML',
17 'rq~=1.0',
18 'redis<3.2.0',
19 'setuptools',
20 'dynaconf~=2.0',
21 'whitenoise',
22 ]
23
24 setup(
25 name='pulpcore',
26 version='3.0.0rc5.dev',
27 description='Pulp Django Application and Related Modules',
28 long_description=long_description,
29 long_description_content_type="text/markdown",
30 license='GPLv2+',
31 packages=find_packages(exclude=['test']),
32 author='Pulp Team',
33 author_email='[email protected]',
34 url='http://www.pulpproject.org',
35 python_requires='>=3.6',
36 install_requires=requirements,
37 extras_require={
38 'postgres': ['psycopg2-binary'],
39 'mysql': ['mysqlclient']
40 },
41 include_package_data=True,
42 classifiers=(
43 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
44 'Operating System :: POSIX :: Linux',
45 'Development Status :: 4 - Beta',
46 'Framework :: Django',
47 'Programming Language :: Python',
48 'Programming Language :: Python :: 3',
49 'Programming Language :: Python :: 3.6',
50 'Programming Language :: Python :: 3.7',
51 ),
52 scripts=['bin/pulp-content'],
53 )
54
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,21 +4,21 @@
long_description = f.read()
requirements = [
- 'coreapi',
- 'Django~=2.2', # LTS version, switch only if we have a compelling reason to
- 'django-filter',
- 'djangorestframework',
- 'djangorestframework-queryfields',
- 'drf-nested-routers',
- 'drf-yasg',
- 'gunicorn',
+ 'coreapi~=2.3.3',
+ 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to
+ 'django-filter~=2.2.0',
+ 'djangorestframework~=3.10.2',
+ 'djangorestframework-queryfields~=1.0.0',
+ 'drf-nested-routers~=0.91.0',
+ 'drf-yasg~=1.16.1',
+ 'gunicorn~=19.9.0',
'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
- 'PyYAML',
- 'rq~=1.0',
- 'redis<3.2.0',
- 'setuptools',
- 'dynaconf~=2.0',
- 'whitenoise',
+ 'PyYAML~=5.1.1',
+ 'rq~=1.1.0',
+ 'redis~=3.1.0',
+ 'setuptools~=41.0.1',
+ 'dynaconf~=2.0.3',
+ 'whitenoise~=4.1.3',
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,21 +4,21 @@\n long_description = f.read()\n \n requirements = [\n- 'coreapi',\n- 'Django~=2.2', # LTS version, switch only if we have a compelling reason to\n- 'django-filter',\n- 'djangorestframework',\n- 'djangorestframework-queryfields',\n- 'drf-nested-routers',\n- 'drf-yasg',\n- 'gunicorn',\n+ 'coreapi~=2.3.3',\n+ 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to\n+ 'django-filter~=2.2.0',\n+ 'djangorestframework~=3.10.2',\n+ 'djangorestframework-queryfields~=1.0.0',\n+ 'drf-nested-routers~=0.91.0',\n+ 'drf-yasg~=1.16.1',\n+ 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n- 'PyYAML',\n- 'rq~=1.0',\n- 'redis<3.2.0',\n- 'setuptools',\n- 'dynaconf~=2.0',\n- 'whitenoise',\n+ 'PyYAML~=5.1.1',\n+ 'rq~=1.1.0',\n+ 'redis~=3.1.0',\n+ 'setuptools~=41.0.1',\n+ 'dynaconf~=2.0.3',\n+ 'whitenoise~=4.1.3',\n ]\n \n setup(\n", "issue": "Fix bug where Last-Modified header was being updated on duplicate package uploads\nFixes a bug where the Last-Modified header of a package stored in django-storages was being updated on duplicate uploads.\r\n\r\nCloses #5149\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nrequirements = [\n 'coreapi',\n 'Django~=2.2', # LTS version, switch only if we have a compelling reason to\n 'django-filter',\n 'djangorestframework',\n 'djangorestframework-queryfields',\n 'drf-nested-routers',\n 'drf-yasg',\n 'gunicorn',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n 'PyYAML',\n 'rq~=1.0',\n 'redis<3.2.0',\n 'setuptools',\n 'dynaconf~=2.0',\n 'whitenoise',\n]\n\nsetup(\n name='pulpcore',\n version='3.0.0rc5.dev',\n description='Pulp Django Application and Related Modules',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='GPLv2+',\n packages=find_packages(exclude=['test']),\n author='Pulp Team',\n author_email='[email protected]',\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n extras_require={\n 'postgres': ['psycopg2-binary'],\n 'mysql': ['mysqlclient']\n },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Operating System :: POSIX :: Linux',\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ),\n scripts=['bin/pulp-content'],\n)\n", "path": "setup.py"}]} | 1,113 | 425 |
gh_patches_debug_8118 | rasdani/github-patches | git_diff | arviz-devs__arviz-1566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add an examples of coords argument in documentation
**Describe the bug**
There's this fancy coords argument but not a great explanation on how to use it in the code examples below
https://arviz-devs.github.io/arviz/api/generated/arviz.plot_posterior.html

**To Reproduce**
Look at the doc, notice the lack of coords documentation in the code example
**Expected behavior**
Theres an example in the code below
</issue>
<code>
[start of arviz/plots/posteriorplot.py]
1 """Plot posterior densities."""
2 from ..data import convert_to_dataset
3 from ..rcparams import rcParams
4 from ..utils import _var_names, get_coords
5 from .plot_utils import default_grid, filter_plotters_list, get_plotting_function, xarray_var_iter
6
7
8 def plot_posterior(
9 data,
10 var_names=None,
11 filter_vars=None,
12 transform=None,
13 coords=None,
14 grid=None,
15 figsize=None,
16 textsize=None,
17 hdi_prob=None,
18 multimodal=False,
19 skipna=False,
20 round_to=None,
21 point_estimate="auto",
22 group="posterior",
23 rope=None,
24 ref_val=None,
25 kind="kde",
26 bw="default",
27 circular=False,
28 bins=None,
29 ax=None,
30 backend=None,
31 backend_kwargs=None,
32 show=None,
33 **kwargs
34 ):
35 """Plot Posterior densities in the style of John K. Kruschke's book.
36
37 Parameters
38 ----------
39 data: obj
40 Any object that can be converted to an az.InferenceData object
41 Refer to documentation of az.convert_to_dataset for details
42 var_names: list of variable names
43 Variables to be plotted, two variables are required. Prefix the variables by `~`
44 when you want to exclude them from the plot.
45 filter_vars: {None, "like", "regex"}, optional, default=None
46 If `None` (default), interpret var_names as the real variables names. If "like",
47 interpret var_names as substrings of the real variables names. If "regex",
48 interpret var_names as regular expressions on the real variables names. A la
49 `pandas.filter`.
50 transform: callable
51 Function to transform data (defaults to None i.e.the identity function)
52 coords: mapping, optional
53 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
54 grid : tuple
55 Number of rows and columns. Defaults to None, the rows and columns are
56 automatically inferred.
57 figsize: tuple
58 Figure size. If None it will be defined automatically.
59 textsize: float
60 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
61 on figsize.
62 hdi_prob: float, optional
63 Plots highest density interval for chosen percentage of density.
64 Use 'hide' to hide the highest density interval. Defaults to 0.94.
65 multimodal: bool
66 If true (default) it may compute more than one credible interval if the distribution is
67 multimodal and the modes are well separated.
68 skipna : bool
69 If true ignores nan values when computing the hdi and point estimates. Defaults to false.
70 round_to: int, optional
71 Controls formatting of floats. Defaults to 2 or the integer part, whichever is bigger.
72 point_estimate: Optional[str]
73 Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
74 Defaults to 'auto' i.e. it falls back to default set in rcParams.
75 group: str, optional
76 Specifies which InferenceData group should be plotted. Defaults to ‘posterior’.
77 rope: tuple or dictionary of tuples
78 Lower and upper values of the Region Of Practical Equivalence. If a list is provided, its
79 length should match the number of variables.
80 ref_val: float or dictionary of floats
81 display the percentage below and above the values in ref_val. Must be None (default),
82 a constant, a list or a dictionary like see an example below. If a list is provided, its
83 length should match the number of variables.
84 kind: str
85 Type of plot to display (kde or hist) For discrete variables this argument is ignored and
86 a histogram is always used.
87 bw: float or str, optional
88 If numeric, indicates the bandwidth and must be positive.
89 If str, indicates the method to estimate the bandwidth and must be
90 one of "scott", "silverman", "isj" or "experimental" when `circular` is False
91 and "taylor" (for now) when `circular` is True.
92 Defaults to "default" which means "experimental" when variable is not circular
93 and "taylor" when it is. Only works if `kind == kde`.
94 circular: bool, optional
95 If True, it interprets the values passed are from a circular variable measured in radians
96 and a circular KDE is used. Only valid for 1D KDE. Defaults to False.
97 Only works if `kind == kde`.
98 bins: integer or sequence or 'auto', optional
99 Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works
100 if `kind == hist`. If None (default) it will use `auto` for continuous variables and
101 `range(xmin, xmax + 1)` for discrete variables.
102 ax: numpy array-like of matplotlib axes or bokeh figures, optional
103 A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
104 its own array of plot areas (and return it).
105 backend: str, optional
106 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
107 backend_kwargs: bool, optional
108 These are kwargs specific to the backend being used. For additional documentation
109 check the plotting method of the backend.
110 show: bool, optional
111 Call backend show function.
112 **kwargs
113 Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.
114
115 Returns
116 -------
117 axes: matplotlib axes or bokeh figures
118
119 Examples
120 --------
121 Show a default kernel density plot following style of John Kruschke
122
123 .. plot::
124 :context: close-figs
125
126 >>> import arviz as az
127 >>> data = az.load_arviz_data('centered_eight')
128 >>> az.plot_posterior(data)
129
130 Plot subset variables by specifying variable name exactly
131
132 .. plot::
133 :context: close-figs
134
135 >>> az.plot_posterior(data, var_names=['mu'])
136
137 Plot Region of Practical Equivalence (rope) and select variables with regular expressions
138
139 .. plot::
140 :context: close-figs
141
142 >>> az.plot_posterior(data, var_names=['mu', '^the'], filter_vars="regex", rope=(-1, 1))
143
144 Plot Region of Practical Equivalence for selected distributions
145
146 .. plot::
147 :context: close-figs
148
149 >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}
150 >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)
151
152
153 Add reference lines
154
155 .. plot::
156 :context: close-figs
157
158 >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)
159
160 Show point estimate of distribution
161
162 .. plot::
163 :context: close-figs
164
165 >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')
166
167 Show reference values using variable names and coordinates
168
169 .. plot::
170 :context: close-figs
171
172 >>> az.plot_posterior(data, ref_val= {"theta": [{"school": "Deerfield", "ref_val": 4},
173 ... {"school": "Choate", "ref_val": 3}]})
174
175 Show reference values using a list
176
177 .. plot::
178 :context: close-figs
179
180 >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])
181
182
183 Plot posterior as a histogram
184
185 .. plot::
186 :context: close-figs
187
188 >>> az.plot_posterior(data, var_names=['mu'], kind='hist')
189
190 Change size of highest density interval
191
192 .. plot::
193 :context: close-figs
194
195 >>> az.plot_posterior(data, var_names=['mu'], hdi_prob=.75)
196 """
197 data = convert_to_dataset(data, group=group)
198 if transform is not None:
199 data = transform(data)
200 var_names = _var_names(var_names, data, filter_vars)
201
202 if coords is None:
203 coords = {}
204
205 if hdi_prob is None:
206 hdi_prob = rcParams["stats.hdi_prob"]
207 elif hdi_prob not in (None, "hide"):
208 if not 1 >= hdi_prob > 0:
209 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
210
211 if point_estimate == "auto":
212 point_estimate = rcParams["plot.point_estimate"]
213 elif point_estimate not in {"mean", "median", "mode", None}:
214 raise ValueError("The value of point_estimate must be either mean, median, mode or None.")
215
216 plotters = filter_plotters_list(
217 list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),
218 "plot_posterior",
219 )
220 length_plotters = len(plotters)
221 rows, cols = default_grid(length_plotters, grid=grid)
222
223 posteriorplot_kwargs = dict(
224 ax=ax,
225 length_plotters=length_plotters,
226 rows=rows,
227 cols=cols,
228 figsize=figsize,
229 plotters=plotters,
230 bw=bw,
231 circular=circular,
232 bins=bins,
233 kind=kind,
234 point_estimate=point_estimate,
235 round_to=round_to,
236 hdi_prob=hdi_prob,
237 multimodal=multimodal,
238 skipna=skipna,
239 textsize=textsize,
240 ref_val=ref_val,
241 rope=rope,
242 kwargs=kwargs,
243 backend_kwargs=backend_kwargs,
244 show=show,
245 )
246
247 if backend is None:
248 backend = rcParams["plot.backend"]
249 backend = backend.lower()
250
251 # TODO: Add backend kwargs
252 plot = get_plotting_function("plot_posterior", "posteriorplot", backend)
253 ax = plot(**posteriorplot_kwargs)
254 return ax
255
[end of arviz/plots/posteriorplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/posteriorplot.py b/arviz/plots/posteriorplot.py
--- a/arviz/plots/posteriorplot.py
+++ b/arviz/plots/posteriorplot.py
@@ -149,6 +149,13 @@
>>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}
>>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)
+ Using `coords` argument to plot only a subset of data
+
+ .. plot::
+ :context: close-figs
+
+ >>> coords = {"school": ["Choate","Phillips Exeter"]}
+ >>> az.plot_posterior(data, var_names=["mu", "theta"], coords=coords)
Add reference lines
| {"golden_diff": "diff --git a/arviz/plots/posteriorplot.py b/arviz/plots/posteriorplot.py\n--- a/arviz/plots/posteriorplot.py\n+++ b/arviz/plots/posteriorplot.py\n@@ -149,6 +149,13 @@\n >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)\n \n+ Using `coords` argument to plot only a subset of data\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> coords = {\"school\": [\"Choate\",\"Phillips Exeter\"]}\n+ >>> az.plot_posterior(data, var_names=[\"mu\", \"theta\"], coords=coords)\n \n Add reference lines\n", "issue": "Add an examples of coords argument in documentation\n**Describe the bug**\r\nThere's this fancy coords argument but not a great explanation on how to use it in the code examples below\r\nhttps://arviz-devs.github.io/arviz/api/generated/arviz.plot_posterior.html\r\n\r\n\r\n\r\n\r\n**To Reproduce**\r\nLook at the doc, notice the lack of coords documentation in the code example\r\n\r\n**Expected behavior**\r\nTheres an example in the code below\r\n\n", "before_files": [{"content": "\"\"\"Plot posterior densities.\"\"\"\nfrom ..data import convert_to_dataset\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\nfrom .plot_utils import default_grid, filter_plotters_list, get_plotting_function, xarray_var_iter\n\n\ndef plot_posterior(\n data,\n var_names=None,\n filter_vars=None,\n transform=None,\n coords=None,\n grid=None,\n figsize=None,\n textsize=None,\n hdi_prob=None,\n multimodal=False,\n skipna=False,\n round_to=None,\n point_estimate=\"auto\",\n group=\"posterior\",\n rope=None,\n ref_val=None,\n kind=\"kde\",\n bw=\"default\",\n circular=False,\n bins=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n **kwargs\n):\n \"\"\"Plot Posterior densities in the style of John K. Kruschke's book.\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names: list of variable names\n Variables to be plotted, two variables are required. Prefix the variables by `~`\n when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n `pandas.filter`.\n transform: callable\n Function to transform data (defaults to None i.e.the identity function)\n coords: mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n grid : tuple\n Number of rows and columns. Defaults to None, the rows and columns are\n automatically inferred.\n figsize: tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n hdi_prob: float, optional\n Plots highest density interval for chosen percentage of density.\n Use 'hide' to hide the highest density interval. Defaults to 0.94.\n multimodal: bool\n If true (default) it may compute more than one credible interval if the distribution is\n multimodal and the modes are well separated.\n skipna : bool\n If true ignores nan values when computing the hdi and point estimates. Defaults to false.\n round_to: int, optional\n Controls formatting of floats. 
Defaults to 2 or the integer part, whichever is bigger.\n point_estimate: Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in rcParams.\n group: str, optional\n Specifies which InferenceData group should be plotted. Defaults to \u2018posterior\u2019.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. If a list is provided, its\n length should match the number of variables.\n ref_val: float or dictionary of floats\n display the percentage below and above the values in ref_val. Must be None (default),\n a constant, a list or a dictionary like see an example below. If a list is provided, its\n length should match the number of variables.\n kind: str\n Type of plot to display (kde or hist) For discrete variables this argument is ignored and\n a histogram is always used.\n bw: float or str, optional\n If numeric, indicates the bandwidth and must be positive.\n If str, indicates the method to estimate the bandwidth and must be\n one of \"scott\", \"silverman\", \"isj\" or \"experimental\" when `circular` is False\n and \"taylor\" (for now) when `circular` is True.\n Defaults to \"default\" which means \"experimental\" when variable is not circular\n and \"taylor\" when it is. Only works if `kind == kde`.\n circular: bool, optional\n If True, it interprets the values passed are from a circular variable measured in radians\n and a circular KDE is used. Only valid for 1D KDE. Defaults to False.\n Only works if `kind == kde`.\n bins: integer or sequence or 'auto', optional\n Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works\n if `kind == hist`. If None (default) it will use `auto` for continuous variables and\n `range(xmin, xmax + 1)` for discrete variables.\n ax: numpy array-like of matplotlib axes or bokeh figures, optional\n A 2D array of locations into which to plot the densities. If not supplied, Arviz will create\n its own array of plot areas (and return it).\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show: bool, optional\n Call backend show function.\n **kwargs\n Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.\n\n Returns\n -------\n axes: matplotlib axes or bokeh figures\n\n Examples\n --------\n Show a default kernel density plot following style of John Kruschke\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('centered_eight')\n >>> az.plot_posterior(data)\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'])\n\n Plot Region of Practical Equivalence (rope) and select variables with regular expressions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', '^the'], filter_vars=\"regex\", rope=(-1, 1))\n\n Plot Region of Practical Equivalence for selected distributions\n\n .. plot::\n :context: close-figs\n\n >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)\n\n\n Add reference lines\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)\n\n Show point estimate of distribution\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')\n\n Show reference values using variable names and coordinates\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val= {\"theta\": [{\"school\": \"Deerfield\", \"ref_val\": 4},\n ... {\"school\": \"Choate\", \"ref_val\": 3}]})\n\n Show reference values using a list\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])\n\n\n Plot posterior as a histogram\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], kind='hist')\n\n Change size of highest density interval\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], hdi_prob=.75)\n \"\"\"\n data = convert_to_dataset(data, group=group)\n if transform is not None:\n data = transform(data)\n var_names = _var_names(var_names, data, filter_vars)\n\n if coords is None:\n coords = {}\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n elif hdi_prob not in (None, \"hide\"):\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n if point_estimate == \"auto\":\n point_estimate = rcParams[\"plot.point_estimate\"]\n elif point_estimate not in {\"mean\", \"median\", \"mode\", None}:\n raise ValueError(\"The value of point_estimate must be either mean, median, mode or None.\")\n\n plotters = filter_plotters_list(\n list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),\n \"plot_posterior\",\n )\n length_plotters = len(plotters)\n rows, cols = default_grid(length_plotters, grid=grid)\n\n posteriorplot_kwargs = dict(\n ax=ax,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n plotters=plotters,\n bw=bw,\n circular=circular,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n skipna=skipna,\n textsize=textsize,\n ref_val=ref_val,\n rope=rope,\n kwargs=kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_posterior\", \"posteriorplot\", backend)\n ax = plot(**posteriorplot_kwargs)\n return ax\n", "path": "arviz/plots/posteriorplot.py"}]} | 3,556 | 196 |
gh_patches_debug_25641 | rasdani/github-patches | git_diff | sublimelsp__LSP-1573 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Rename…" code action is broken
(v1.1.6) The _Rename_ code action doesn’t work correctly. I wasn’t able to use it in VSCode so I can’t tell whether it comes from the language server of the Sublime extension.
Here is a minimal example:
```elm
module Test exposing (..)
import Html exposing (Html)
view : Html msg
view =
Html.text body
body : String
body =
"..."
```
When renaming `body` for instance:
* if the new name has the same length, it seems to work fine
* if the new name is longer (`bodyxyz` in the example below), the last few characters are duplicated:
```elm
view : Html msg
view =
Html.text bodyxyzxyz
bodyxyzxyz : String
bodyxyzxyz =
"..."
```
* if the new name is shorter (`a` in this example), well…
```elm
view : Html msg
view =
Html.text aaString
a "..."
```
</issue>
<code>
[start of plugin/core/edit.py]
1 from .logging import debug
2 from .open import open_file
3 from .promise import Promise
4 from .typing import List, Dict, Any, Iterable, Optional, Tuple
5 from .url import uri_to_filename
6 from functools import partial
7 import operator
8 import sublime
9
10
11 # tuple of start, end, newText, version
12 TextEdit = Tuple[Tuple[int, int], Tuple[int, int], str, Optional[int]]
13
14
15 def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:
16 changes = {} # type: Dict[str, List[TextEdit]]
17 raw_changes = workspace_edit.get('changes')
18 if isinstance(raw_changes, dict):
19 for uri, file_changes in raw_changes.items():
20 changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)
21 document_changes = workspace_edit.get('documentChanges')
22 if isinstance(document_changes, list):
23 for document_change in document_changes:
24 if 'kind' in document_change:
25 debug('Ignoring unsupported "resourceOperations" edit type')
26 continue
27 uri = document_change.get('textDocument').get('uri')
28 version = document_change.get('textDocument').get('version')
29 text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))
30 changes.setdefault(uri_to_filename(uri), []).extend(text_edit)
31 return changes
32
33
34 def parse_range(range: Dict[str, int]) -> Tuple[int, int]:
35 return range['line'], range['character']
36
37
38 def parse_text_edit(text_edit: Dict[str, Any], version: int = None) -> TextEdit:
39 return (
40 parse_range(text_edit['range']['start']),
41 parse_range(text_edit['range']['end']),
42 # Strip away carriage returns -- SublimeText takes care of that.
43 text_edit.get('newText', '').replace("\r", ""),
44 version
45 )
46
47
48 def sort_by_application_order(changes: Iterable[TextEdit]) -> List[TextEdit]:
49 # The spec reads:
50 # > However, it is possible that multiple edits have the same start position: multiple
51 # > inserts, or any number of inserts followed by a single remove or replace edit. If
52 # > multiple inserts have the same position, the order in the array defines the order in
53 # > which the inserted strings appear in the resulting text.
54 # So we sort by start position. But if multiple text edits start at the same position,
55 # we use the index in the array as the key.
56
57 return list(sorted(changes, key=operator.itemgetter(0)))
58
59
60 def apply_workspace_edit(window: sublime.Window, changes: Dict[str, List[TextEdit]]) -> Promise:
61 """Apply workspace edits. This function must be called from the main thread!"""
62 return Promise.all([open_file(window, fn).then(partial(_apply_edits, edits)) for fn, edits in changes.items()])
63
64
65 def _apply_edits(edits: List[TextEdit], view: Optional[sublime.View]) -> None:
66 if view and view.is_valid():
67 # Text commands run blocking. After this call has returned the changes are applied.
68 view.run_command("lsp_apply_document_edit", {"changes": edits})
69
[end of plugin/core/edit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/core/edit.py b/plugin/core/edit.py
--- a/plugin/core/edit.py
+++ b/plugin/core/edit.py
@@ -14,10 +14,6 @@
def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:
changes = {} # type: Dict[str, List[TextEdit]]
- raw_changes = workspace_edit.get('changes')
- if isinstance(raw_changes, dict):
- for uri, file_changes in raw_changes.items():
- changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)
document_changes = workspace_edit.get('documentChanges')
if isinstance(document_changes, list):
for document_change in document_changes:
@@ -28,6 +24,11 @@
version = document_change.get('textDocument').get('version')
text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))
changes.setdefault(uri_to_filename(uri), []).extend(text_edit)
+ else:
+ raw_changes = workspace_edit.get('changes')
+ if isinstance(raw_changes, dict):
+ for uri, file_changes in raw_changes.items():
+ changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)
return changes
| {"golden_diff": "diff --git a/plugin/core/edit.py b/plugin/core/edit.py\n--- a/plugin/core/edit.py\n+++ b/plugin/core/edit.py\n@@ -14,10 +14,6 @@\n \n def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:\n changes = {} # type: Dict[str, List[TextEdit]]\n- raw_changes = workspace_edit.get('changes')\n- if isinstance(raw_changes, dict):\n- for uri, file_changes in raw_changes.items():\n- changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n document_changes = workspace_edit.get('documentChanges')\n if isinstance(document_changes, list):\n for document_change in document_changes:\n@@ -28,6 +24,11 @@\n version = document_change.get('textDocument').get('version')\n text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))\n changes.setdefault(uri_to_filename(uri), []).extend(text_edit)\n+ else:\n+ raw_changes = workspace_edit.get('changes')\n+ if isinstance(raw_changes, dict):\n+ for uri, file_changes in raw_changes.items():\n+ changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n return changes\n", "issue": "\"Rename\u2026\" code action is broken\n(v1.1.6) The _Rename_ code action doesn\u2019t work correctly. I wasn\u2019t able to use it in VSCode so I can\u2019t tell whether it comes from the language server of the Sublime extension.\r\n\r\nHere is a minimal example:\r\n\r\n```elm\r\nmodule Test exposing (..)\r\n\r\nimport Html exposing (Html)\r\n\r\n\r\nview : Html msg\r\nview =\r\n Html.text body\r\n\r\n\r\nbody : String\r\nbody =\r\n \"...\"\r\n```\r\n\r\nWhen renaming `body` for instance:\r\n\r\n* if the new name has the same length, it seems to work fine\r\n* if the new name is longer (`bodyxyz` in the example below), the last few characters are duplicated:\r\n```elm\r\n\r\nview : Html msg\r\nview =\r\n Html.text bodyxyzxyz\r\n\r\n\r\nbodyxyzxyz : String\r\nbodyxyzxyz =\r\n \"...\"\r\n```\r\n* if the new name is shorter (`a` in this example), well\u2026\r\n```elm\r\nview : Html msg\r\nview =\r\n Html.text aaString\r\na \"...\"\r\n```\n", "before_files": [{"content": "from .logging import debug\nfrom .open import open_file\nfrom .promise import Promise\nfrom .typing import List, Dict, Any, Iterable, Optional, Tuple\nfrom .url import uri_to_filename\nfrom functools import partial\nimport operator\nimport sublime\n\n\n# tuple of start, end, newText, version\nTextEdit = Tuple[Tuple[int, int], Tuple[int, int], str, Optional[int]]\n\n\ndef parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:\n changes = {} # type: Dict[str, List[TextEdit]]\n raw_changes = workspace_edit.get('changes')\n if isinstance(raw_changes, dict):\n for uri, file_changes in raw_changes.items():\n changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n document_changes = workspace_edit.get('documentChanges')\n if isinstance(document_changes, list):\n for document_change in document_changes:\n if 'kind' in document_change:\n debug('Ignoring unsupported \"resourceOperations\" edit type')\n continue\n uri = document_change.get('textDocument').get('uri')\n version = document_change.get('textDocument').get('version')\n text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))\n changes.setdefault(uri_to_filename(uri), []).extend(text_edit)\n return changes\n\n\ndef parse_range(range: Dict[str, int]) -> Tuple[int, int]:\n return range['line'], range['character']\n\n\ndef 
parse_text_edit(text_edit: Dict[str, Any], version: int = None) -> TextEdit:\n return (\n parse_range(text_edit['range']['start']),\n parse_range(text_edit['range']['end']),\n # Strip away carriage returns -- SublimeText takes care of that.\n text_edit.get('newText', '').replace(\"\\r\", \"\"),\n version\n )\n\n\ndef sort_by_application_order(changes: Iterable[TextEdit]) -> List[TextEdit]:\n # The spec reads:\n # > However, it is possible that multiple edits have the same start position: multiple\n # > inserts, or any number of inserts followed by a single remove or replace edit. If\n # > multiple inserts have the same position, the order in the array defines the order in\n # > which the inserted strings appear in the resulting text.\n # So we sort by start position. But if multiple text edits start at the same position,\n # we use the index in the array as the key.\n\n return list(sorted(changes, key=operator.itemgetter(0)))\n\n\ndef apply_workspace_edit(window: sublime.Window, changes: Dict[str, List[TextEdit]]) -> Promise:\n \"\"\"Apply workspace edits. This function must be called from the main thread!\"\"\"\n return Promise.all([open_file(window, fn).then(partial(_apply_edits, edits)) for fn, edits in changes.items()])\n\n\ndef _apply_edits(edits: List[TextEdit], view: Optional[sublime.View]) -> None:\n if view and view.is_valid():\n # Text commands run blocking. After this call has returned the changes are applied.\n view.run_command(\"lsp_apply_document_edit\", {\"changes\": edits})\n", "path": "plugin/core/edit.py"}]} | 1,558 | 282 |
gh_patches_debug_14435 | rasdani/github-patches | git_diff | fossasia__open-event-server-5247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inconsistent data on Admin Statistics end points
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server
**Current behavior:**
<!-- Describe how the bug manifests. -->
The admin statistics end points return various stats about events, users etc.
Currently they are completely out of sync with the database.
For instance, the `admin/statisitics/events` returns

Where as the actual event count when generically querying for events is 92:

**Expected behavior:**
<!-- Describe what the behavior would be without the bug. -->
The counts should be consistent.
</issue>
<code>
[start of app/api/schema/admin_statistics_schema/events.py]
1 from marshmallow_jsonapi.flask import Schema
2 from marshmallow_jsonapi import fields
3 from app.models.event import Event
4 from app.api.helpers.db import get_count
5 from app.api.helpers.utilities import dasherize
6 from datetime import datetime
7 import pytz
8
9
10 class AdminStatisticsEventSchema(Schema):
11 """
12 Api schema
13 """
14 class Meta:
15 """
16 Meta class
17 """
18 type_ = 'admin-statistics-event'
19 self_view = 'v1.admin_statistics_event_detail'
20 inflect = dasherize
21
22 id = fields.String()
23 draft = fields.Method("events_draft_count")
24 published = fields.Method("events_published_count")
25 past = fields.Method("events_past_count")
26
27 def events_draft_count(self, obj):
28 return get_count(Event.query.filter_by(state='draft'))
29
30 def events_published_count(self, obj):
31 return get_count(Event.query.filter_by(state='published'))
32
33 def events_past_count(self, obj):
34 return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))
35
[end of app/api/schema/admin_statistics_schema/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/schema/admin_statistics_schema/events.py b/app/api/schema/admin_statistics_schema/events.py
--- a/app/api/schema/admin_statistics_schema/events.py
+++ b/app/api/schema/admin_statistics_schema/events.py
@@ -25,10 +25,12 @@
past = fields.Method("events_past_count")
def events_draft_count(self, obj):
- return get_count(Event.query.filter_by(state='draft'))
+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))
+ return get_count(events.filter_by(state='draft'))
def events_published_count(self, obj):
- return get_count(Event.query.filter_by(state='published'))
+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))
+ return get_count(events.filter_by(state='published'))
def events_past_count(self, obj):
return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))
| {"golden_diff": "diff --git a/app/api/schema/admin_statistics_schema/events.py b/app/api/schema/admin_statistics_schema/events.py\n--- a/app/api/schema/admin_statistics_schema/events.py\n+++ b/app/api/schema/admin_statistics_schema/events.py\n@@ -25,10 +25,12 @@\n past = fields.Method(\"events_past_count\")\n \n def events_draft_count(self, obj):\n- return get_count(Event.query.filter_by(state='draft'))\n+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))\n+ return get_count(events.filter_by(state='draft'))\n \n def events_published_count(self, obj):\n- return get_count(Event.query.filter_by(state='published'))\n+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))\n+ return get_count(events.filter_by(state='published'))\n \n def events_past_count(self, obj):\n return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))\n", "issue": "Inconsistent data on Admin Statistics end points\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\nThe admin statistics end points return various stats about events, users etc.\r\nCurrently they are completely out of sync with the database.\r\nFor instance, the `admin/statisitics/events` returns \r\n\r\n\r\nWhere as the actual event count when generically querying for events is 92:\r\n\r\n\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\nThe counts should be consistent.\n", "before_files": [{"content": "from marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom app.models.event import Event\nfrom app.api.helpers.db import get_count\nfrom app.api.helpers.utilities import dasherize\nfrom datetime import datetime\nimport pytz\n\n\nclass AdminStatisticsEventSchema(Schema):\n \"\"\"\n Api schema\n \"\"\"\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'admin-statistics-event'\n self_view = 'v1.admin_statistics_event_detail'\n inflect = dasherize\n\n id = fields.String()\n draft = fields.Method(\"events_draft_count\")\n published = fields.Method(\"events_published_count\")\n past = fields.Method(\"events_past_count\")\n\n def events_draft_count(self, obj):\n return get_count(Event.query.filter_by(state='draft'))\n\n def events_published_count(self, obj):\n return get_count(Event.query.filter_by(state='published'))\n\n def events_past_count(self, obj):\n return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))\n", "path": "app/api/schema/admin_statistics_schema/events.py"}]} | 1,112 | 202 |
gh_patches_debug_28999 | rasdani/github-patches | git_diff | biopython__biopython-3662 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Order of coordinates of a multi-part feature that maps to the reverse strand may be 'wrong' in a GenBank file generated from a SnapGene file?
### Setup
I am reporting a problem with Biopython version, Python version, and operating
system as follows:
```
3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37)
[GCC 9.3.0]
CPython
Linux-5.4.72-microsoft-standard-WSL2-x86_64-with-debian-bullseye-sid
1.79
```
### Expected behaviour
Cloning programs/webapps such as SnapGene or Benchling should display all features included in a GenBank file that has been converted from a SnapGene file with BioPython.
### Actual behaviour
Cloning programs/webapps such as SnapGene or Benchling do not display all features included in a GenBank file that has been converted from a SnapGene file with BioPython. The problematic features are those that contain multiple elements on the reverse strand, for example `complement(join(3873..3941,3081..3872))`. If I reverse the order of those elements, that is, `complement(join(3081..3872,3873..3941))`, then everything's good, and the feature is shown in my fav cloning programs/webapp. I guess the elements of a feature that maps to the reverse/complement strand should nonetheless be listed in an 'increasing order'?
Although it is most likely a coincidence, I also noticed that for the feature mentioned above, BioPython fails to include one of the notes that SnapGene does include in the exported file (the one at the very bottom of the left panel).

### Steps to reproduce
I am attaching the original SnapGene map and those converted to GenBank with SnapGene and BioPython to this message.
[test_plasmid.zip](https://github.com/biopython/biopython/files/6765312/test_plasmid.zip)
</issue>
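For context, here is a minimal illustrative sketch of the ordering behaviour described above (the coordinates are the ones quoted in the issue; the variable names and the snippet itself are illustrative, not part of the reported code):
```
# Illustrative only: SnapGene ranges 3873-3941 and 3081-3872 converted to
# Biopython's 0-based coordinates and joined in the two possible orders.
from Bio.SeqFeature import FeatureLocation

first_segment = FeatureLocation(3872, 3941, strand=-1)   # segment listed first in the SnapGene file
second_segment = FeatureLocation(3080, 3872, strand=-1)

# Appending keeps the file order and, per the issue, is written out as
# complement(join(3873..3941,3081..3872)) -- parts not in increasing order.
appended = first_segment + second_segment

# Prepending reverses the order for the minus strand and yields
# complement(join(3081..3872,3873..3941)) -- parts in increasing order.
prepended = second_segment + first_segment

print(appended)
print(prepended)
```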
<code>
[start of Bio/SeqIO/SnapGeneIO.py]
1 # Copyright 2017-2019 Damien Goutte-Gattat. All rights reserved.
2 #
3 # This file is part of the Biopython distribution and governed by your
4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
5 # Please see the LICENSE file that should have been included as part of this
6 # package.
7 """Bio.SeqIO support for the SnapGene file format.
8
9 The SnapGene binary format is the native format used by the SnapGene program
10 from GSL Biotech LLC.
11 """
12 from datetime import datetime
13 from re import sub
14 from struct import unpack
15 from xml.dom.minidom import parseString
16
17 from Bio.Seq import Seq
18 from Bio.SeqFeature import FeatureLocation
19 from Bio.SeqFeature import SeqFeature
20 from Bio.SeqRecord import SeqRecord
21
22 from .Interfaces import SequenceIterator
23
24
25 def _iterate(handle):
26 """Iterate over the packets of a SnapGene file.
27
28 A SnapGene file is made of packets, each packet being a TLV-like
29 structure comprising:
30
31 - 1 single byte indicating the packet's type;
32 - 1 big-endian long integer (4 bytes) indicating the length of the
33 packet's data;
34 - the actual data.
35 """
36 while True:
37 packet_type = handle.read(1)
38 if len(packet_type) < 1: # No more packet
39 return
40 packet_type = unpack(">B", packet_type)[0]
41
42 length = handle.read(4)
43 if len(length) < 4:
44 raise ValueError("Unexpected end of packet")
45 length = unpack(">I", length)[0]
46
47 data = handle.read(length)
48 if len(data) < length:
49 raise ValueError("Unexpected end of packet")
50
51 yield (packet_type, length, data)
52
53
54 def _parse_dna_packet(length, data, record):
55 """Parse a DNA sequence packet.
56
57 A DNA sequence packet contains a single byte flag followed by the
58 sequence itself.
59 """
60 if record.seq:
61 raise ValueError("The file contains more than one DNA packet")
62
63 flags, sequence = unpack(">B%ds" % (length - 1), data)
64 record.seq = Seq(sequence.decode("ASCII"))
65 record.annotations["molecule_type"] = "DNA"
66 if flags & 0x01:
67 record.annotations["topology"] = "circular"
68 else:
69 record.annotations["topology"] = "linear"
70
71
72 def _parse_notes_packet(length, data, record):
73 """Parse a 'Notes' packet.
74
75 This type of packet contains some metadata about the sequence. They
76 are stored as a XML string with a 'Notes' root node.
77 """
78 xml = parseString(data.decode("UTF-8"))
79 type = _get_child_value(xml, "Type")
80 if type == "Synthetic":
81 record.annotations["data_file_division"] = "SYN"
82 else:
83 record.annotations["data_file_division"] = "UNC"
84
85 date = _get_child_value(xml, "LastModified")
86 if date:
87 record.annotations["date"] = datetime.strptime(date, "%Y.%m.%d")
88
89 acc = _get_child_value(xml, "AccessionNumber")
90 if acc:
91 record.id = acc
92
93 comment = _get_child_value(xml, "Comments")
94 if comment:
95 record.name = comment.split(" ", 1)[0]
96 record.description = comment
97 if not acc:
98 record.id = record.name
99
100
101 def _parse_cookie_packet(length, data, record):
102 """Parse a SnapGene cookie packet.
103
104 Every SnapGene file starts with a packet of this type. It acts as
105 a magic cookie identifying the file as a SnapGene file.
106 """
107 cookie, seq_type, exp_version, imp_version = unpack(">8sHHH", data)
108 if cookie.decode("ASCII") != "SnapGene":
109 raise ValueError("The file is not a valid SnapGene file")
110
111
112 def _parse_location(rangespec, strand, record):
113 start, end = [int(x) for x in rangespec.split("-")]
114 # Account for SnapGene's 1-based coordinates
115 start = start - 1
116 if start > end:
117 # Range wrapping the end of the sequence
118 l1 = FeatureLocation(start, len(record), strand=strand)
119 l2 = FeatureLocation(0, end, strand=strand)
120 location = l1 + l2
121 else:
122 location = FeatureLocation(start, end, strand=strand)
123 return location
124
125
126 def _parse_features_packet(length, data, record):
127 """Parse a sequence features packet.
128
129 This packet stores sequence features (except primer binding sites,
130 which are in a dedicated Primers packet). The data is a XML string
131 starting with a 'Features' root node.
132 """
133 xml = parseString(data.decode("UTF-8"))
134 for feature in xml.getElementsByTagName("Feature"):
135 quals = {}
136
137 type = _get_attribute_value(feature, "type", default="misc_feature")
138
139 strand = +1
140 directionality = int(
141 _get_attribute_value(feature, "directionality", default="1")
142 )
143 if directionality == 2:
144 strand = -1
145
146 location = None
147 for segment in feature.getElementsByTagName("Segment"):
148 rng = _get_attribute_value(segment, "range")
149 if not location:
150 location = _parse_location(rng, strand, record)
151 else:
152 location = location + _parse_location(rng, strand, record)
153 if not location:
154 raise ValueError("Missing feature location")
155
156 for qualifier in feature.getElementsByTagName("Q"):
157 qname = _get_attribute_value(
158 qualifier, "name", error="Missing qualifier name"
159 )
160 qvalues = []
161 for value in qualifier.getElementsByTagName("V"):
162 if value.hasAttribute("text"):
163 qvalues.append(_decode(value.attributes["text"].value))
164 elif value.hasAttribute("predef"):
165 qvalues.append(_decode(value.attributes["predef"].value))
166 elif value.hasAttribute("int"):
167 qvalues.append(int(value.attributes["int"].value))
168 quals[qname] = qvalues
169
170 name = _get_attribute_value(feature, "name")
171 if name:
172 if "label" not in quals:
173 # No explicit label attribute, use the SnapGene name
174 quals["label"] = [name]
175 elif name not in quals["label"]:
176 # The SnapGene name is different from the label,
177 # add a specific attribute to represent it
178 quals["name"] = [name]
179
180 feature = SeqFeature(location, type=type, qualifiers=quals)
181 record.features.append(feature)
182
183
184 def _parse_primers_packet(length, data, record):
185 """Parse a Primers packet.
186
187 A Primers packet is similar to a Features packet but specifically
188 stores primer binding features. The data is a XML string starting
189 with a 'Primers' root node.
190 """
191 xml = parseString(data.decode("UTF-8"))
192 for primer in xml.getElementsByTagName("Primer"):
193 quals = {}
194
195 name = _get_attribute_value(primer, "name")
196 if name:
197 quals["label"] = [name]
198
199 for site in primer.getElementsByTagName("BindingSite"):
200 rng = _get_attribute_value(
201 site, "location", error="Missing binding site location"
202 )
203 strand = int(_get_attribute_value(site, "boundStrand", default="0"))
204 if strand == 1:
205 strand = -1
206 else:
207 strand = +1
208
209 feature = SeqFeature(
210 _parse_location(rng, strand, record),
211 type="primer_bind",
212 qualifiers=quals,
213 )
214 record.features.append(feature)
215
216
217 _packet_handlers = {
218 0x00: _parse_dna_packet,
219 0x05: _parse_primers_packet,
220 0x06: _parse_notes_packet,
221 0x0A: _parse_features_packet,
222 }
223
224
225 # Helper functions to process the XML data in
226 # some of the segments
227
228
229 def _decode(text):
230 # Get rid of HTML tags in some values
231 return sub("<[^>]+>", "", text)
232
233
234 def _get_attribute_value(node, name, default=None, error=None):
235 if node.hasAttribute(name):
236 return _decode(node.attributes[name].value)
237 elif error:
238 raise ValueError(error)
239 else:
240 return default
241
242
243 def _get_child_value(node, name, default=None, error=None):
244 children = node.getElementsByTagName(name)
245 if (
246 children
247 and children[0].childNodes
248 and children[0].firstChild.nodeType == node.TEXT_NODE
249 ):
250 return _decode(children[0].firstChild.data)
251 elif error:
252 raise ValueError(error)
253 else:
254 return default
255
256
257 class SnapGeneIterator(SequenceIterator):
258 """Parser for SnapGene files."""
259
260 def __init__(self, source):
261 """Parse a SnapGene file and return a SeqRecord object.
262
263 Argument source is a file-like object or a path to a file.
264
265 Note that a SnapGene file can only contain one sequence, so this
266 iterator will always return a single record.
267 """
268 super().__init__(source, mode="b", fmt="SnapGene")
269
270 def parse(self, handle):
271 """Start parsing the file, and return a SeqRecord generator."""
272 records = self.iterate(handle)
273 return records
274
275 def iterate(self, handle):
276 """Iterate over the records in the SnapGene file."""
277 record = SeqRecord(None)
278 packets = _iterate(handle)
279 try:
280 packet_type, length, data = next(packets)
281 except StopIteration:
282 raise ValueError("Empty file.") from None
283
284 if packet_type != 0x09:
285 raise ValueError("The file does not start with a SnapGene cookie packet")
286 _parse_cookie_packet(length, data, record)
287
288 for (packet_type, length, data) in packets:
289 handler = _packet_handlers.get(packet_type)
290 if handler is not None:
291 handler(length, data, record)
292
293 if not record.seq:
294 raise ValueError("No DNA packet in file")
295
296 yield record
297
[end of Bio/SeqIO/SnapGeneIO.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Bio/SeqIO/SnapGeneIO.py b/Bio/SeqIO/SnapGeneIO.py
--- a/Bio/SeqIO/SnapGeneIO.py
+++ b/Bio/SeqIO/SnapGeneIO.py
@@ -144,12 +144,33 @@
strand = -1
location = None
+ subparts = []
+ n_parts = 0
for segment in feature.getElementsByTagName("Segment"):
+ if _get_attribute_value(segment, "type", "standard") == "gap":
+ continue
rng = _get_attribute_value(segment, "range")
+ n_parts += 1
+ next_location = _parse_location(rng, strand, record)
if not location:
- location = _parse_location(rng, strand, record)
+ location = next_location
+ elif strand == -1:
+ # Reverse segments order for reverse-strand features
+ location = next_location + location
else:
- location = location + _parse_location(rng, strand, record)
+ location = location + next_location
+
+ name = _get_attribute_value(segment, "name")
+ if name:
+ subparts.append([n_parts, name])
+
+ if len(subparts) > 0:
+ # Add a "parts" qualifiers to represent "named subfeatures"
+ if strand == -1:
+ # Reverse segment indexes and order for reverse-strand features
+ subparts = reversed([[n_parts - i + 1, name] for i, name in subparts])
+ quals["parts"] = [";".join("{}:{}".format(i, name) for i, name in subparts)]
+
if not location:
raise ValueError("Missing feature location")
@@ -221,7 +242,6 @@
0x0A: _parse_features_packet,
}
-
# Helper functions to process the XML data in
# some of the segments
| {"golden_diff": "diff --git a/Bio/SeqIO/SnapGeneIO.py b/Bio/SeqIO/SnapGeneIO.py\n--- a/Bio/SeqIO/SnapGeneIO.py\n+++ b/Bio/SeqIO/SnapGeneIO.py\n@@ -144,12 +144,33 @@\n strand = -1\n \n location = None\n+ subparts = []\n+ n_parts = 0\n for segment in feature.getElementsByTagName(\"Segment\"):\n+ if _get_attribute_value(segment, \"type\", \"standard\") == \"gap\":\n+ continue\n rng = _get_attribute_value(segment, \"range\")\n+ n_parts += 1\n+ next_location = _parse_location(rng, strand, record)\n if not location:\n- location = _parse_location(rng, strand, record)\n+ location = next_location\n+ elif strand == -1:\n+ # Reverse segments order for reverse-strand features\n+ location = next_location + location\n else:\n- location = location + _parse_location(rng, strand, record)\n+ location = location + next_location\n+\n+ name = _get_attribute_value(segment, \"name\")\n+ if name:\n+ subparts.append([n_parts, name])\n+\n+ if len(subparts) > 0:\n+ # Add a \"parts\" qualifiers to represent \"named subfeatures\"\n+ if strand == -1:\n+ # Reverse segment indexes and order for reverse-strand features\n+ subparts = reversed([[n_parts - i + 1, name] for i, name in subparts])\n+ quals[\"parts\"] = [\";\".join(\"{}:{}\".format(i, name) for i, name in subparts)]\n+\n if not location:\n raise ValueError(\"Missing feature location\")\n \n@@ -221,7 +242,6 @@\n 0x0A: _parse_features_packet,\n }\n \n-\n # Helper functions to process the XML data in\n # some of the segments\n", "issue": "Order of coordinates of a multi-part feature that maps to the reverse strand maybe 'wrong' in GenBank file generated from SnapGene file?\n### Setup\r\n\r\nI am reporting a problem with Biopython version, Python version, and operating\r\nsystem as follows:\r\n\r\n```\r\n3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) \r\n[GCC 9.3.0]\r\nCPython\r\nLinux-5.4.72-microsoft-standard-WSL2-x86_64-with-debian-bullseye-sid\r\n1.79\r\n```\r\n\r\n### Expected behaviour\r\nCloning programs/webapps such as SnapGene or Benchling should display all features included in a GenBank file that has been converted from a SnapGene file with BioPython.\r\n\r\n### Actual behaviour\r\nCloning programs/webapps such as SnapGene or Benchling do not display all features included in a GenBank file that has been converted from a SnapGene file with BioPython. The problematic features are those that contain multiple elements on the reverse strand, for example `complement(join(3873..3941,3081..3872))`. If I reverse the order of those elements, that is, `complement(join(3081..3872,3873..3941))`, then everything's good, and the feature is shown in my fav cloning programs/webapp. I guess the elements of a feature that maps to the reverse/complement strand should nonetheless be listed in an 'increasing order'? \r\n\r\nAlthough it's most likely a coincidence, I also noticed that for the feature mentioned above, BioPython fails to include one of the notes that SnapGene instead includes in the exported file (the one at the very bottom of the left panel)\r\n\r\n\r\n\r\n### Steps to reproduce\r\n\r\nI am attaching the original SnapGene map and those converted to GenBank with SnapGene and BioPython to this message.\r\n\r\n[test_plasmid.zip](https://github.com/biopython/biopython/files/6765312/test_plasmid.zip)\n", "before_files": [{"content": "# Copyright 2017-2019 Damien Goutte-Gattat. 
All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Bio.SeqIO support for the SnapGene file format.\n\nThe SnapGene binary format is the native format used by the SnapGene program\nfrom GSL Biotech LLC.\n\"\"\"\nfrom datetime import datetime\nfrom re import sub\nfrom struct import unpack\nfrom xml.dom.minidom import parseString\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import FeatureLocation\nfrom Bio.SeqFeature import SeqFeature\nfrom Bio.SeqRecord import SeqRecord\n\nfrom .Interfaces import SequenceIterator\n\n\ndef _iterate(handle):\n \"\"\"Iterate over the packets of a SnapGene file.\n\n A SnapGene file is made of packets, each packet being a TLV-like\n structure comprising:\n\n - 1 single byte indicating the packet's type;\n - 1 big-endian long integer (4 bytes) indicating the length of the\n packet's data;\n - the actual data.\n \"\"\"\n while True:\n packet_type = handle.read(1)\n if len(packet_type) < 1: # No more packet\n return\n packet_type = unpack(\">B\", packet_type)[0]\n\n length = handle.read(4)\n if len(length) < 4:\n raise ValueError(\"Unexpected end of packet\")\n length = unpack(\">I\", length)[0]\n\n data = handle.read(length)\n if len(data) < length:\n raise ValueError(\"Unexpected end of packet\")\n\n yield (packet_type, length, data)\n\n\ndef _parse_dna_packet(length, data, record):\n \"\"\"Parse a DNA sequence packet.\n\n A DNA sequence packet contains a single byte flag followed by the\n sequence itself.\n \"\"\"\n if record.seq:\n raise ValueError(\"The file contains more than one DNA packet\")\n\n flags, sequence = unpack(\">B%ds\" % (length - 1), data)\n record.seq = Seq(sequence.decode(\"ASCII\"))\n record.annotations[\"molecule_type\"] = \"DNA\"\n if flags & 0x01:\n record.annotations[\"topology\"] = \"circular\"\n else:\n record.annotations[\"topology\"] = \"linear\"\n\n\ndef _parse_notes_packet(length, data, record):\n \"\"\"Parse a 'Notes' packet.\n\n This type of packet contains some metadata about the sequence. They\n are stored as a XML string with a 'Notes' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n type = _get_child_value(xml, \"Type\")\n if type == \"Synthetic\":\n record.annotations[\"data_file_division\"] = \"SYN\"\n else:\n record.annotations[\"data_file_division\"] = \"UNC\"\n\n date = _get_child_value(xml, \"LastModified\")\n if date:\n record.annotations[\"date\"] = datetime.strptime(date, \"%Y.%m.%d\")\n\n acc = _get_child_value(xml, \"AccessionNumber\")\n if acc:\n record.id = acc\n\n comment = _get_child_value(xml, \"Comments\")\n if comment:\n record.name = comment.split(\" \", 1)[0]\n record.description = comment\n if not acc:\n record.id = record.name\n\n\ndef _parse_cookie_packet(length, data, record):\n \"\"\"Parse a SnapGene cookie packet.\n\n Every SnapGene file starts with a packet of this type. 
It acts as\n a magic cookie identifying the file as a SnapGene file.\n \"\"\"\n cookie, seq_type, exp_version, imp_version = unpack(\">8sHHH\", data)\n if cookie.decode(\"ASCII\") != \"SnapGene\":\n raise ValueError(\"The file is not a valid SnapGene file\")\n\n\ndef _parse_location(rangespec, strand, record):\n start, end = [int(x) for x in rangespec.split(\"-\")]\n # Account for SnapGene's 1-based coordinates\n start = start - 1\n if start > end:\n # Range wrapping the end of the sequence\n l1 = FeatureLocation(start, len(record), strand=strand)\n l2 = FeatureLocation(0, end, strand=strand)\n location = l1 + l2\n else:\n location = FeatureLocation(start, end, strand=strand)\n return location\n\n\ndef _parse_features_packet(length, data, record):\n \"\"\"Parse a sequence features packet.\n\n This packet stores sequence features (except primer binding sites,\n which are in a dedicated Primers packet). The data is a XML string\n starting with a 'Features' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for feature in xml.getElementsByTagName(\"Feature\"):\n quals = {}\n\n type = _get_attribute_value(feature, \"type\", default=\"misc_feature\")\n\n strand = +1\n directionality = int(\n _get_attribute_value(feature, \"directionality\", default=\"1\")\n )\n if directionality == 2:\n strand = -1\n\n location = None\n for segment in feature.getElementsByTagName(\"Segment\"):\n rng = _get_attribute_value(segment, \"range\")\n if not location:\n location = _parse_location(rng, strand, record)\n else:\n location = location + _parse_location(rng, strand, record)\n if not location:\n raise ValueError(\"Missing feature location\")\n\n for qualifier in feature.getElementsByTagName(\"Q\"):\n qname = _get_attribute_value(\n qualifier, \"name\", error=\"Missing qualifier name\"\n )\n qvalues = []\n for value in qualifier.getElementsByTagName(\"V\"):\n if value.hasAttribute(\"text\"):\n qvalues.append(_decode(value.attributes[\"text\"].value))\n elif value.hasAttribute(\"predef\"):\n qvalues.append(_decode(value.attributes[\"predef\"].value))\n elif value.hasAttribute(\"int\"):\n qvalues.append(int(value.attributes[\"int\"].value))\n quals[qname] = qvalues\n\n name = _get_attribute_value(feature, \"name\")\n if name:\n if \"label\" not in quals:\n # No explicit label attribute, use the SnapGene name\n quals[\"label\"] = [name]\n elif name not in quals[\"label\"]:\n # The SnapGene name is different from the label,\n # add a specific attribute to represent it\n quals[\"name\"] = [name]\n\n feature = SeqFeature(location, type=type, qualifiers=quals)\n record.features.append(feature)\n\n\ndef _parse_primers_packet(length, data, record):\n \"\"\"Parse a Primers packet.\n\n A Primers packet is similar to a Features packet but specifically\n stores primer binding features. 
The data is a XML string starting\n with a 'Primers' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for primer in xml.getElementsByTagName(\"Primer\"):\n quals = {}\n\n name = _get_attribute_value(primer, \"name\")\n if name:\n quals[\"label\"] = [name]\n\n for site in primer.getElementsByTagName(\"BindingSite\"):\n rng = _get_attribute_value(\n site, \"location\", error=\"Missing binding site location\"\n )\n strand = int(_get_attribute_value(site, \"boundStrand\", default=\"0\"))\n if strand == 1:\n strand = -1\n else:\n strand = +1\n\n feature = SeqFeature(\n _parse_location(rng, strand, record),\n type=\"primer_bind\",\n qualifiers=quals,\n )\n record.features.append(feature)\n\n\n_packet_handlers = {\n 0x00: _parse_dna_packet,\n 0x05: _parse_primers_packet,\n 0x06: _parse_notes_packet,\n 0x0A: _parse_features_packet,\n}\n\n\n# Helper functions to process the XML data in\n# some of the segments\n\n\ndef _decode(text):\n # Get rid of HTML tags in some values\n return sub(\"<[^>]+>\", \"\", text)\n\n\ndef _get_attribute_value(node, name, default=None, error=None):\n if node.hasAttribute(name):\n return _decode(node.attributes[name].value)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\ndef _get_child_value(node, name, default=None, error=None):\n children = node.getElementsByTagName(name)\n if (\n children\n and children[0].childNodes\n and children[0].firstChild.nodeType == node.TEXT_NODE\n ):\n return _decode(children[0].firstChild.data)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\nclass SnapGeneIterator(SequenceIterator):\n \"\"\"Parser for SnapGene files.\"\"\"\n\n def __init__(self, source):\n \"\"\"Parse a SnapGene file and return a SeqRecord object.\n\n Argument source is a file-like object or a path to a file.\n\n Note that a SnapGene file can only contain one sequence, so this\n iterator will always return a single record.\n \"\"\"\n super().__init__(source, mode=\"b\", fmt=\"SnapGene\")\n\n def parse(self, handle):\n \"\"\"Start parsing the file, and return a SeqRecord generator.\"\"\"\n records = self.iterate(handle)\n return records\n\n def iterate(self, handle):\n \"\"\"Iterate over the records in the SnapGene file.\"\"\"\n record = SeqRecord(None)\n packets = _iterate(handle)\n try:\n packet_type, length, data = next(packets)\n except StopIteration:\n raise ValueError(\"Empty file.\") from None\n\n if packet_type != 0x09:\n raise ValueError(\"The file does not start with a SnapGene cookie packet\")\n _parse_cookie_packet(length, data, record)\n\n for (packet_type, length, data) in packets:\n handler = _packet_handlers.get(packet_type)\n if handler is not None:\n handler(length, data, record)\n\n if not record.seq:\n raise ValueError(\"No DNA packet in file\")\n\n yield record\n", "path": "Bio/SeqIO/SnapGeneIO.py"}]} | 4,071 | 433 |
gh_patches_debug_19428 | rasdani/github-patches | git_diff | horovod__horovod-1904 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.8 incompatibility with nccl_built check
See: https://github.com/huge-success/sanic/issues/1774
```
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:110: in wrapper
retval = f(*args, **kwargs)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:151: in nccl_built
result = _check_extension_lambda(
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:90: in _check_extension_lambda
p.start()
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/process.py:121: in start
self._popen = self._Popen(self)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:224: in _Popen
return _default_context.get_context().Process._Popen(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:283: in _Popen
return Popen(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:32: in __init__
super().__init__(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_fork.py:19: in __init__
self._launch(process_obj)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:47: in _launch
reduction.dump(process_obj, fp)
/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/reduction.py:60: in dump
ForkingPickler(file, protocol).dump(obj)
E AttributeError: Can't pickle local object '_check_extension_lambda.<locals>._target_fn'
```
</issue>
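For context, a minimal standalone sketch of the failure mode (illustrative names, not Horovod's code): under the 'spawn' start method, which became the default on macOS in Python 3.8, the child process is created by pickling the `Process` object, and a function defined inside another function cannot be pickled. Requesting the 'fork' context avoids the pickling step (note that 'fork' is not available on Windows):
```
import multiprocessing


def check_something():
    def _target_fn(queue):                      # local function, like _check_extension_lambda's
        queue.put(True)

    # With multiprocessing.get_context('spawn'), p.start() raises
    # AttributeError: Can't pickle local object 'check_something.<locals>._target_fn'
    ctx = multiprocessing.get_context('fork')
    queue = ctx.Queue()
    p = ctx.Process(target=_target_fn, args=(queue,))
    p.daemon = True
    p.start()
    p.join()
    return queue.get_nowait()


if __name__ == '__main__':
    print(check_something())
```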
<code>
[start of horovod/common/util.py]
1 # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 # Modifications copyright (C) 2019 Uber Technologies, Inc.
3 # Modifications copyright Microsoft
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # =============================================================================
17
18 from contextlib import contextmanager
19 from multiprocessing import Process, Queue
20 import os
21 import sysconfig
22
23 EXTENSIONS = ['tensorflow', 'torch', 'mxnet']
24
25
26 def get_ext_suffix():
27 """Determine library extension for various versions of Python."""
28 ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
29 if ext_suffix:
30 return ext_suffix
31
32 ext_suffix = sysconfig.get_config_var('SO')
33 if ext_suffix:
34 return ext_suffix
35
36 return '.so'
37
38
39 def get_extension_full_path(pkg_path, *args):
40 assert len(args) >= 1
41 dir_path = os.path.join(os.path.dirname(pkg_path), *args[:-1])
42 full_path = os.path.join(dir_path, args[-1] + get_ext_suffix())
43 return full_path
44
45
46 def check_extension(ext_name, ext_env_var, pkg_path, *args):
47 full_path = get_extension_full_path(pkg_path, *args)
48 if not os.path.exists(full_path):
49 raise ImportError(
50 'Extension %s has not been built. If this is not expected, reinstall '
51 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))
52
53
54 def _check_extension_lambda(ext_base_name, fn, fn_desc, verbose):
55 """
56 Tries to load the extension in a new process. If successful, puts fn(ext)
57 to the queue or False otherwise. Mutes all stdout/stderr.
58 """
59 def _target_fn(ext_base_name, fn, fn_desc, queue, verbose):
60 import importlib
61 import sys
62 import traceback
63
64 if verbose:
65 print('Checking whether extension {ext_base_name} was {fn_desc}.'.format(
66 ext_base_name=ext_base_name, fn_desc=fn_desc))
67 else:
68 # Suppress output
69 sys.stdout = open(os.devnull, 'w')
70 sys.stderr = open(os.devnull, 'w')
71
72 try:
73 ext = importlib.import_module('.' + ext_base_name, 'horovod')
74 result = fn(ext)
75 except:
76 traceback.print_exc()
77 result = None
78
79 if verbose:
80 print('Extension {ext_base_name} {flag} {fn_desc}.'.format(
81 ext_base_name=ext_base_name, flag=('was' if result else 'was NOT'),
82 fn_desc=fn_desc))
83
84 queue.put(result)
85
86 queue = Queue()
87 p = Process(target=_target_fn,
88 args=(ext_base_name, fn, fn_desc, queue, verbose))
89 p.daemon = True
90 p.start()
91 p.join()
92 return queue.get_nowait()
93
94
95 def extension_available(ext_base_name, verbose=False):
96 available_fn = lambda ext: ext is not None
97 return _check_extension_lambda(
98 ext_base_name, available_fn, 'built', verbose) or False
99
100
101 def _cache(f):
102 cache = dict()
103
104 def wrapper(*args, **kwargs):
105 key = (args, frozenset(kwargs.items()))
106
107 if key in cache:
108 return cache[key]
109 else:
110 retval = f(*args, **kwargs)
111 cache[key] = retval
112 return retval
113
114 return wrapper
115
116
117 @_cache
118 def gpu_available(ext_base_name, verbose=False):
119 available_fn = lambda ext: ext._check_has_gpu()
120 return _check_extension_lambda(
121 ext_base_name, available_fn, 'running with GPU', verbose) or False
122
123
124 @_cache
125 def mpi_built(verbose=False):
126 for ext_base_name in EXTENSIONS:
127 built_fn = lambda ext: ext.mpi_built()
128 result = _check_extension_lambda(
129 ext_base_name, built_fn, 'built with MPI', verbose)
130 if result is not None:
131 return result
132 return False
133
134
135 @_cache
136 def gloo_built(verbose=False):
137 for ext_base_name in EXTENSIONS:
138 built_fn = lambda ext: ext.gloo_built()
139 result = _check_extension_lambda(
140 ext_base_name, built_fn, 'built with Gloo', verbose)
141 if result is not None:
142 return result
143 raise RuntimeError('Failed to determine if Gloo support has been built. '
144 'Run again with --verbose for more details.')
145
146
147 @_cache
148 def nccl_built(verbose=False):
149 for ext_base_name in EXTENSIONS:
150 built_fn = lambda ext: ext.nccl_built()
151 result = _check_extension_lambda(
152 ext_base_name, built_fn, 'built with NCCL', verbose)
153 if result is not None:
154 return result
155 raise RuntimeError('Failed to determine if NCCL support has been built. '
156 'Run again with --verbose for more details.')
157
158
159 @_cache
160 def ddl_built(verbose=False):
161 for ext_base_name in EXTENSIONS:
162 built_fn = lambda ext: ext.ddl_built()
163 result = _check_extension_lambda(
164 ext_base_name, built_fn, 'built with DDL', verbose)
165 if result is not None:
166 return result
167 raise RuntimeError('Failed to determine if DDL support has been built. '
168 'Run again with --verbose for more details.')
169
170
171 @_cache
172 def ccl_built(verbose=False):
173 for ext_base_name in EXTENSIONS:
174 built_fn = lambda ext: ext.ccl_built()
175 result = _check_extension_lambda(
176 ext_base_name, built_fn, 'built with CCL', verbose)
177 if result is not None:
178 return result
179 raise RuntimeError('Failed to determine if CCL support has been built. '
180 'Run again with --verbose for more details.')
181
182
183 @contextmanager
184 def env(**kwargs):
185 # ignore args with None values
186 for k in list(kwargs.keys()):
187 if kwargs[k] is None:
188 del kwargs[k]
189
190 # backup environment
191 backup = {}
192 for k in kwargs.keys():
193 backup[k] = os.environ.get(k)
194
195 # set new values & yield
196 for k, v in kwargs.items():
197 os.environ[k] = v
198
199 try:
200 yield
201 finally:
202 # restore environment
203 for k in kwargs.keys():
204 if backup[k] is not None:
205 os.environ[k] = backup[k]
206 else:
207 del os.environ[k]
208
209
210 def get_average_backwards_compatibility_fun(reduce_ops):
211 """
212 Handle backwards compatibility between the old average and the new op parameters.
213 Old code using the average parameter (e.g. hvd.allreduce(tensor, average=False))
214 gets unchanged behavior, but mixing old and new is disallowed (e.g. no
215 hvd.allreduce(tensor, average=False, op=hvd.Adasum)).
216 """
217 def impl(op, average):
218 if op != None:
219 if average != None:
220 raise ValueError('The op parameter supersedes average. Please provide only one of them.')
221 return op
222 elif average != None:
223 return reduce_ops.Average if average else reduce_ops.Sum
224 else:
225 return reduce_ops.Average
226 return impl
227
228
229 def num_rank_is_power_2(num_rank):
230 """
231 Tests if the given number of ranks is of power of 2. This check is required
232 for Adasum allreduce.
233 TODO support non-power of 2 ranks.
234 """
235 return num_rank != 0 and ((num_rank & (num_rank -1)) == 0)
236
[end of horovod/common/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/horovod/common/util.py b/horovod/common/util.py
--- a/horovod/common/util.py
+++ b/horovod/common/util.py
@@ -15,11 +15,15 @@
# limitations under the License.
# =============================================================================
-from contextlib import contextmanager
-from multiprocessing import Process, Queue
+import multiprocessing
import os
+import sys
import sysconfig
+from contextlib import contextmanager
+
+
+_PY3 = sys.version_info[0] == 3
EXTENSIONS = ['tensorflow', 'torch', 'mxnet']
@@ -83,9 +87,11 @@
queue.put(result)
- queue = Queue()
- p = Process(target=_target_fn,
- args=(ext_base_name, fn, fn_desc, queue, verbose))
+ # 'fork' is required because horovodrun is a frozen executable
+ ctx = multiprocessing.get_context('fork') if _PY3 else multiprocessing
+ queue = ctx.Queue()
+ p = ctx.Process(target=_target_fn,
+ args=(ext_base_name, fn, fn_desc, queue, verbose))
p.daemon = True
p.start()
p.join()
| {"golden_diff": "diff --git a/horovod/common/util.py b/horovod/common/util.py\n--- a/horovod/common/util.py\n+++ b/horovod/common/util.py\n@@ -15,11 +15,15 @@\n # limitations under the License.\n # =============================================================================\n \n-from contextlib import contextmanager\n-from multiprocessing import Process, Queue\n+import multiprocessing\n import os\n+import sys\n import sysconfig\n \n+from contextlib import contextmanager\n+\n+\n+_PY3 = sys.version_info[0] == 3\n EXTENSIONS = ['tensorflow', 'torch', 'mxnet']\n \n \n@@ -83,9 +87,11 @@\n \n queue.put(result)\n \n- queue = Queue()\n- p = Process(target=_target_fn,\n- args=(ext_base_name, fn, fn_desc, queue, verbose))\n+ # 'fork' is required because horovodrun is a frozen executable\n+ ctx = multiprocessing.get_context('fork') if _PY3 else multiprocessing\n+ queue = ctx.Queue()\n+ p = ctx.Process(target=_target_fn,\n+ args=(ext_base_name, fn, fn_desc, queue, verbose))\n p.daemon = True\n p.start()\n p.join()\n", "issue": "Python 3.8 incompatibility with nccl_built check\nSee: https://github.com/huge-success/sanic/issues/1774\r\n\r\n```\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:110: in wrapper\r\n retval = f(*args, **kwargs)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:151: in nccl_built\r\n result = _check_extension_lambda(\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/site-packages/horovod/common/util.py:90: in _check_extension_lambda\r\n p.start()\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/process.py:121: in start\r\n self._popen = self._Popen(self)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:224: in _Popen\r\n return _default_context.get_context().Process._Popen(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/context.py:283: in _Popen\r\n return Popen(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:32: in __init__\r\n super().__init__(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_fork.py:19: in __init__\r\n self._launch(process_obj)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/popen_spawn_posix.py:47: in _launch\r\n reduction.dump(process_obj, fp)\r\n/Users/runner/hostedtoolcache/Python/3.8.2/x64/lib/python3.8/multiprocessing/reduction.py:60: in dump\r\n ForkingPickler(file, protocol).dump(obj)\r\nE AttributeError: Can't pickle local object '_check_extension_lambda.<locals>._target_fn'\r\n```\n", "before_files": [{"content": "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n# Modifications copyright (C) 2019 Uber Technologies, Inc.\n# Modifications copyright Microsoft\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nfrom contextlib import contextmanager\nfrom multiprocessing import Process, Queue\nimport os\nimport sysconfig\n\nEXTENSIONS = ['tensorflow', 'torch', 'mxnet']\n\n\ndef get_ext_suffix():\n \"\"\"Determine library extension for various versions of Python.\"\"\"\n ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')\n if ext_suffix:\n return ext_suffix\n\n ext_suffix = sysconfig.get_config_var('SO')\n if ext_suffix:\n return ext_suffix\n\n return '.so'\n\n\ndef get_extension_full_path(pkg_path, *args):\n assert len(args) >= 1\n dir_path = os.path.join(os.path.dirname(pkg_path), *args[:-1])\n full_path = os.path.join(dir_path, args[-1] + get_ext_suffix())\n return full_path\n\n\ndef check_extension(ext_name, ext_env_var, pkg_path, *args):\n full_path = get_extension_full_path(pkg_path, *args)\n if not os.path.exists(full_path):\n raise ImportError(\n 'Extension %s has not been built. If this is not expected, reinstall '\n 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))\n\n\ndef _check_extension_lambda(ext_base_name, fn, fn_desc, verbose):\n \"\"\"\n Tries to load the extension in a new process. If successful, puts fn(ext)\n to the queue or False otherwise. Mutes all stdout/stderr.\n \"\"\"\n def _target_fn(ext_base_name, fn, fn_desc, queue, verbose):\n import importlib\n import sys\n import traceback\n\n if verbose:\n print('Checking whether extension {ext_base_name} was {fn_desc}.'.format(\n ext_base_name=ext_base_name, fn_desc=fn_desc))\n else:\n # Suppress output\n sys.stdout = open(os.devnull, 'w')\n sys.stderr = open(os.devnull, 'w')\n\n try:\n ext = importlib.import_module('.' 
+ ext_base_name, 'horovod')\n result = fn(ext)\n except:\n traceback.print_exc()\n result = None\n\n if verbose:\n print('Extension {ext_base_name} {flag} {fn_desc}.'.format(\n ext_base_name=ext_base_name, flag=('was' if result else 'was NOT'),\n fn_desc=fn_desc))\n\n queue.put(result)\n\n queue = Queue()\n p = Process(target=_target_fn,\n args=(ext_base_name, fn, fn_desc, queue, verbose))\n p.daemon = True\n p.start()\n p.join()\n return queue.get_nowait()\n\n\ndef extension_available(ext_base_name, verbose=False):\n available_fn = lambda ext: ext is not None\n return _check_extension_lambda(\n ext_base_name, available_fn, 'built', verbose) or False\n\n\ndef _cache(f):\n cache = dict()\n\n def wrapper(*args, **kwargs):\n key = (args, frozenset(kwargs.items()))\n\n if key in cache:\n return cache[key]\n else:\n retval = f(*args, **kwargs)\n cache[key] = retval\n return retval\n\n return wrapper\n\n\n@_cache\ndef gpu_available(ext_base_name, verbose=False):\n available_fn = lambda ext: ext._check_has_gpu()\n return _check_extension_lambda(\n ext_base_name, available_fn, 'running with GPU', verbose) or False\n\n\n@_cache\ndef mpi_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.mpi_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with MPI', verbose)\n if result is not None:\n return result\n return False\n\n\n@_cache\ndef gloo_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.gloo_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with Gloo', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if Gloo support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef nccl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.nccl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with NCCL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if NCCL support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef ddl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.ddl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with DDL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if DDL support has been built. '\n 'Run again with --verbose for more details.')\n\n\n@_cache\ndef ccl_built(verbose=False):\n for ext_base_name in EXTENSIONS:\n built_fn = lambda ext: ext.ccl_built()\n result = _check_extension_lambda(\n ext_base_name, built_fn, 'built with CCL', verbose)\n if result is not None:\n return result\n raise RuntimeError('Failed to determine if CCL support has been built. 
'\n 'Run again with --verbose for more details.')\n\n\n@contextmanager\ndef env(**kwargs):\n # ignore args with None values\n for k in list(kwargs.keys()):\n if kwargs[k] is None:\n del kwargs[k]\n\n # backup environment\n backup = {}\n for k in kwargs.keys():\n backup[k] = os.environ.get(k)\n\n # set new values & yield\n for k, v in kwargs.items():\n os.environ[k] = v\n\n try:\n yield\n finally:\n # restore environment\n for k in kwargs.keys():\n if backup[k] is not None:\n os.environ[k] = backup[k]\n else:\n del os.environ[k]\n\n\ndef get_average_backwards_compatibility_fun(reduce_ops):\n \"\"\"\n Handle backwards compatibility between the old average and the new op parameters.\n Old code using the average parameter (e.g. hvd.allreduce(tensor, average=False))\n gets unchanged behavior, but mixing old and new is disallowed (e.g. no\n hvd.allreduce(tensor, average=False, op=hvd.Adasum)).\n \"\"\"\n def impl(op, average):\n if op != None:\n if average != None:\n raise ValueError('The op parameter supersedes average. Please provide only one of them.')\n return op\n elif average != None:\n return reduce_ops.Average if average else reduce_ops.Sum\n else:\n return reduce_ops.Average\n return impl\n\n\ndef num_rank_is_power_2(num_rank):\n \"\"\"\n Tests if the given number of ranks is of power of 2. This check is required\n for Adasum allreduce.\n TODO support non-power of 2 ranks.\n \"\"\"\n return num_rank != 0 and ((num_rank & (num_rank -1)) == 0)\n", "path": "horovod/common/util.py"}]} | 3,421 | 268 |
gh_patches_debug_2398 | rasdani/github-patches | git_diff | microsoft__MLOS-477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SMAC optimizer messes up mlos_bench logging
The SMAC optimizer completely overrides our logging setup and installs its own formatter, output handler, and so on. As a result, as soon as the SMAC optimizer is initialized, mlos_bench stops writing to its log file, and all logging goes to stdout, in a different format and at a different log level (always INFO). We need to find a way to make SMAC use our logger instead of setting up its own from scratch.
</issue>
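For context, a minimal sketch of the idea behind the fix (hypothetical hyperparameter, file name, and trial count): SMAC's facade accepts a `logging_level` argument, and passing `logging_level=False` tells it not to install its own logging configuration, which is the knob used in the accompanying patch:
```
import logging

from ConfigSpace import ConfigurationSpace, UniformFloatHyperparameter
from smac import HyperparameterOptimizationFacade, Scenario

# Stand-in for mlos_bench's own logging setup (file handler, custom format, etc.).
logging.basicConfig(filename='mlos_bench.log', level=logging.DEBUG)

space = ConfigurationSpace()
space.add_hyperparameter(UniformFloatHyperparameter('x', 0.0, 1.0))


def dummy_target(config, seed: int = 0) -> float:
    return 0.0


optimizer = HyperparameterOptimizationFacade(
    Scenario(space, n_trials=10),
    dummy_target,
    overwrite=True,
    logging_level=False,   # keep the already-configured logger instead of SMAC's own setup
)
```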
<code>
[start of mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py]
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Contains the wrapper class for SMAC Bayesian optimizers.
7 See Also: <https://automl.github.io/SMAC3/main/index.html>
8 """
9
10 from pathlib import Path
11 from typing import Dict, List, Optional, TYPE_CHECKING
12 from tempfile import TemporaryDirectory
13
14 import ConfigSpace
15 import numpy.typing as npt
16 import pandas as pd
17
18 from mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer
19 from mlos_core.spaces.adapters.adapter import BaseSpaceAdapter
20
21
22 class SmacOptimizer(BaseBayesianOptimizer):
23 """Wrapper class for SMAC based Bayesian optimization.
24
25 Parameters
26 ----------
27 parameter_space : ConfigSpace.ConfigurationSpace
28 The parameter space to optimize.
29
30 space_adapter : BaseSpaceAdapter
31 The space adapter class to employ for parameter space transformations.
32
33 seed : Optional[int]
34 By default SMAC uses a known seed (0) to keep results reproducible.
35 However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.
36
37 run_name : Optional[str]
38 Name of this run. This is used to easily distinguish across different runs.
39 If set to `None` (default), SMAC will generate a hash from metadata.
40
41 output_directory : Optional[str]
42 The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.
43
44 max_trials : int
45 Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.
46 Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.
47
48 n_random_init : Optional[int]
49 Number of points evaluated at start to bootstrap the optimizer. Defaults to 10.
50
51 n_random_probability: Optional[float]
52 Probability of choosing to evaluate a random configuration during optimization.
53 Defaults to `0.1`. Setting this to a higher value favors exploration over exploitation.
54 """
55
56 def __init__(self, *, # pylint: disable=too-many-locals
57 parameter_space: ConfigSpace.ConfigurationSpace,
58 space_adapter: Optional[BaseSpaceAdapter] = None,
59 seed: Optional[int] = 0,
60 run_name: Optional[str] = None,
61 output_directory: Optional[str] = None,
62 max_trials: int = 100,
63 n_random_init: Optional[int] = 10,
64 n_random_probability: Optional[float] = 0.1):
65
66 super().__init__(
67 parameter_space=parameter_space,
68 space_adapter=space_adapter,
69 )
70
71 # Declare at the top because we need it in __del__/cleanup()
72 self._temp_output_directory: Optional[TemporaryDirectory] = None
73
74 # pylint: disable=import-outside-toplevel
75 from smac import HyperparameterOptimizationFacade as Optimizer_Smac
76 from smac import Scenario
77 from smac.intensifier.abstract_intensifier import AbstractIntensifier
78 from smac.initial_design import LatinHypercubeInitialDesign
79 from smac.main.config_selector import ConfigSelector
80 from smac.random_design.probability_design import ProbabilityRandomDesign
81 from smac.runhistory import TrialInfo
82
83 # Store for TrialInfo instances returned by .ask()
84 self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}
85
86 # The default when not specified is to use a known seed (0) to keep results reproducible.
87 # However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.
88 # https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario
89 seed = -1 if seed is None else seed
90
91 # Create temporary directory for SMAC output (if none provided)
92 if output_directory is None:
93 # pylint: disable=consider-using-with
94 try:
95 self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10
96 except TypeError:
97 self._temp_output_directory = TemporaryDirectory()
98 output_directory = self._temp_output_directory.name
99
100 scenario: Scenario = Scenario(
101 self.optimizer_parameter_space,
102 name=run_name,
103 output_directory=Path(output_directory),
104 deterministic=True,
105 n_trials=max_trials,
106 seed=seed or -1, # if -1, SMAC will generate a random seed internally
107 n_workers=1, # Use a single thread for evaluating trials
108 )
109 intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)
110 config_selector: ConfigSelector = ConfigSelector(scenario, retrain_after=1)
111
112 initial_design: Optional[LatinHypercubeInitialDesign] = None
113 if n_random_init is not None:
114 initial_design = LatinHypercubeInitialDesign(scenario=scenario, n_configs=n_random_init)
115 random_design: Optional[ProbabilityRandomDesign] = None
116 if n_random_probability is not None:
117 random_design = ProbabilityRandomDesign(probability=n_random_probability)
118
119 self.base_optimizer = Optimizer_Smac(
120 scenario,
121 SmacOptimizer._dummy_target_func,
122 initial_design=initial_design,
123 intensifier=intensifier,
124 random_design=random_design,
125 config_selector=config_selector,
126 overwrite=True,
127 )
128
129 def __del__(self) -> None:
130 # Best-effort attempt to clean up, in case the user forgets to call .cleanup()
131 self.cleanup()
132
133 @staticmethod
134 def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:
135 """Dummy target function for SMAC optimizer.
136
137 Since we only use the ask-and-tell interface, this is never called.
138
139 Parameters
140 ----------
141 config : ConfigSpace.Configuration
142 Configuration to evaluate.
143
144 seed : int
145 Random seed to use for the target function. Not actually used.
146 """
147 # NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API
148 # -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946
149 raise RuntimeError('This function should never be called.')
150
151 def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:
152 """Registers the given configurations and scores.
153
154 Parameters
155 ----------
156 configurations : pd.DataFrame
157 Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
158
159 scores : pd.Series
160 Scores from running the configurations. The index is the same as the index of the configurations.
161
162 context : pd.DataFrame
163 Not Yet Implemented.
164 """
165 from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel
166
167 if context is not None:
168 raise NotImplementedError()
169
170 # Register each trial (one-by-one)
171 for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):
172 # Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance
173 info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))
174 value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)
175 self.base_optimizer.tell(info, value, save=False)
176
177 # Save optimizer once we register all configs
178 self.base_optimizer.optimizer.save()
179
180 def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:
181 """Suggests a new configuration.
182
183 Parameters
184 ----------
185 context : pd.DataFrame
186 Not Yet Implemented.
187
188 Returns
189 -------
190 configuration : pd.DataFrame
191 Pandas dataframe with a single row. Column names are the parameter names.
192 """
193 if TYPE_CHECKING:
194 from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel
195
196 if context is not None:
197 raise NotImplementedError()
198
199 trial: TrialInfo = self.base_optimizer.ask()
200 self.trial_info_map[trial.config] = trial
201 return pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))
202
203 def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:
204 raise NotImplementedError()
205
206 def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:
207 from smac.utils.configspace import convert_configurations_to_array # pylint: disable=import-outside-toplevel
208
209 if context is not None:
210 raise NotImplementedError()
211 if self._space_adapter:
212 raise NotImplementedError()
213
214 # pylint: disable=protected-access
215 if len(self._observations) < self.base_optimizer._initial_design._n_configs:
216 raise RuntimeError('Surrogate model can make predictions *only* after all initial points have been evaluated')
217 if self.base_optimizer._config_selector._model is None:
218 raise RuntimeError('Surrogate model is not yet trained')
219
220 configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))
221 mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)
222 return mean_predictions.reshape(-1,)
223
224 def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:
225 if context is not None:
226 raise NotImplementedError()
227 if self._space_adapter:
228 raise NotImplementedError()
229
230 # pylint: disable=protected-access
231 if self.base_optimizer._config_selector._acquisition_function is None:
232 raise RuntimeError('Acquisition function is not yet initialized')
233
234 configs: list = self._to_configspace_configs(configurations)
235 return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)
236
237 def cleanup(self) -> None:
238 if self._temp_output_directory is not None:
239 self._temp_output_directory.cleanup()
240 self._temp_output_directory = None
241
242 def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:
243 """Convert a dataframe of configurations to a list of ConfigSpace configurations.
244
245 Parameters
246 ----------
247 configurations : pd.DataFrame
248 Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.
249
250 Returns
251 -------
252 configurations : list
253 List of ConfigSpace configurations.
254 """
255 return [
256 ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())
257 for (_, config) in configurations.iterrows()
258 ]
259
[end of mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
--- a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
+++ b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
@@ -124,6 +124,7 @@
random_design=random_design,
config_selector=config_selector,
overwrite=True,
+ logging_level=False, # Use the existing logger
)
def __del__(self) -> None:
| {"golden_diff": "diff --git a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py\n--- a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py\n+++ b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py\n@@ -124,6 +124,7 @@\n random_design=random_design,\n config_selector=config_selector,\n overwrite=True,\n+ logging_level=False, # Use the existing logger\n )\n \n def __del__(self) -> None:\n", "issue": "SMAC optimizer messes up mlos_bench logging\nSMAC optimizer completely overrides our logging setup and installs its own formatter, output handler, and so on. As a result, as soon as SMAC optimizer is initialized, mlos_bench stops writing to its log file, and all logging goes to stdout, in different format, and at different log level (always INFO). We need to find a way to make SMAC use our logger instead of setting up its own from scratch\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nContains the wrapper class for SMAC Bayesian optimizers.\nSee Also: <https://automl.github.io/SMAC3/main/index.html>\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, TYPE_CHECKING\nfrom tempfile import TemporaryDirectory\n\nimport ConfigSpace\nimport numpy.typing as npt\nimport pandas as pd\n\nfrom mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer\nfrom mlos_core.spaces.adapters.adapter import BaseSpaceAdapter\n\n\nclass SmacOptimizer(BaseBayesianOptimizer):\n \"\"\"Wrapper class for SMAC based Bayesian optimization.\n\n Parameters\n ----------\n parameter_space : ConfigSpace.ConfigurationSpace\n The parameter space to optimize.\n\n space_adapter : BaseSpaceAdapter\n The space adapter class to employ for parameter space transformations.\n\n seed : Optional[int]\n By default SMAC uses a known seed (0) to keep results reproducible.\n However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n\n run_name : Optional[str]\n Name of this run. This is used to easily distinguish across different runs.\n If set to `None` (default), SMAC will generate a hash from metadata.\n\n output_directory : Optional[str]\n The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.\n\n max_trials : int\n Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.\n Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.\n\n n_random_init : Optional[int]\n Number of points evaluated at start to bootstrap the optimizer. Defaults to 10.\n\n n_random_probability: Optional[float]\n Probability of choosing to evaluate a random configuration during optimization.\n Defaults to `0.1`. 
Setting this to a higher value favors exploration over exploitation.\n \"\"\"\n\n def __init__(self, *, # pylint: disable=too-many-locals\n parameter_space: ConfigSpace.ConfigurationSpace,\n space_adapter: Optional[BaseSpaceAdapter] = None,\n seed: Optional[int] = 0,\n run_name: Optional[str] = None,\n output_directory: Optional[str] = None,\n max_trials: int = 100,\n n_random_init: Optional[int] = 10,\n n_random_probability: Optional[float] = 0.1):\n\n super().__init__(\n parameter_space=parameter_space,\n space_adapter=space_adapter,\n )\n\n # Declare at the top because we need it in __del__/cleanup()\n self._temp_output_directory: Optional[TemporaryDirectory] = None\n\n # pylint: disable=import-outside-toplevel\n from smac import HyperparameterOptimizationFacade as Optimizer_Smac\n from smac import Scenario\n from smac.intensifier.abstract_intensifier import AbstractIntensifier\n from smac.initial_design import LatinHypercubeInitialDesign\n from smac.main.config_selector import ConfigSelector\n from smac.random_design.probability_design import ProbabilityRandomDesign\n from smac.runhistory import TrialInfo\n\n # Store for TrialInfo instances returned by .ask()\n self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}\n\n # The default when not specified is to use a known seed (0) to keep results reproducible.\n # However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n # https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario\n seed = -1 if seed is None else seed\n\n # Create temporary directory for SMAC output (if none provided)\n if output_directory is None:\n # pylint: disable=consider-using-with\n try:\n self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10\n except TypeError:\n self._temp_output_directory = TemporaryDirectory()\n output_directory = self._temp_output_directory.name\n\n scenario: Scenario = Scenario(\n self.optimizer_parameter_space,\n name=run_name,\n output_directory=Path(output_directory),\n deterministic=True,\n n_trials=max_trials,\n seed=seed or -1, # if -1, SMAC will generate a random seed internally\n n_workers=1, # Use a single thread for evaluating trials\n )\n intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)\n config_selector: ConfigSelector = ConfigSelector(scenario, retrain_after=1)\n\n initial_design: Optional[LatinHypercubeInitialDesign] = None\n if n_random_init is not None:\n initial_design = LatinHypercubeInitialDesign(scenario=scenario, n_configs=n_random_init)\n random_design: Optional[ProbabilityRandomDesign] = None\n if n_random_probability is not None:\n random_design = ProbabilityRandomDesign(probability=n_random_probability)\n\n self.base_optimizer = Optimizer_Smac(\n scenario,\n SmacOptimizer._dummy_target_func,\n initial_design=initial_design,\n intensifier=intensifier,\n random_design=random_design,\n config_selector=config_selector,\n overwrite=True,\n )\n\n def __del__(self) -> None:\n # Best-effort attempt to clean up, in case the user forgets to call .cleanup()\n self.cleanup()\n\n @staticmethod\n def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:\n \"\"\"Dummy target function for SMAC optimizer.\n\n Since we only use the ask-and-tell interface, this is never called.\n\n Parameters\n ----------\n config : ConfigSpace.Configuration\n Configuration to evaluate.\n\n seed : int\n Random seed to use for the target function. 
Not actually used.\n \"\"\"\n # NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API\n # -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946\n raise RuntimeError('This function should never be called.')\n\n def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:\n \"\"\"Registers the given configurations and scores.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n scores : pd.Series\n Scores from running the configurations. The index is the same as the index of the configurations.\n\n context : pd.DataFrame\n Not Yet Implemented.\n \"\"\"\n from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n # Register each trial (one-by-one)\n for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):\n # Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance\n info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))\n value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)\n self.base_optimizer.tell(info, value, save=False)\n\n # Save optimizer once we register all configs\n self.base_optimizer.optimizer.save()\n\n def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n \"\"\"Suggests a new configuration.\n\n Parameters\n ----------\n context : pd.DataFrame\n Not Yet Implemented.\n\n Returns\n -------\n configuration : pd.DataFrame\n Pandas dataframe with a single row. 
Column names are the parameter names.\n \"\"\"\n if TYPE_CHECKING:\n from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n trial: TrialInfo = self.base_optimizer.ask()\n self.trial_info_map[trial.config] = trial\n return pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))\n\n def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:\n raise NotImplementedError()\n\n def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n from smac.utils.configspace import convert_configurations_to_array # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if len(self._observations) < self.base_optimizer._initial_design._n_configs:\n raise RuntimeError('Surrogate model can make predictions *only* after all initial points have been evaluated')\n if self.base_optimizer._config_selector._model is None:\n raise RuntimeError('Surrogate model is not yet trained')\n\n configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))\n mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)\n return mean_predictions.reshape(-1,)\n\n def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if self.base_optimizer._config_selector._acquisition_function is None:\n raise RuntimeError('Acquisition function is not yet initialized')\n\n configs: list = self._to_configspace_configs(configurations)\n return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)\n\n def cleanup(self) -> None:\n if self._temp_output_directory is not None:\n self._temp_output_directory.cleanup()\n self._temp_output_directory = None\n\n def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:\n \"\"\"Convert a dataframe of configurations to a list of ConfigSpace configurations.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n Returns\n -------\n configurations : list\n List of ConfigSpace configurations.\n \"\"\"\n return [\n ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())\n for (_, config) in configurations.iterrows()\n ]\n", "path": "mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py"}]} | 3,640 | 144 |
gh_patches_debug_4620 | rasdani/github-patches | git_diff | getmoto__moto-1969 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Listing CloudFormation stacks should include deleted stacks
Using the AWS API directly returns stacks with a status `DELETE_COMPLETE`; using moto those stacks are not returned.
The existing unit tests actually test for this incorrect behaviour:
https://github.com/spulec/moto/blob/4a275cc/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py#L516-L518
The tests should be updated and the `list_stacks` method on the backend should include the `self.deleted_stacks` data (https://github.com/spulec/moto/blob/4a275cc/moto/cloudformation/models.py#L225-L226)
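For illustration, a minimal sketch of the kind of backend change being proposed (hypothetical; the exact implementation may differ):

```python
def list_stacks(self):
    # Return live stacks plus deleted ones, mirroring the real AWS API,
    # which still lists stacks in DELETE_COMPLETE status.
    return list(self.stacks.values()) + list(self.deleted_stacks.values())
```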
</issue>
<code>
[start of moto/cloudformation/models.py]
1 from __future__ import unicode_literals
2 from datetime import datetime
3 import json
4 import yaml
5 import uuid
6
7 import boto.cloudformation
8 from moto.compat import OrderedDict
9 from moto.core import BaseBackend, BaseModel
10
11 from .parsing import ResourceMap, OutputMap
12 from .utils import (
13 generate_changeset_id,
14 generate_stack_id,
15 yaml_tag_constructor,
16 )
17 from .exceptions import ValidationError
18
19
20 class FakeStack(BaseModel):
21
22 def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):
23 self.stack_id = stack_id
24 self.name = name
25 self.template = template
26 self._parse_template()
27 self.parameters = parameters
28 self.region_name = region_name
29 self.notification_arns = notification_arns if notification_arns else []
30 self.role_arn = role_arn
31 self.tags = tags if tags else {}
32 self.events = []
33 if create_change_set:
34 self._add_stack_event("REVIEW_IN_PROGRESS",
35 resource_status_reason="User Initiated")
36 else:
37 self._add_stack_event("CREATE_IN_PROGRESS",
38 resource_status_reason="User Initiated")
39
40 self.description = self.template_dict.get('Description')
41 self.cross_stack_resources = cross_stack_resources or {}
42 self.resource_map = self._create_resource_map()
43 self.output_map = self._create_output_map()
44 self._add_stack_event("CREATE_COMPLETE")
45 self.status = 'CREATE_COMPLETE'
46
47 def _create_resource_map(self):
48 resource_map = ResourceMap(
49 self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict, self.cross_stack_resources)
50 resource_map.create()
51 return resource_map
52
53 def _create_output_map(self):
54 output_map = OutputMap(self.resource_map, self.template_dict, self.stack_id)
55 output_map.create()
56 return output_map
57
58 def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):
59 self.events.append(FakeEvent(
60 stack_id=self.stack_id,
61 stack_name=self.name,
62 logical_resource_id=self.name,
63 physical_resource_id=self.stack_id,
64 resource_type="AWS::CloudFormation::Stack",
65 resource_status=resource_status,
66 resource_status_reason=resource_status_reason,
67 resource_properties=resource_properties,
68 ))
69
70 def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):
71 # not used yet... feel free to help yourself
72 resource = self.resource_map[logical_resource_id]
73 self.events.append(FakeEvent(
74 stack_id=self.stack_id,
75 stack_name=self.name,
76 logical_resource_id=logical_resource_id,
77 physical_resource_id=resource.physical_resource_id,
78 resource_type=resource.type,
79 resource_status=resource_status,
80 resource_status_reason=resource_status_reason,
81 resource_properties=resource_properties,
82 ))
83
84 def _parse_template(self):
85 yaml.add_multi_constructor('', yaml_tag_constructor)
86 try:
87 self.template_dict = yaml.load(self.template)
88 except yaml.parser.ParserError:
89 self.template_dict = json.loads(self.template)
90
91 @property
92 def stack_parameters(self):
93 return self.resource_map.resolved_parameters
94
95 @property
96 def stack_resources(self):
97 return self.resource_map.values()
98
99 @property
100 def stack_outputs(self):
101 return self.output_map.values()
102
103 @property
104 def exports(self):
105 return self.output_map.exports
106
107 def update(self, template, role_arn=None, parameters=None, tags=None):
108 self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated")
109 self.template = template
110 self._parse_template()
111 self.resource_map.update(self.template_dict, parameters)
112 self.output_map = self._create_output_map()
113 self._add_stack_event("UPDATE_COMPLETE")
114 self.status = "UPDATE_COMPLETE"
115 self.role_arn = role_arn
116 # only overwrite tags if passed
117 if tags is not None:
118 self.tags = tags
119 # TODO: update tags in the resource map
120
121 def delete(self):
122 self._add_stack_event("DELETE_IN_PROGRESS",
123 resource_status_reason="User Initiated")
124 self.resource_map.delete()
125 self._add_stack_event("DELETE_COMPLETE")
126 self.status = "DELETE_COMPLETE"
127
128
129 class FakeEvent(BaseModel):
130
131 def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):
132 self.stack_id = stack_id
133 self.stack_name = stack_name
134 self.logical_resource_id = logical_resource_id
135 self.physical_resource_id = physical_resource_id
136 self.resource_type = resource_type
137 self.resource_status = resource_status
138 self.resource_status_reason = resource_status_reason
139 self.resource_properties = resource_properties
140 self.timestamp = datetime.utcnow()
141 self.event_id = uuid.uuid4()
142
143
144 class CloudFormationBackend(BaseBackend):
145
146 def __init__(self):
147 self.stacks = OrderedDict()
148 self.deleted_stacks = {}
149 self.exports = OrderedDict()
150 self.change_sets = OrderedDict()
151
152 def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):
153 stack_id = generate_stack_id(name)
154 new_stack = FakeStack(
155 stack_id=stack_id,
156 name=name,
157 template=template,
158 parameters=parameters,
159 region_name=region_name,
160 notification_arns=notification_arns,
161 tags=tags,
162 role_arn=role_arn,
163 cross_stack_resources=self.exports,
164 create_change_set=create_change_set,
165 )
166 self.stacks[stack_id] = new_stack
167 self._validate_export_uniqueness(new_stack)
168 for export in new_stack.exports:
169 self.exports[export.name] = export
170 return new_stack
171
172 def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
173 if change_set_type == 'UPDATE':
174 stacks = self.stacks.values()
175 stack = None
176 for s in stacks:
177 if s.name == stack_name:
178 stack = s
179 if stack is None:
180 raise ValidationError(stack_name)
181
182 else:
183 stack = self.create_stack(stack_name, template, parameters,
184 region_name, notification_arns, tags,
185 role_arn, create_change_set=True)
186 change_set_id = generate_changeset_id(change_set_name, region_name)
187 self.stacks[change_set_name] = {'Id': change_set_id,
188 'StackId': stack.stack_id}
189 self.change_sets[change_set_id] = stack
190 return change_set_id, stack.stack_id
191
192 def execute_change_set(self, change_set_name, stack_name=None):
193 stack = None
194 if change_set_name in self.change_sets:
195 # This means arn was passed in
196 stack = self.change_sets[change_set_name]
197 else:
198 for cs in self.change_sets:
199 if self.change_sets[cs].name == change_set_name:
200 stack = self.change_sets[cs]
201 if stack is None:
202 raise ValidationError(stack_name)
203 if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':
204 stack._add_stack_event('CREATE_COMPLETE')
205 else:
206 stack._add_stack_event('UPDATE_IN_PROGRESS')
207 stack._add_stack_event('UPDATE_COMPLETE')
208 return True
209
210 def describe_stacks(self, name_or_stack_id):
211 stacks = self.stacks.values()
212 if name_or_stack_id:
213 for stack in stacks:
214 if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
215 return [stack]
216 if self.deleted_stacks:
217 deleted_stacks = self.deleted_stacks.values()
218 for stack in deleted_stacks:
219 if stack.stack_id == name_or_stack_id:
220 return [stack]
221 raise ValidationError(name_or_stack_id)
222 else:
223 return list(stacks)
224
225 def list_stacks(self):
226 return self.stacks.values()
227
228 def get_stack(self, name_or_stack_id):
229 all_stacks = dict(self.deleted_stacks, **self.stacks)
230 if name_or_stack_id in all_stacks:
231 # Lookup by stack id - deleted stacks incldued
232 return all_stacks[name_or_stack_id]
233 else:
234 # Lookup by stack name - undeleted stacks only
235 for stack in self.stacks.values():
236 if stack.name == name_or_stack_id:
237 return stack
238
239 def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):
240 stack = self.get_stack(name)
241 stack.update(template, role_arn, parameters=parameters, tags=tags)
242 return stack
243
244 def list_stack_resources(self, stack_name_or_id):
245 stack = self.get_stack(stack_name_or_id)
246 return stack.stack_resources
247
248 def delete_stack(self, name_or_stack_id):
249 if name_or_stack_id in self.stacks:
250 # Delete by stack id
251 stack = self.stacks.pop(name_or_stack_id, None)
252 stack.delete()
253 self.deleted_stacks[stack.stack_id] = stack
254 [self.exports.pop(export.name) for export in stack.exports]
255 return self.stacks.pop(name_or_stack_id, None)
256 else:
257 # Delete by stack name
258 for stack in list(self.stacks.values()):
259 if stack.name == name_or_stack_id:
260 self.delete_stack(stack.stack_id)
261
262 def list_exports(self, token):
263 all_exports = list(self.exports.values())
264 if token is None:
265 exports = all_exports[0:100]
266 next_token = '100' if len(all_exports) > 100 else None
267 else:
268 token = int(token)
269 exports = all_exports[token:token + 100]
270 next_token = str(token + 100) if len(all_exports) > token + 100 else None
271 return exports, next_token
272
273 def _validate_export_uniqueness(self, stack):
274 new_stack_export_names = [x.name for x in stack.exports]
275 export_names = self.exports.keys()
276 if not set(export_names).isdisjoint(new_stack_export_names):
277 raise ValidationError(stack.stack_id, message='Export names must be unique across a given region')
278
279
280 cloudformation_backends = {}
281 for region in boto.cloudformation.regions():
282 cloudformation_backends[region.name] = CloudFormationBackend()
283
[end of moto/cloudformation/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py
--- a/moto/cloudformation/models.py
+++ b/moto/cloudformation/models.py
@@ -223,7 +223,11 @@
return list(stacks)
def list_stacks(self):
- return self.stacks.values()
+ return [
+ v for v in self.stacks.values()
+ ] + [
+ v for v in self.deleted_stacks.values()
+ ]
def get_stack(self, name_or_stack_id):
all_stacks = dict(self.deleted_stacks, **self.stacks)
| {"golden_diff": "diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py\n--- a/moto/cloudformation/models.py\n+++ b/moto/cloudformation/models.py\n@@ -223,7 +223,11 @@\n return list(stacks)\n \n def list_stacks(self):\n- return self.stacks.values()\n+ return [\n+ v for v in self.stacks.values()\n+ ] + [\n+ v for v in self.deleted_stacks.values()\n+ ]\n \n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n", "issue": "Listing CloudFormation stacks should include deleted stacks\nUsing the AWS API directly returns stacks with a status `DELETE_COMPLETE`; using moto those stacks are not returned.\r\n\r\nThe existing unit tests actually test for this incorrect behaviour:\r\nhttps://github.com/spulec/moto/blob/4a275cc/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py#L516-L518\r\n\r\nThe tests should be updated and the `list_stacks` method on the backend should include the `self.deleted_stacks` data (https://github.com/spulec/moto/blob/4a275cc/moto/cloudformation/models.py#L225-L226)\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom datetime import datetime\nimport json\nimport yaml\nimport uuid\n\nimport boto.cloudformation\nfrom moto.compat import OrderedDict\nfrom moto.core import BaseBackend, BaseModel\n\nfrom .parsing import ResourceMap, OutputMap\nfrom .utils import (\n generate_changeset_id,\n generate_stack_id,\n yaml_tag_constructor,\n)\nfrom .exceptions import ValidationError\n\n\nclass FakeStack(BaseModel):\n\n def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):\n self.stack_id = stack_id\n self.name = name\n self.template = template\n self._parse_template()\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n self.role_arn = role_arn\n self.tags = tags if tags else {}\n self.events = []\n if create_change_set:\n self._add_stack_event(\"REVIEW_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n else:\n self._add_stack_event(\"CREATE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n\n self.description = self.template_dict.get('Description')\n self.cross_stack_resources = cross_stack_resources or {}\n self.resource_map = self._create_resource_map()\n self.output_map = self._create_output_map()\n self._add_stack_event(\"CREATE_COMPLETE\")\n self.status = 'CREATE_COMPLETE'\n\n def _create_resource_map(self):\n resource_map = ResourceMap(\n self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict, self.cross_stack_resources)\n resource_map.create()\n return resource_map\n\n def _create_output_map(self):\n output_map = OutputMap(self.resource_map, self.template_dict, self.stack_id)\n output_map.create()\n return output_map\n\n def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=self.name,\n physical_resource_id=self.stack_id,\n resource_type=\"AWS::CloudFormation::Stack\",\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\n # not used yet... 
feel free to help yourself\n resource = self.resource_map[logical_resource_id]\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=logical_resource_id,\n physical_resource_id=resource.physical_resource_id,\n resource_type=resource.type,\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _parse_template(self):\n yaml.add_multi_constructor('', yaml_tag_constructor)\n try:\n self.template_dict = yaml.load(self.template)\n except yaml.parser.ParserError:\n self.template_dict = json.loads(self.template)\n\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\n\n @property\n def stack_resources(self):\n return self.resource_map.values()\n\n @property\n def stack_outputs(self):\n return self.output_map.values()\n\n @property\n def exports(self):\n return self.output_map.exports\n\n def update(self, template, role_arn=None, parameters=None, tags=None):\n self._add_stack_event(\"UPDATE_IN_PROGRESS\", resource_status_reason=\"User Initiated\")\n self.template = template\n self._parse_template()\n self.resource_map.update(self.template_dict, parameters)\n self.output_map = self._create_output_map()\n self._add_stack_event(\"UPDATE_COMPLETE\")\n self.status = \"UPDATE_COMPLETE\"\n self.role_arn = role_arn\n # only overwrite tags if passed\n if tags is not None:\n self.tags = tags\n # TODO: update tags in the resource map\n\n def delete(self):\n self._add_stack_event(\"DELETE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n self.resource_map.delete()\n self._add_stack_event(\"DELETE_COMPLETE\")\n self.status = \"DELETE_COMPLETE\"\n\n\nclass FakeEvent(BaseModel):\n\n def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\n self.stack_id = stack_id\n self.stack_name = stack_name\n self.logical_resource_id = logical_resource_id\n self.physical_resource_id = physical_resource_id\n self.resource_type = resource_type\n self.resource_status = resource_status\n self.resource_status_reason = resource_status_reason\n self.resource_properties = resource_properties\n self.timestamp = datetime.utcnow()\n self.event_id = uuid.uuid4()\n\n\nclass CloudFormationBackend(BaseBackend):\n\n def __init__(self):\n self.stacks = OrderedDict()\n self.deleted_stacks = {}\n self.exports = OrderedDict()\n self.change_sets = OrderedDict()\n\n def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):\n stack_id = generate_stack_id(name)\n new_stack = FakeStack(\n stack_id=stack_id,\n name=name,\n template=template,\n parameters=parameters,\n region_name=region_name,\n notification_arns=notification_arns,\n tags=tags,\n role_arn=role_arn,\n cross_stack_resources=self.exports,\n create_change_set=create_change_set,\n )\n self.stacks[stack_id] = new_stack\n self._validate_export_uniqueness(new_stack)\n for export in new_stack.exports:\n self.exports[export.name] = export\n return new_stack\n\n def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):\n if change_set_type == 'UPDATE':\n stacks = self.stacks.values()\n stack = None\n for s in stacks:\n if s.name == stack_name:\n stack = s\n if stack is None:\n raise ValidationError(stack_name)\n\n else:\n 
stack = self.create_stack(stack_name, template, parameters,\n region_name, notification_arns, tags,\n role_arn, create_change_set=True)\n change_set_id = generate_changeset_id(change_set_name, region_name)\n self.stacks[change_set_name] = {'Id': change_set_id,\n 'StackId': stack.stack_id}\n self.change_sets[change_set_id] = stack\n return change_set_id, stack.stack_id\n\n def execute_change_set(self, change_set_name, stack_name=None):\n stack = None\n if change_set_name in self.change_sets:\n # This means arn was passed in\n stack = self.change_sets[change_set_name]\n else:\n for cs in self.change_sets:\n if self.change_sets[cs].name == change_set_name:\n stack = self.change_sets[cs]\n if stack is None:\n raise ValidationError(stack_name)\n if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':\n stack._add_stack_event('CREATE_COMPLETE')\n else:\n stack._add_stack_event('UPDATE_IN_PROGRESS')\n stack._add_stack_event('UPDATE_COMPLETE')\n return True\n\n def describe_stacks(self, name_or_stack_id):\n stacks = self.stacks.values()\n if name_or_stack_id:\n for stack in stacks:\n if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\n return [stack]\n if self.deleted_stacks:\n deleted_stacks = self.deleted_stacks.values()\n for stack in deleted_stacks:\n if stack.stack_id == name_or_stack_id:\n return [stack]\n raise ValidationError(name_or_stack_id)\n else:\n return list(stacks)\n\n def list_stacks(self):\n return self.stacks.values()\n\n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n if name_or_stack_id in all_stacks:\n # Lookup by stack id - deleted stacks incldued\n return all_stacks[name_or_stack_id]\n else:\n # Lookup by stack name - undeleted stacks only\n for stack in self.stacks.values():\n if stack.name == name_or_stack_id:\n return stack\n\n def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\n stack = self.get_stack(name)\n stack.update(template, role_arn, parameters=parameters, tags=tags)\n return stack\n\n def list_stack_resources(self, stack_name_or_id):\n stack = self.get_stack(stack_name_or_id)\n return stack.stack_resources\n\n def delete_stack(self, name_or_stack_id):\n if name_or_stack_id in self.stacks:\n # Delete by stack id\n stack = self.stacks.pop(name_or_stack_id, None)\n stack.delete()\n self.deleted_stacks[stack.stack_id] = stack\n [self.exports.pop(export.name) for export in stack.exports]\n return self.stacks.pop(name_or_stack_id, None)\n else:\n # Delete by stack name\n for stack in list(self.stacks.values()):\n if stack.name == name_or_stack_id:\n self.delete_stack(stack.stack_id)\n\n def list_exports(self, token):\n all_exports = list(self.exports.values())\n if token is None:\n exports = all_exports[0:100]\n next_token = '100' if len(all_exports) > 100 else None\n else:\n token = int(token)\n exports = all_exports[token:token + 100]\n next_token = str(token + 100) if len(all_exports) > token + 100 else None\n return exports, next_token\n\n def _validate_export_uniqueness(self, stack):\n new_stack_export_names = [x.name for x in stack.exports]\n export_names = self.exports.keys()\n if not set(export_names).isdisjoint(new_stack_export_names):\n raise ValidationError(stack.stack_id, message='Export names must be unique across a given region')\n\n\ncloudformation_backends = {}\nfor region in boto.cloudformation.regions():\n cloudformation_backends[region.name] = CloudFormationBackend()\n", "path": "moto/cloudformation/models.py"}]} | 3,703 | 136 |
gh_patches_debug_33921 | rasdani/github-patches | git_diff | coqui-ai__TTS-1227 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ParallelWaveGAN config should be adjusted
Hi,
I have tried training with the current default config: it drops the learning rate too fast and the model converges to generating noise. This happens because, with `scheduler_after_epoch=False`, `ExponentialLR` with `gamma=0.999` is stepped every training step, so the learning rate effectively reaches 0.000 within about 10k steps.
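As a rough sanity check (assuming the scheduler really is stepped once per training step rather than per epoch), the decay can be computed directly; the numbers below are illustrative, not taken from a training run:

```python
# Exponential decay applied to the default generator learning rate (lr_gen = 0.0002)
gamma = 0.999
steps = 10_000
decay = gamma ** steps      # ~4.5e-05
print(decay)                # ~4.52e-05
print(0.0002 * decay)       # ~9e-09, effectively zero after 10k steps
```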
The config values from the original paper are:
```
batch_size=8,
stft_loss_weight=1.0,
mse_G_loss_weight=4.0,
steps_to_start_discriminator=100000,
lr_gen=0.0001,
lr_disc=0.00005,
lr_scheduler_gen="StepLR",
lr_scheduler_gen_params={"gamma": 0.5, "step_size": 200000, "last_epoch": -1},
lr_scheduler_disc="StepLR",
lr_scheduler_disc_params={"gamma": 0.5, "step_size": 200000, "last_epoch": -1},
scheduler_after_epoch=False,
```
It is also possible to use `ExponentialLR`, accepting some floating-point rounding error:
```
lr_scheduler_gen="ExponentialLR", # one of the schedulers from https://pytorch.org/docs/stable/optim.html
lr_scheduler_gen_params={"gamma": 0.5**(1/200000), "last_epoch": -1},
lr_scheduler_disc="ExponentialLR", # one of the schedulers from https://pytorch.org/docs/stable/optim.html
lr_scheduler_disc_params={"gamma": 0.5**(1/200000), "last_epoch": -1},
```
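(For reference, `0.5**(1/200000)` evaluates to roughly `0.9999965`, i.e. an exponential decay that halves the learning rate every 200k steps, numerically equivalent, up to rounding, to the `StepLR` settings above.)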
With more GPU memory the batch_size can be increased and steps reduced.
</issue>
<code>
[start of TTS/vocoder/configs/parallel_wavegan_config.py]
1 from dataclasses import dataclass, field
2
3 from .shared_configs import BaseGANVocoderConfig
4
5
6 @dataclass
7 class ParallelWaveganConfig(BaseGANVocoderConfig):
8 """Defines parameters for ParallelWavegan vocoder.
9
10 Args:
11 model (str):
12 Model name used for selecting the right configuration at initialization. Defaults to `gan`.
13 discriminator_model (str): One of the discriminators from `TTS.vocoder.models.*_discriminator`. Defaults to
14 'parallel_wavegan_discriminator`.
15 discriminator_model_params (dict): The discriminator model kwargs. Defaults to
16 '{"num_layers": 10}`
17 generator_model (str): One of the generators from TTS.vocoder.models.*`. Every other non-GAN vocoder model is
18 considered as a generator too. Defaults to `parallel_wavegan_generator`.
19 generator_model_param (dict):
20 The generator model kwargs. Defaults to `{"upsample_factors": [4, 4, 4, 4], "stacks": 3, "num_res_blocks": 30}`.
21 batch_size (int):
22 Batch size used at training. Larger values use more memory. Defaults to 16.
23 seq_len (int):
24 Audio segment length used at training. Larger values use more memory. Defaults to 8192.
25 pad_short (int):
26 Additional padding applied to the audio samples shorter than `seq_len`. Defaults to 0.
27 use_noise_augment (bool):
28 enable / disable random noise added to the input waveform. The noise is added after computing the
29 features. Defaults to True.
30 use_cache (bool):
31 enable / disable in memory caching of the computed features. It can cause OOM error if the system RAM is
32 not large enough. Defaults to True.
33 steps_to_start_discriminator (int):
34 Number of steps required to start training the discriminator. Defaults to 0.
35 use_stft_loss (bool):`
36 enable / disable use of STFT loss originally used by ParallelWaveGAN model. Defaults to True.
37 use_subband_stft (bool):
38 enable / disable use of subband loss computation originally used by MultiBandMelgan model. Defaults to True.
39 use_mse_gan_loss (bool):
40 enable / disable using Mean Squeare Error GAN loss. Defaults to True.
41 use_hinge_gan_loss (bool):
42 enable / disable using Hinge GAN loss. You should choose either Hinge or MSE loss for training GAN models.
43 Defaults to False.
44 use_feat_match_loss (bool):
45 enable / disable using Feature Matching loss originally used by MelGAN model. Defaults to True.
46 use_l1_spec_loss (bool):
47 enable / disable using L1 spectrogram loss originally used by HifiGAN model. Defaults to False.
48 stft_loss_params (dict): STFT loss parameters. Default to
49 `{"n_ffts": [1024, 2048, 512], "hop_lengths": [120, 240, 50], "win_lengths": [600, 1200, 240]}`
50 stft_loss_weight (float): STFT loss weight that multiplies the computed loss before summing up the total
51 model loss. Defaults to 0.5.
52 subband_stft_loss_weight (float):
53 Subband STFT loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.
54 mse_G_loss_weight (float):
55 MSE generator loss weight that multiplies the computed loss before summing up the total loss. faults to 2.5.
56 hinge_G_loss_weight (float):
57 Hinge generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.
58 feat_match_loss_weight (float):
59 Feature matching loss weight that multiplies the computed loss before summing up the total loss. faults to 0.
60 l1_spec_loss_weight (float):
61 L1 spectrogram loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.
62 lr_gen (float):
63 Generator model initial learning rate. Defaults to 0.0002.
64 lr_disc (float):
65 Discriminator model initial learning rate. Defaults to 0.0002.
66 optimizer (torch.optim.Optimizer):
67 Optimizer used for the training. Defaults to `AdamW`.
68 optimizer_params (dict):
69 Optimizer kwargs. Defaults to `{"betas": [0.8, 0.99], "weight_decay": 0.0}`
70 lr_scheduler_gen (torch.optim.Scheduler):
71 Learning rate scheduler for the generator. Defaults to `ExponentialLR`.
72 lr_scheduler_gen_params (dict):
73 Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
74 lr_scheduler_disc (torch.optim.Scheduler):
75 Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.
76 lr_scheduler_dict_params (dict):
77 Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
78 """
79
80 model: str = "parallel_wavegan"
81
82 # Model specific params
83 discriminator_model: str = "parallel_wavegan_discriminator"
84 discriminator_model_params: dict = field(default_factory=lambda: {"num_layers": 10})
85 generator_model: str = "parallel_wavegan_generator"
86 generator_model_params: dict = field(
87 default_factory=lambda: {"upsample_factors": [4, 4, 4, 4], "stacks": 3, "num_res_blocks": 30}
88 )
89
90 # Training - overrides
91 batch_size: int = 6
92 seq_len: int = 25600
93 pad_short: int = 2000
94 use_noise_augment: bool = False
95 use_cache: bool = True
96 steps_to_start_discriminator: int = 200000
97
98 # LOSS PARAMETERS - overrides
99 use_stft_loss: bool = True
100 use_subband_stft_loss: bool = False
101 use_mse_gan_loss: bool = True
102 use_hinge_gan_loss: bool = False
103 use_feat_match_loss: bool = False # requires MelGAN Discriminators (MelGAN and HifiGAN)
104 use_l1_spec_loss: bool = False
105
106 stft_loss_params: dict = field(
107 default_factory=lambda: {
108 "n_ffts": [1024, 2048, 512],
109 "hop_lengths": [120, 240, 50],
110 "win_lengths": [600, 1200, 240],
111 }
112 )
113
114 # loss weights - overrides
115 stft_loss_weight: float = 0.5
116 subband_stft_loss_weight: float = 0
117 mse_G_loss_weight: float = 2.5
118 hinge_G_loss_weight: float = 0
119 feat_match_loss_weight: float = 0
120 l1_spec_loss_weight: float = 0
121
122 # optimizer overrides
123 lr_gen: float = 0.0002 # Initial learning rate.
124 lr_disc: float = 0.0002 # Initial learning rate.
125 optimizer: str = "AdamW"
126 optimizer_params: dict = field(default_factory=lambda: {"betas": [0.8, 0.99], "weight_decay": 0.0})
127 lr_scheduler_gen: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
128 lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
129 lr_scheduler_disc: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
130 lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
131
[end of TTS/vocoder/configs/parallel_wavegan_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/TTS/vocoder/configs/parallel_wavegan_config.py b/TTS/vocoder/configs/parallel_wavegan_config.py
--- a/TTS/vocoder/configs/parallel_wavegan_config.py
+++ b/TTS/vocoder/configs/parallel_wavegan_config.py
@@ -70,11 +70,11 @@
lr_scheduler_gen (torch.optim.Scheduler):
Learning rate scheduler for the generator. Defaults to `ExponentialLR`.
lr_scheduler_gen_params (dict):
- Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
+ Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.5, "step_size": 200000, "last_epoch": -1}`.
lr_scheduler_disc (torch.optim.Scheduler):
Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.
lr_scheduler_dict_params (dict):
- Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`.
+ Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.5, "step_size": 200000, "last_epoch": -1}`.
"""
model: str = "parallel_wavegan"
@@ -124,7 +124,8 @@
lr_disc: float = 0.0002 # Initial learning rate.
optimizer: str = "AdamW"
optimizer_params: dict = field(default_factory=lambda: {"betas": [0.8, 0.99], "weight_decay": 0.0})
- lr_scheduler_gen: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
- lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
- lr_scheduler_disc: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
- lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
+ lr_scheduler_gen: str = "StepLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
+ lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.5, "step_size": 200000, "last_epoch": -1})
+ lr_scheduler_disc: str = "StepLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
+ lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.5, "step_size": 200000, "last_epoch": -1})
+ scheduler_after_epoch: bool = False
| {"golden_diff": "diff --git a/TTS/vocoder/configs/parallel_wavegan_config.py b/TTS/vocoder/configs/parallel_wavegan_config.py\n--- a/TTS/vocoder/configs/parallel_wavegan_config.py\n+++ b/TTS/vocoder/configs/parallel_wavegan_config.py\n@@ -70,11 +70,11 @@\n lr_scheduler_gen (torch.optim.Scheduler):\n Learning rate scheduler for the generator. Defaults to `ExponentialLR`.\n lr_scheduler_gen_params (dict):\n- Parameters for the generator learning rate scheduler. Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n+ Parameters for the generator learning rate scheduler. Defaults to `{\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1}`.\n lr_scheduler_disc (torch.optim.Scheduler):\n Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.\n lr_scheduler_dict_params (dict):\n- Parameters for the discriminator learning rate scheduler. Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n+ Parameters for the discriminator learning rate scheduler. Defaults to `{\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1}`.\n \"\"\"\n \n model: str = \"parallel_wavegan\"\n@@ -124,7 +124,8 @@\n lr_disc: float = 0.0002 # Initial learning rate.\n optimizer: str = \"AdamW\"\n optimizer_params: dict = field(default_factory=lambda: {\"betas\": [0.8, 0.99], \"weight_decay\": 0.0})\n- lr_scheduler_gen: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n- lr_scheduler_gen_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n- lr_scheduler_disc: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n- lr_scheduler_disc_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n+ lr_scheduler_gen: str = \"StepLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n+ lr_scheduler_gen_params: dict = field(default_factory=lambda: {\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1})\n+ lr_scheduler_disc: str = \"StepLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n+ lr_scheduler_disc_params: dict = field(default_factory=lambda: {\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1})\n+ scheduler_after_epoch: bool = False\n", "issue": "ParallelWaveGAN config should be adjusted\nHi,\r\n\r\nI have tried training with the current default config and it drops the learning rate too fast and the model converges to generate noise. 
Because when `scheduler_after_epoch=False`, `ExponentialLR` with `gamma=0.999` will cause the learning rate to reach 0.000 in 10k steps.\r\n\r\nConfig from the original paper are:\r\n ```\r\n batch_size=8,\r\n stft_loss_weight=1.0,\r\n mse_G_loss_weight=4.0,\r\n steps_to_start_discriminator=100000,\r\n lr_gen=0.0001,\r\n lr_disc=0.00005,\r\n lr_scheduler_gen=\"StepLR\",\r\n lr_scheduler_gen_params={\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1},\r\n lr_scheduler_disc=\"StepLR\",\r\n lr_scheduler_disc_params={\"gamma\": 0.5, \"step_size\": 200000, \"last_epoch\": -1},\r\n scheduler_after_epoch=False,\r\n```\r\n\r\nIt is also possible to use ExponentialLR with some float rounding error:\r\n```\r\n lr_scheduler_gen=\"ExponentialLR\", # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\r\n lr_scheduler_gen_params={\"gamma\": 0.5**(1/200000), \"last_epoch\": -1},\r\n lr_scheduler_disc=\"ExponentialLR\", # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\r\n lr_scheduler_disc_params={\"gamma\": 0.5**(1/200000), \"last_epoch\": -1},\r\n```\r\n\r\nWith more GPU memory the batch_size can be increased and steps reduced.\n", "before_files": [{"content": "from dataclasses import dataclass, field\n\nfrom .shared_configs import BaseGANVocoderConfig\n\n\n@dataclass\nclass ParallelWaveganConfig(BaseGANVocoderConfig):\n \"\"\"Defines parameters for ParallelWavegan vocoder.\n\n Args:\n model (str):\n Model name used for selecting the right configuration at initialization. Defaults to `gan`.\n discriminator_model (str): One of the discriminators from `TTS.vocoder.models.*_discriminator`. Defaults to\n 'parallel_wavegan_discriminator`.\n discriminator_model_params (dict): The discriminator model kwargs. Defaults to\n '{\"num_layers\": 10}`\n generator_model (str): One of the generators from TTS.vocoder.models.*`. Every other non-GAN vocoder model is\n considered as a generator too. Defaults to `parallel_wavegan_generator`.\n generator_model_param (dict):\n The generator model kwargs. Defaults to `{\"upsample_factors\": [4, 4, 4, 4], \"stacks\": 3, \"num_res_blocks\": 30}`.\n batch_size (int):\n Batch size used at training. Larger values use more memory. Defaults to 16.\n seq_len (int):\n Audio segment length used at training. Larger values use more memory. Defaults to 8192.\n pad_short (int):\n Additional padding applied to the audio samples shorter than `seq_len`. Defaults to 0.\n use_noise_augment (bool):\n enable / disable random noise added to the input waveform. The noise is added after computing the\n features. Defaults to True.\n use_cache (bool):\n enable / disable in memory caching of the computed features. It can cause OOM error if the system RAM is\n not large enough. Defaults to True.\n steps_to_start_discriminator (int):\n Number of steps required to start training the discriminator. Defaults to 0.\n use_stft_loss (bool):`\n enable / disable use of STFT loss originally used by ParallelWaveGAN model. Defaults to True.\n use_subband_stft (bool):\n enable / disable use of subband loss computation originally used by MultiBandMelgan model. Defaults to True.\n use_mse_gan_loss (bool):\n enable / disable using Mean Squeare Error GAN loss. Defaults to True.\n use_hinge_gan_loss (bool):\n enable / disable using Hinge GAN loss. You should choose either Hinge or MSE loss for training GAN models.\n Defaults to False.\n use_feat_match_loss (bool):\n enable / disable using Feature Matching loss originally used by MelGAN model. 
Defaults to True.\n use_l1_spec_loss (bool):\n enable / disable using L1 spectrogram loss originally used by HifiGAN model. Defaults to False.\n stft_loss_params (dict): STFT loss parameters. Default to\n `{\"n_ffts\": [1024, 2048, 512], \"hop_lengths\": [120, 240, 50], \"win_lengths\": [600, 1200, 240]}`\n stft_loss_weight (float): STFT loss weight that multiplies the computed loss before summing up the total\n model loss. Defaults to 0.5.\n subband_stft_loss_weight (float):\n Subband STFT loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n mse_G_loss_weight (float):\n MSE generator loss weight that multiplies the computed loss before summing up the total loss. faults to 2.5.\n hinge_G_loss_weight (float):\n Hinge generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n feat_match_loss_weight (float):\n Feature matching loss weight that multiplies the computed loss before summing up the total loss. faults to 0.\n l1_spec_loss_weight (float):\n L1 spectrogram loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0.\n lr_gen (float):\n Generator model initial learning rate. Defaults to 0.0002.\n lr_disc (float):\n Discriminator model initial learning rate. Defaults to 0.0002.\n optimizer (torch.optim.Optimizer):\n Optimizer used for the training. Defaults to `AdamW`.\n optimizer_params (dict):\n Optimizer kwargs. Defaults to `{\"betas\": [0.8, 0.99], \"weight_decay\": 0.0}`\n lr_scheduler_gen (torch.optim.Scheduler):\n Learning rate scheduler for the generator. Defaults to `ExponentialLR`.\n lr_scheduler_gen_params (dict):\n Parameters for the generator learning rate scheduler. Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n lr_scheduler_disc (torch.optim.Scheduler):\n Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`.\n lr_scheduler_dict_params (dict):\n Parameters for the discriminator learning rate scheduler. 
Defaults to `{\"gamma\": 0.999, \"last_epoch\": -1}`.\n \"\"\"\n\n model: str = \"parallel_wavegan\"\n\n # Model specific params\n discriminator_model: str = \"parallel_wavegan_discriminator\"\n discriminator_model_params: dict = field(default_factory=lambda: {\"num_layers\": 10})\n generator_model: str = \"parallel_wavegan_generator\"\n generator_model_params: dict = field(\n default_factory=lambda: {\"upsample_factors\": [4, 4, 4, 4], \"stacks\": 3, \"num_res_blocks\": 30}\n )\n\n # Training - overrides\n batch_size: int = 6\n seq_len: int = 25600\n pad_short: int = 2000\n use_noise_augment: bool = False\n use_cache: bool = True\n steps_to_start_discriminator: int = 200000\n\n # LOSS PARAMETERS - overrides\n use_stft_loss: bool = True\n use_subband_stft_loss: bool = False\n use_mse_gan_loss: bool = True\n use_hinge_gan_loss: bool = False\n use_feat_match_loss: bool = False # requires MelGAN Discriminators (MelGAN and HifiGAN)\n use_l1_spec_loss: bool = False\n\n stft_loss_params: dict = field(\n default_factory=lambda: {\n \"n_ffts\": [1024, 2048, 512],\n \"hop_lengths\": [120, 240, 50],\n \"win_lengths\": [600, 1200, 240],\n }\n )\n\n # loss weights - overrides\n stft_loss_weight: float = 0.5\n subband_stft_loss_weight: float = 0\n mse_G_loss_weight: float = 2.5\n hinge_G_loss_weight: float = 0\n feat_match_loss_weight: float = 0\n l1_spec_loss_weight: float = 0\n\n # optimizer overrides\n lr_gen: float = 0.0002 # Initial learning rate.\n lr_disc: float = 0.0002 # Initial learning rate.\n optimizer: str = \"AdamW\"\n optimizer_params: dict = field(default_factory=lambda: {\"betas\": [0.8, 0.99], \"weight_decay\": 0.0})\n lr_scheduler_gen: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n lr_scheduler_gen_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n lr_scheduler_disc: str = \"ExponentialLR\" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html\n lr_scheduler_disc_params: dict = field(default_factory=lambda: {\"gamma\": 0.999, \"last_epoch\": -1})\n", "path": "TTS/vocoder/configs/parallel_wavegan_config.py"}]} | 2,990 | 658 |
gh_patches_debug_49498 | rasdani/github-patches | git_diff | pex-tool__pex-1516 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.55
On the docket:
+ [x] Add official support for Python 3.10 (#1512)
+ [x] Always register global options. (#1511)
+ [x] Fix RTD generation by pinning docutils low. (#1509)
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.54"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.54"
+__version__ = "2.1.55"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.54\"\n+__version__ = \"2.1.55\"\n", "issue": "Release 2.1.55\nOn the docket:\r\n+ [x] Add official support for Python 3.10 (#1512)\r\n+ [x] Always register global options. (#1511)\r\n+ [x] Fix RTD generation by pinning docutils low. (#1509)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.54\"\n", "path": "pex/version.py"}]} | 652 | 96 |
gh_patches_debug_16916 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-452 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC] Clarify original-dataframe mutation behavior in pyjanitor function docstrings
# Brief Description of Fix
Currently, some pyjanitor functions mutate the original dataframe and others return a copy. Solutions are heavily discussed in #79 but no conclusion has been reached. At the moment, it is unclear, without experimentation from the user, which behavior applies in each function.
In the interim, I propose to explicitly clarify this behavior in each function's docstring so the user has a clear idea regarding the function's mutating behavior. Below is a sample of what this could look like for `.clean_names()`:
"""
Clean column names.
Takes all column names, converts them to lowercase, then replaces all
spaces with underscores. <b>Does not mutate original dataframe.</b>
"""
Happy to add this line somewhere else in the docstring if inappropriate here.
- [Link to documentation page](https://pyjanitor.readthedocs.io/reference/index.html)
- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/blob/dev/janitor/functions.py)
</issue>
<code>
[start of janitor/biology.py]
1 """
2 Biology and bioinformatics-oriented data cleaning functions.
3 """
4
5 import pandas as pd
6 import pandas_flavor as pf
7
8 from .utils import deprecated_alias, import_message
9
10 try:
11 from Bio import SeqIO
12 except ImportError:
13 import_message(
14 "biology", "biopython", "conda install -c conda-forge biopython"
15 )
16
17
18 @pf.register_dataframe_method
19 @deprecated_alias(col_name="column_name")
20 def join_fasta(
21 df: pd.DataFrame, filename: str, id_col: str, column_name
22 ) -> pd.DataFrame:
23 """
24 Convenience method to join in a FASTA file as a column.
25
26 This allows us to add the string sequence of a FASTA file as a new column
27 of data in the dataframe.
28
29 This function only attaches the string representation of the SeqRecord.Seq
30 object from Biopython. Does not attach the full SeqRecord. Alphabet is
31 also not stored, under the assumption that the data scientist has domain
32 knowledge of what kind of sequence is being read in (nucleotide vs. amino
33 acid.)
34
35 For more advanced functions, please use phylopandas.
36
37 :param df: A pandas DataFrame.
38 :param filename: Path to the FASTA file.
39 :param id_col: The column in the DataFrame that houses sequence IDs.
40 :param column_name: The name of the new column.
41 """
42 seqrecords = {
43 x.id: x.seq.__str__() for x in SeqIO.parse(filename, "fasta")
44 }
45 seq_col = [seqrecords[i] for i in df[id_col]]
46 df[column_name] = seq_col
47 return df
48
[end of janitor/biology.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/biology.py b/janitor/biology.py
--- a/janitor/biology.py
+++ b/janitor/biology.py
@@ -26,12 +26,14 @@
This allows us to add the string sequence of a FASTA file as a new column
of data in the dataframe.
- This function only attaches the string representation of the SeqRecord.Seq
+ This method only attaches the string representation of the SeqRecord.Seq
object from Biopython. Does not attach the full SeqRecord. Alphabet is
also not stored, under the assumption that the data scientist has domain
knowledge of what kind of sequence is being read in (nucleotide vs. amino
acid.)
+ This method mutates the original DataFrame.
+
For more advanced functions, please use phylopandas.
:param df: A pandas DataFrame.
| {"golden_diff": "diff --git a/janitor/biology.py b/janitor/biology.py\n--- a/janitor/biology.py\n+++ b/janitor/biology.py\n@@ -26,12 +26,14 @@\n This allows us to add the string sequence of a FASTA file as a new column\n of data in the dataframe.\n \n- This function only attaches the string representation of the SeqRecord.Seq\n+ This method only attaches the string representation of the SeqRecord.Seq\n object from Biopython. Does not attach the full SeqRecord. Alphabet is\n also not stored, under the assumption that the data scientist has domain\n knowledge of what kind of sequence is being read in (nucleotide vs. amino\n acid.)\n \n+ This method mutates the original DataFrame.\n+\n For more advanced functions, please use phylopandas.\n \n :param df: A pandas DataFrame.\n", "issue": "[DOC] Clarify original-dataframe mutation behavior in pyjanitor function docstrings\n# Brief Description of Fix\r\nCurrently, some pyjanitor functions mutate the original dataframe and others return a copy. Solutions are heavily discussed in #79 but no conclusion has been reached. At the moment, it is unclear, without experimentation from the user, which behavior applies in each function. \r\n\r\nIn the interim, I propose to explicitly clarify this behavior in each function's docstring so the user has a clear idea regarding the function's mutating behavior. Below is a sample of what this could look like for `.clean_names()`:\r\n\r\n\"\"\"\r\nClean column names.\r\n Takes all column names, converts them to lowercase, then replaces all\r\n spaces with underscores. <b>Does not mutate original dataframe.</b>\r\n\"\"\"\r\n\r\nHappy to add this line somewhere else in the docstring if inappropriate here. \r\n\r\n- [Link to documentation page](https://pyjanitor.readthedocs.io/reference/index.html)\r\n- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/blob/dev/janitor/functions.py)\r\n\n", "before_files": [{"content": "\"\"\"\nBiology and bioinformatics-oriented data cleaning functions.\n\"\"\"\n\nimport pandas as pd\nimport pandas_flavor as pf\n\nfrom .utils import deprecated_alias, import_message\n\ntry:\n from Bio import SeqIO\nexcept ImportError:\n import_message(\n \"biology\", \"biopython\", \"conda install -c conda-forge biopython\"\n )\n\n\[email protected]_dataframe_method\n@deprecated_alias(col_name=\"column_name\")\ndef join_fasta(\n df: pd.DataFrame, filename: str, id_col: str, column_name\n) -> pd.DataFrame:\n \"\"\"\n Convenience method to join in a FASTA file as a column.\n\n This allows us to add the string sequence of a FASTA file as a new column\n of data in the dataframe.\n\n This function only attaches the string representation of the SeqRecord.Seq\n object from Biopython. Does not attach the full SeqRecord. Alphabet is\n also not stored, under the assumption that the data scientist has domain\n knowledge of what kind of sequence is being read in (nucleotide vs. amino\n acid.)\n\n For more advanced functions, please use phylopandas.\n\n :param df: A pandas DataFrame.\n :param filename: Path to the FASTA file.\n :param id_col: The column in the DataFrame that houses sequence IDs.\n :param column_name: The name of the new column.\n \"\"\"\n seqrecords = {\n x.id: x.seq.__str__() for x in SeqIO.parse(filename, \"fasta\")\n }\n seq_col = [seqrecords[i] for i in df[id_col]]\n df[column_name] = seq_col\n return df\n", "path": "janitor/biology.py"}]} | 1,208 | 198 |
gh_patches_debug_25388 | rasdani/github-patches | git_diff | internetarchive__openlibrary-6283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix BWB Importbot Low Quality records
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
Importbot is importing low-quality records from an unspecified source. Titles are public domain reprints (possibly print on demand) with keyword-stuffed titles.
### Evidence / Screenshot (if possible)
<img width="858" alt="Screenshot 2020-11-24 at 21 31 56" src="https://user-images.githubusercontent.com/17739465/100149057-e87a3b80-2e9d-11eb-9291-b49854aa65b7.png">
Example: https://openlibrary.org/search?q=adventures+of+sherlock+holmes+conan+doyle&mode=everything&page=3
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
Books have valid ISBNs and human editors should be allowed to add them manually, but these should be blocked from import by importbot.
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@mekarpeles
</issue>
<code>
[start of scripts/partner_batch_imports.py]
1 """
2 Process partner bibliographic csv data into importable json book
3 records and then batch submit into the ImportBot
4 `import_item` table (http://openlibrary.org/admin/imports)
5 which queues items to be imported via the
6 Open Library JSON import API: https://openlibrary.org/api/import
7
8 To Run:
9
10 PYTHONPATH=. python ./scripts/partner_batch_imports.py /olsystem/etc/openlibrary.yml
11 """
12
13 import os
14 import re
15 import sys
16 import web
17 import datetime
18 from datetime import timedelta
19 import logging
20 import requests
21
22 from infogami import config # noqa: F401
23 from openlibrary.config import load_config
24 from openlibrary.core.imports import Batch
25 from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
26
27 logger = logging.getLogger("openlibrary.importer.bwb")
28
29 SCHEMA_URL = (
30 "https://raw.githubusercontent.com/internetarchive"
31 "/openlibrary-client/master/olclient/schemata/import.schema.json"
32 )
33
34
35 class Biblio:
36
37 ACTIVE_FIELDS = [
38 'title',
39 'isbn_13',
40 'publish_date',
41 'publishers',
42 'weight',
43 'authors',
44 'lc_classifications',
45 'pagination',
46 'languages',
47 'subjects',
48 'source_records',
49 ]
50 INACTIVE_FIELDS = [
51 "copyright",
52 "issn",
53 "doi",
54 "lccn",
55 "dewey",
56 "length",
57 "width",
58 "height",
59 ]
60 REQUIRED_FIELDS = requests.get(SCHEMA_URL).json()['required']
61
62 NONBOOK = """A2 AA AB AJ AVI AZ BK BM C3 CD CE CF CR CRM CRW CX D3 DA DD DF DI DL DO DR
63 DRM DRW DS DV EC FC FI FM FR FZ GB GC GM GR H3 H5 L3 L5 LP MAC MC MF MG MH ML MS MSX MZ
64 N64 NGA NGB NGC NGE NT OR OS PC PP PRP PS PSC PY QU RE RV SA SD SG SH SK SL SMD SN SO SO1
65 SO2 SR SU TA TB TR TS TY UX V35 V8 VC VD VE VF VK VM VN VO VP VS VU VY VZ WA WC WI WL WM
66 WP WT WX XL XZ ZF ZZ""".split()
67
68 def __init__(self, data):
69 self.isbn = data[124]
70 self.source_id = 'bwb:%s' % self.isbn
71 self.isbn_13 = [self.isbn]
72 self.title = data[10]
73 self.primary_format = data[6]
74 self.publish_date = data[20][:4] # YYYY, YYYYMMDD
75 self.publishers = [data[135]]
76 self.weight = data[39]
77 self.authors = self.contributors(data)
78 self.lc_classifications = [data[147]] if data[147] else []
79 self.pagination = data[36]
80 self.languages = [data[37].lower()]
81 self.source_records = [self.source_id]
82 self.subjects = [
83 s.capitalize().replace('_', ', ')
84 for s in data[91:100]
85 # + data[101:120]
86 # + data[153:158]
87 if s
88 ]
89
90 # Inactive fields
91 self.copyright = data[19]
92 self.issn = data[54]
93 self.doi = data[145]
94 self.lccn = data[146]
95 self.dewey = data[49]
96 # physical_dimensions
97 # e.g. "5.4 x 4.7 x 0.2 inches"
98 self.length, self.width, self.height = data[40:43]
99
100 # Assert importable
101 for field in self.REQUIRED_FIELDS + ['isbn_13']:
102 assert getattr(self, field), field
103 # This seems to be eliminating books too aggressively
104 #assert self.primary_format not in self.NONBOOK, f"{self.primary_format} is NONBOOK"
105
106 @staticmethod
107 def contributors(data):
108 def make_author(name, _, typ):
109 author = {'name': name}
110 if typ == 'X':
111 # set corporate contributor
112 author['entity_type'] = 'org'
113 # TODO: sort out contributor types
114 # AU = author
115 # ED = editor
116 return author
117
118 contributors = (
119 (data[21 + i * 3], data[22 + i * 3], data[23 + i * 3]) for i in range(5)
120 )
121
122 # form list of author dicts
123 authors = [make_author(*c) for c in contributors if c[0]]
124 return authors
125
126 def json(self):
127 return {
128 field: getattr(self, field)
129 for field in self.ACTIVE_FIELDS
130 if getattr(self, field)
131 }
132
133
134 def load_state(path, logfile):
135 """Retrieves starting point from logfile, if log exists
136
137 Takes as input a path which expands to an ordered candidate list
138 of bettworldbks* filenames to process, the location of the
139 logfile, and determines which of those files are remaining, as
140 well as what our offset is in that file.
141
142 e.g. if we request path containing f1, f2, f3 and our log
143 says f2,100 then we start our processing at f2 at the 100th line.
144
145 This assumes the script is being called w/ e.g.:
146 /1/var/tmp/imports/2021-08/Bibliographic/*/
147 """
148 filenames = sorted(
149 os.path.join(path, f) for f in os.listdir(path) if f.startswith("bettworldbks")
150 )
151 try:
152 with open(logfile) as fin:
153 active_fname, offset = next(fin).strip().split(',')
154 unfinished_filenames = filenames[filenames.index(active_fname) :]
155 return unfinished_filenames, int(offset)
156 except (ValueError, OSError):
157 return filenames, 0
158
159
160 def update_state(logfile, fname, line_num=0):
161 """Records the last file we began processing and the current line"""
162 with open(logfile, 'w') as fout:
163 fout.write(f'{fname},{line_num}\n')
164
165
166 def csv_to_ol_json_item(line):
167 """converts a line to a book item"""
168 try:
169 data = line.decode().strip().split('|')
170 except UnicodeDecodeError:
171 data = line.decode('ISO-8859-1').strip().split('|')
172
173 b = Biblio(data)
174 return {'ia_id': b.source_id, 'data': b.json()}
175
176
177 def batch_import(path, batch, batch_size=5000):
178 logfile = os.path.join(path, 'import.log')
179 filenames, offset = load_state(path, logfile)
180
181 for fname in filenames:
182 book_items = []
183 with open(fname, 'rb') as f:
184 logger.info(f"Processing: {fname} from line {offset}")
185 for line_num, line in enumerate(f):
186
187 # skip over already processed records
188 if offset:
189 if offset > line_num:
190 continue
191 offset = 0
192
193 try:
194 book_items.append(csv_to_ol_json_item(line))
195 except AssertionError as e:
196 logger.info(f"Error: {e} from {line}")
197
198 # If we have enough items, submit a batch
199 if not ((line_num + 1) % batch_size):
200 batch.add_items(book_items)
201 update_state(logfile, fname, line_num)
202 book_items = [] # clear added items
203
204 # Add any remaining book_items to batch
205 if book_items:
206 batch.add_items(book_items)
207 update_state(logfile, fname, line_num)
208
209
210 def main(ol_config: str, batch_path: str):
211 load_config(ol_config)
212
213 # Partner data is offset ~15 days from start of month
214 date = datetime.date.today() - timedelta(days=15)
215 batch_name = "%s-%04d%02d" % ('bwb', date.year, date.month)
216 batch = Batch.find(batch_name) or Batch.new(batch_name)
217 batch_import(batch_path, batch)
218
219
220 if __name__ == '__main__':
221 FnToCLI(main).run()
222
[end of scripts/partner_batch_imports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/partner_batch_imports.py b/scripts/partner_batch_imports.py
--- a/scripts/partner_batch_imports.py
+++ b/scripts/partner_batch_imports.py
@@ -173,6 +173,10 @@
b = Biblio(data)
return {'ia_id': b.source_id, 'data': b.json()}
+def is_low_quality_book(book_item):
+ """check if a book item is of low quality"""
+ return ("notebook" in book_item.title.casefold() and "independently published" in book_item.publisher.casefold())
+
def batch_import(path, batch, batch_size=5000):
logfile = os.path.join(path, 'import.log')
@@ -191,7 +195,9 @@
offset = 0
try:
- book_items.append(csv_to_ol_json_item(line))
+ book_item = csv_to_ol_json_item(line)
+ if not is_low_quality_book(book_item["data"]):
+ book_items.append(book_item)
except AssertionError as e:
logger.info(f"Error: {e} from {line}")
@@ -206,7 +212,6 @@
batch.add_items(book_items)
update_state(logfile, fname, line_num)
-
def main(ol_config: str, batch_path: str):
load_config(ol_config)
| {"golden_diff": "diff --git a/scripts/partner_batch_imports.py b/scripts/partner_batch_imports.py\n--- a/scripts/partner_batch_imports.py\n+++ b/scripts/partner_batch_imports.py\n@@ -173,6 +173,10 @@\n b = Biblio(data)\n return {'ia_id': b.source_id, 'data': b.json()}\n \n+def is_low_quality_book(book_item):\n+ \"\"\"check if a book item is of low quality\"\"\"\n+ return (\"notebook\" in book_item.title.casefold() and \"independently published\" in book_item.publisher.casefold())\n+\n \n def batch_import(path, batch, batch_size=5000):\n logfile = os.path.join(path, 'import.log')\n@@ -191,7 +195,9 @@\n offset = 0\n \n try:\n- book_items.append(csv_to_ol_json_item(line))\n+ book_item = csv_to_ol_json_item(line)\n+ if not is_low_quality_book(book_item[\"data\"]):\n+ book_items.append(book_item)\n except AssertionError as e:\n logger.info(f\"Error: {e} from {line}\")\n \n@@ -206,7 +212,6 @@\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n \n-\n def main(ol_config: str, batch_path: str):\n load_config(ol_config)\n", "issue": "Fix BWB Importbot Low Quality records\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\nImportbot is importing low-quality records from unspecified source. Titles are public domain reprints (possibly print on demand) with keyword-stuffed titles. \r\n\r\n### Evidence / Screenshot (if possible)\r\n<img width=\"858\" alt=\"Screenshot 2020-11-24 at 21 31 56\" src=\"https://user-images.githubusercontent.com/17739465/100149057-e87a3b80-2e9d-11eb-9291-b49854aa65b7.png\">\r\n\r\nExample: https://openlibrary.org/search?q=adventures+of+sherlock+holmes+conan+doyle&mode=everything&page=3\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\nBooks have valid ISBNs and human editors should be allowed to add manually but these should be blocked from import by importbot.\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@mekarpeles \n", "before_files": [{"content": "\"\"\"\nProcess partner bibliographic csv data into importable json book\nrecords and then batch submit into the ImportBot\n`import_item` table (http://openlibrary.org/admin/imports)\nwhich queues items to be imported via the\nOpen Library JSON import API: https://openlibrary.org/api/import\n\nTo Run:\n\nPYTHONPATH=. 
python ./scripts/partner_batch_imports.py /olsystem/etc/openlibrary.yml\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport web\nimport datetime\nfrom datetime import timedelta\nimport logging\nimport requests\n\nfrom infogami import config # noqa: F401\nfrom openlibrary.config import load_config\nfrom openlibrary.core.imports import Batch\nfrom scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI\n\nlogger = logging.getLogger(\"openlibrary.importer.bwb\")\n\nSCHEMA_URL = (\n \"https://raw.githubusercontent.com/internetarchive\"\n \"/openlibrary-client/master/olclient/schemata/import.schema.json\"\n)\n\n\nclass Biblio:\n\n ACTIVE_FIELDS = [\n 'title',\n 'isbn_13',\n 'publish_date',\n 'publishers',\n 'weight',\n 'authors',\n 'lc_classifications',\n 'pagination',\n 'languages',\n 'subjects',\n 'source_records',\n ]\n INACTIVE_FIELDS = [\n \"copyright\",\n \"issn\",\n \"doi\",\n \"lccn\",\n \"dewey\",\n \"length\",\n \"width\",\n \"height\",\n ]\n REQUIRED_FIELDS = requests.get(SCHEMA_URL).json()['required']\n\n NONBOOK = \"\"\"A2 AA AB AJ AVI AZ BK BM C3 CD CE CF CR CRM CRW CX D3 DA DD DF DI DL DO DR\n DRM DRW DS DV EC FC FI FM FR FZ GB GC GM GR H3 H5 L3 L5 LP MAC MC MF MG MH ML MS MSX MZ\n N64 NGA NGB NGC NGE NT OR OS PC PP PRP PS PSC PY QU RE RV SA SD SG SH SK SL SMD SN SO SO1\n SO2 SR SU TA TB TR TS TY UX V35 V8 VC VD VE VF VK VM VN VO VP VS VU VY VZ WA WC WI WL WM\n WP WT WX XL XZ ZF ZZ\"\"\".split()\n\n def __init__(self, data):\n self.isbn = data[124]\n self.source_id = 'bwb:%s' % self.isbn\n self.isbn_13 = [self.isbn]\n self.title = data[10]\n self.primary_format = data[6]\n self.publish_date = data[20][:4] # YYYY, YYYYMMDD\n self.publishers = [data[135]]\n self.weight = data[39]\n self.authors = self.contributors(data)\n self.lc_classifications = [data[147]] if data[147] else []\n self.pagination = data[36]\n self.languages = [data[37].lower()]\n self.source_records = [self.source_id]\n self.subjects = [\n s.capitalize().replace('_', ', ')\n for s in data[91:100]\n # + data[101:120]\n # + data[153:158]\n if s\n ]\n\n # Inactive fields\n self.copyright = data[19]\n self.issn = data[54]\n self.doi = data[145]\n self.lccn = data[146]\n self.dewey = data[49]\n # physical_dimensions\n # e.g. \"5.4 x 4.7 x 0.2 inches\"\n self.length, self.width, self.height = data[40:43]\n\n # Assert importable\n for field in self.REQUIRED_FIELDS + ['isbn_13']:\n assert getattr(self, field), field\n # This seems to be eliminating books too aggressively\n #assert self.primary_format not in self.NONBOOK, f\"{self.primary_format} is NONBOOK\"\n\n @staticmethod\n def contributors(data):\n def make_author(name, _, typ):\n author = {'name': name}\n if typ == 'X':\n # set corporate contributor\n author['entity_type'] = 'org'\n # TODO: sort out contributor types\n # AU = author\n # ED = editor\n return author\n\n contributors = (\n (data[21 + i * 3], data[22 + i * 3], data[23 + i * 3]) for i in range(5)\n )\n\n # form list of author dicts\n authors = [make_author(*c) for c in contributors if c[0]]\n return authors\n\n def json(self):\n return {\n field: getattr(self, field)\n for field in self.ACTIVE_FIELDS\n if getattr(self, field)\n }\n\n\ndef load_state(path, logfile):\n \"\"\"Retrieves starting point from logfile, if log exists\n\n Takes as input a path which expands to an ordered candidate list\n of bettworldbks* filenames to process, the location of the\n logfile, and determines which of those files are remaining, as\n well as what our offset is in that file.\n\n e.g. 
if we request path containing f1, f2, f3 and our log\n says f2,100 then we start our processing at f2 at the 100th line.\n\n This assumes the script is being called w/ e.g.:\n /1/var/tmp/imports/2021-08/Bibliographic/*/\n \"\"\"\n filenames = sorted(\n os.path.join(path, f) for f in os.listdir(path) if f.startswith(\"bettworldbks\")\n )\n try:\n with open(logfile) as fin:\n active_fname, offset = next(fin).strip().split(',')\n unfinished_filenames = filenames[filenames.index(active_fname) :]\n return unfinished_filenames, int(offset)\n except (ValueError, OSError):\n return filenames, 0\n\n\ndef update_state(logfile, fname, line_num=0):\n \"\"\"Records the last file we began processing and the current line\"\"\"\n with open(logfile, 'w') as fout:\n fout.write(f'{fname},{line_num}\\n')\n\n\ndef csv_to_ol_json_item(line):\n \"\"\"converts a line to a book item\"\"\"\n try:\n data = line.decode().strip().split('|')\n except UnicodeDecodeError:\n data = line.decode('ISO-8859-1').strip().split('|')\n\n b = Biblio(data)\n return {'ia_id': b.source_id, 'data': b.json()}\n\n\ndef batch_import(path, batch, batch_size=5000):\n logfile = os.path.join(path, 'import.log')\n filenames, offset = load_state(path, logfile)\n\n for fname in filenames:\n book_items = []\n with open(fname, 'rb') as f:\n logger.info(f\"Processing: {fname} from line {offset}\")\n for line_num, line in enumerate(f):\n\n # skip over already processed records\n if offset:\n if offset > line_num:\n continue\n offset = 0\n\n try:\n book_items.append(csv_to_ol_json_item(line))\n except AssertionError as e:\n logger.info(f\"Error: {e} from {line}\")\n\n # If we have enough items, submit a batch\n if not ((line_num + 1) % batch_size):\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n book_items = [] # clear added items\n\n # Add any remaining book_items to batch\n if book_items:\n batch.add_items(book_items)\n update_state(logfile, fname, line_num)\n\n\ndef main(ol_config: str, batch_path: str):\n load_config(ol_config)\n\n # Partner data is offset ~15 days from start of month\n date = datetime.date.today() - timedelta(days=15)\n batch_name = \"%s-%04d%02d\" % ('bwb', date.year, date.month)\n batch = Batch.find(batch_name) or Batch.new(batch_name)\n batch_import(batch_path, batch)\n\n\nif __name__ == '__main__':\n FnToCLI(main).run()\n", "path": "scripts/partner_batch_imports.py"}]} | 3,251 | 304 |
gh_patches_debug_20700 | rasdani/github-patches | git_diff | learningequality__kolibri-8371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unsupported browser template does not use theme styling
### Observed behavior
The unsupported browser template uses static css in the template - mostly this is fine, but the colours used in there are Kolibri default theme colours, not those drawn from the currently loaded theme plugin.
### Expected behavior
Should use the themes defined by the currently active theme hook.
### User-facing consequences
Custom themes might look odd for unsupported browsers.
</issue>
<code>
[start of kolibri/core/views.py]
1 from django.contrib.auth import logout
2 from django.core.urlresolvers import reverse
3 from django.http import Http404
4 from django.http import HttpResponse
5 from django.http import HttpResponseRedirect
6 from django.shortcuts import redirect
7 from django.urls import is_valid_path
8 from django.urls import translate_url
9 from django.utils.decorators import method_decorator
10 from django.utils.six.moves.urllib.parse import urlsplit
11 from django.utils.six.moves.urllib.parse import urlunsplit
12 from django.utils.translation import check_for_language
13 from django.utils.translation import LANGUAGE_SESSION_KEY
14 from django.utils.translation import ugettext_lazy as _
15 from django.views.decorators.http import require_POST
16 from django.views.generic.base import TemplateView
17 from django.views.generic.base import View
18 from django.views.i18n import LANGUAGE_QUERY_PARAMETER
19 from django.views.static import serve
20
21 from kolibri.core.auth.constants import user_kinds
22 from kolibri.core.auth.models import Role
23 from kolibri.core.decorators import cache_no_user_data
24 from kolibri.core.device.hooks import SetupHook
25 from kolibri.core.device.translation import get_accept_headers_language
26 from kolibri.core.device.translation import get_device_language
27 from kolibri.core.device.translation import get_settings_language
28 from kolibri.core.device.utils import allow_guest_access
29 from kolibri.core.device.utils import device_provisioned
30 from kolibri.core.hooks import LogoutRedirectHook
31 from kolibri.core.hooks import RoleBasedRedirectHook
32
33
34 # Modified from django.views.i18n
35 @require_POST
36 def set_language(request):
37 """
38 Since this view changes how the user will see the rest of the site, it must
39 only be accessed as a POST request. If called as a GET request, it will
40 error.
41 """
42 lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
43 next_url = urlsplit(request.POST.get("next")) if request.POST.get("next") else None
44 if lang_code and check_for_language(lang_code):
45 if next_url and is_valid_path(next_url.path):
46 # If it is a recognized Kolibri path, then translate it to the new language and return it.
47 next_path = urlunsplit(
48 (
49 next_url[0],
50 next_url[1],
51 translate_url(next_url[2], lang_code),
52 next_url[3],
53 next_url[4],
54 )
55 )
56 else:
57 next_path = translate_url(reverse("kolibri:core:redirect_user"), lang_code)
58 response = HttpResponse(next_path)
59 if hasattr(request, "session"):
60 request.session[LANGUAGE_SESSION_KEY] = lang_code
61 else:
62 lang_code = (
63 get_device_language()
64 or get_accept_headers_language(request)
65 or get_settings_language()
66 )
67 if next_url and is_valid_path(next_url.path):
68 # If it is a recognized Kolibri path, then translate it using the default language code for this device
69 next_path = urlunsplit(
70 (
71 next_url[0],
72 next_url[1],
73 translate_url(next_url[2], lang_code),
74 next_url[3],
75 next_url[4],
76 )
77 )
78 else:
79 next_path = translate_url(reverse("kolibri:core:redirect_user"), lang_code)
80 response = HttpResponse(next_path)
81 if hasattr(request, "session"):
82 request.session.pop(LANGUAGE_SESSION_KEY, "")
83 return response
84
85
86 def logout_view(request):
87 logout(request)
88 if LogoutRedirectHook.is_enabled():
89 return HttpResponseRedirect(
90 next(obj.url for obj in LogoutRedirectHook.registered_hooks)
91 )
92 return HttpResponseRedirect(reverse("kolibri:core:redirect_user"))
93
94
95 def get_urls_by_role(role):
96 for hook in RoleBasedRedirectHook.registered_hooks:
97 if role in hook.roles:
98 yield hook.url
99
100
101 def get_url_by_role(role):
102 obj = next(
103 (hook for hook in RoleBasedRedirectHook.registered_hooks if role in hook.roles),
104 None,
105 )
106
107 if obj:
108 return obj.url
109
110
111 class GuestRedirectView(View):
112 def get(self, request):
113 """
114 Redirects a guest user to a learner accessible page.
115 """
116 if allow_guest_access():
117 return HttpResponseRedirect(get_url_by_role(user_kinds.LEARNER))
118 return RootURLRedirectView.as_view()(request)
119
120
121 device_is_provisioned = False
122
123
124 def is_provisioned():
125 # First check if the device has been provisioned
126 global device_is_provisioned
127 device_is_provisioned = device_is_provisioned or device_provisioned()
128 return device_is_provisioned
129
130
131 class RootURLRedirectView(View):
132 def get(self, request):
133 """
134 Redirects user based on the highest role they have for which a redirect is defined.
135 """
136 # If it has not been provisioned and we have something that can handle setup, redirect there.
137 if not is_provisioned() and SetupHook.provision_url:
138 return redirect(SetupHook.provision_url())
139
140 if request.user.is_authenticated():
141 url = None
142 if request.user.is_superuser:
143 url = url or get_url_by_role(user_kinds.SUPERUSER)
144 roles = set(
145 Role.objects.filter(user_id=request.user.id)
146 .values_list("kind", flat=True)
147 .distinct()
148 )
149 if user_kinds.ADMIN in roles:
150 url = url or get_url_by_role(user_kinds.ADMIN)
151 if user_kinds.COACH in roles or user_kinds.ASSIGNABLE_COACH in roles:
152 url = url or get_url_by_role(user_kinds.COACH)
153 url = url or get_url_by_role(user_kinds.LEARNER)
154 else:
155 url = get_url_by_role(user_kinds.ANONYMOUS)
156 if url:
157 return HttpResponseRedirect(url)
158 raise Http404(
159 _(
160 "No appropriate redirect pages found. It is likely that Kolibri is badly configured"
161 )
162 )
163
164
165 @method_decorator(cache_no_user_data, name="dispatch")
166 class UnsupportedBrowserView(TemplateView):
167 template_name = "kolibri/unsupported_browser.html"
168
169
170 class StatusCheckView(View):
171 def get(self, request):
172 """
173 Confirms that the server is up
174 """
175 return HttpResponse()
176
177
178 def static_serve_with_fallbacks(search_paths):
179 """
180 Serve a static file by iterating over search_paths until a matching file is found.
181 If a matching file is not found on any of the paths, a 404 will be raised.
182 """
183
184 def serve_func(request, path, document_root=None):
185
186 for search_path in search_paths:
187 try:
188 return serve(request, path, document_root=search_path)
189 except Http404:
190 pass
191
192 # allow the Http404 to be raised, since we couldn't find the file anywhere
193 return serve(request, path, document_root=search_paths[0])
194
195 return serve_func
196
[end of kolibri/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/views.py b/kolibri/core/views.py
--- a/kolibri/core/views.py
+++ b/kolibri/core/views.py
@@ -29,6 +29,10 @@
from kolibri.core.device.utils import device_provisioned
from kolibri.core.hooks import LogoutRedirectHook
from kolibri.core.hooks import RoleBasedRedirectHook
+from kolibri.core.theme_hook import BRAND_COLORS
+from kolibri.core.theme_hook import COLOR_V400
+from kolibri.core.theme_hook import PRIMARY
+from kolibri.core.theme_hook import ThemeHook
# Modified from django.views.i18n
@@ -166,6 +170,16 @@
class UnsupportedBrowserView(TemplateView):
template_name = "kolibri/unsupported_browser.html"
+ def get_context_data(self, **kwargs):
+ context = super(UnsupportedBrowserView, self).get_context_data(**kwargs)
+ context["brand_primary_v400"] = (
+ ThemeHook.get_theme()
+ .get(BRAND_COLORS, {})
+ .get(PRIMARY, {})
+ .get(COLOR_V400, "purple")
+ )
+ return context
+
class StatusCheckView(View):
def get(self, request):
| {"golden_diff": "diff --git a/kolibri/core/views.py b/kolibri/core/views.py\n--- a/kolibri/core/views.py\n+++ b/kolibri/core/views.py\n@@ -29,6 +29,10 @@\n from kolibri.core.device.utils import device_provisioned\n from kolibri.core.hooks import LogoutRedirectHook\n from kolibri.core.hooks import RoleBasedRedirectHook\n+from kolibri.core.theme_hook import BRAND_COLORS\n+from kolibri.core.theme_hook import COLOR_V400\n+from kolibri.core.theme_hook import PRIMARY\n+from kolibri.core.theme_hook import ThemeHook\n \n \n # Modified from django.views.i18n\n@@ -166,6 +170,16 @@\n class UnsupportedBrowserView(TemplateView):\n template_name = \"kolibri/unsupported_browser.html\"\n \n+ def get_context_data(self, **kwargs):\n+ context = super(UnsupportedBrowserView, self).get_context_data(**kwargs)\n+ context[\"brand_primary_v400\"] = (\n+ ThemeHook.get_theme()\n+ .get(BRAND_COLORS, {})\n+ .get(PRIMARY, {})\n+ .get(COLOR_V400, \"purple\")\n+ )\n+ return context\n+\n \n class StatusCheckView(View):\n def get(self, request):\n", "issue": "Unsupported browser template does not use theme styling\n### Observed behavior\r\nThe unsupported browser template uses static css in the template - mostly this is fine, but the colours used in there are Kolibri default theme colours, not those drawn from the currently loaded theme plugin.\r\n\r\n### Expected behavior\r\nShould use the themes defined by the currently active theme hook.\r\n\r\n### User-facing consequences\r\nCustom themes might look odd for unsupported browsers.\r\n\n", "before_files": [{"content": "from django.contrib.auth import logout\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import is_valid_path\nfrom django.urls import translate_url\nfrom django.utils.decorators import method_decorator\nfrom django.utils.six.moves.urllib.parse import urlsplit\nfrom django.utils.six.moves.urllib.parse import urlunsplit\nfrom django.utils.translation import check_for_language\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.base import View\nfrom django.views.i18n import LANGUAGE_QUERY_PARAMETER\nfrom django.views.static import serve\n\nfrom kolibri.core.auth.constants import user_kinds\nfrom kolibri.core.auth.models import Role\nfrom kolibri.core.decorators import cache_no_user_data\nfrom kolibri.core.device.hooks import SetupHook\nfrom kolibri.core.device.translation import get_accept_headers_language\nfrom kolibri.core.device.translation import get_device_language\nfrom kolibri.core.device.translation import get_settings_language\nfrom kolibri.core.device.utils import allow_guest_access\nfrom kolibri.core.device.utils import device_provisioned\nfrom kolibri.core.hooks import LogoutRedirectHook\nfrom kolibri.core.hooks import RoleBasedRedirectHook\n\n\n# Modified from django.views.i18n\n@require_POST\ndef set_language(request):\n \"\"\"\n Since this view changes how the user will see the rest of the site, it must\n only be accessed as a POST request. 
If called as a GET request, it will\n error.\n \"\"\"\n lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)\n next_url = urlsplit(request.POST.get(\"next\")) if request.POST.get(\"next\") else None\n if lang_code and check_for_language(lang_code):\n if next_url and is_valid_path(next_url.path):\n # If it is a recognized Kolibri path, then translate it to the new language and return it.\n next_path = urlunsplit(\n (\n next_url[0],\n next_url[1],\n translate_url(next_url[2], lang_code),\n next_url[3],\n next_url[4],\n )\n )\n else:\n next_path = translate_url(reverse(\"kolibri:core:redirect_user\"), lang_code)\n response = HttpResponse(next_path)\n if hasattr(request, \"session\"):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n else:\n lang_code = (\n get_device_language()\n or get_accept_headers_language(request)\n or get_settings_language()\n )\n if next_url and is_valid_path(next_url.path):\n # If it is a recognized Kolibri path, then translate it using the default language code for this device\n next_path = urlunsplit(\n (\n next_url[0],\n next_url[1],\n translate_url(next_url[2], lang_code),\n next_url[3],\n next_url[4],\n )\n )\n else:\n next_path = translate_url(reverse(\"kolibri:core:redirect_user\"), lang_code)\n response = HttpResponse(next_path)\n if hasattr(request, \"session\"):\n request.session.pop(LANGUAGE_SESSION_KEY, \"\")\n return response\n\n\ndef logout_view(request):\n logout(request)\n if LogoutRedirectHook.is_enabled():\n return HttpResponseRedirect(\n next(obj.url for obj in LogoutRedirectHook.registered_hooks)\n )\n return HttpResponseRedirect(reverse(\"kolibri:core:redirect_user\"))\n\n\ndef get_urls_by_role(role):\n for hook in RoleBasedRedirectHook.registered_hooks:\n if role in hook.roles:\n yield hook.url\n\n\ndef get_url_by_role(role):\n obj = next(\n (hook for hook in RoleBasedRedirectHook.registered_hooks if role in hook.roles),\n None,\n )\n\n if obj:\n return obj.url\n\n\nclass GuestRedirectView(View):\n def get(self, request):\n \"\"\"\n Redirects a guest user to a learner accessible page.\n \"\"\"\n if allow_guest_access():\n return HttpResponseRedirect(get_url_by_role(user_kinds.LEARNER))\n return RootURLRedirectView.as_view()(request)\n\n\ndevice_is_provisioned = False\n\n\ndef is_provisioned():\n # First check if the device has been provisioned\n global device_is_provisioned\n device_is_provisioned = device_is_provisioned or device_provisioned()\n return device_is_provisioned\n\n\nclass RootURLRedirectView(View):\n def get(self, request):\n \"\"\"\n Redirects user based on the highest role they have for which a redirect is defined.\n \"\"\"\n # If it has not been provisioned and we have something that can handle setup, redirect there.\n if not is_provisioned() and SetupHook.provision_url:\n return redirect(SetupHook.provision_url())\n\n if request.user.is_authenticated():\n url = None\n if request.user.is_superuser:\n url = url or get_url_by_role(user_kinds.SUPERUSER)\n roles = set(\n Role.objects.filter(user_id=request.user.id)\n .values_list(\"kind\", flat=True)\n .distinct()\n )\n if user_kinds.ADMIN in roles:\n url = url or get_url_by_role(user_kinds.ADMIN)\n if user_kinds.COACH in roles or user_kinds.ASSIGNABLE_COACH in roles:\n url = url or get_url_by_role(user_kinds.COACH)\n url = url or get_url_by_role(user_kinds.LEARNER)\n else:\n url = get_url_by_role(user_kinds.ANONYMOUS)\n if url:\n return HttpResponseRedirect(url)\n raise Http404(\n _(\n \"No appropriate redirect pages found. 
It is likely that Kolibri is badly configured\"\n )\n )\n\n\n@method_decorator(cache_no_user_data, name=\"dispatch\")\nclass UnsupportedBrowserView(TemplateView):\n template_name = \"kolibri/unsupported_browser.html\"\n\n\nclass StatusCheckView(View):\n def get(self, request):\n \"\"\"\n Confirms that the server is up\n \"\"\"\n return HttpResponse()\n\n\ndef static_serve_with_fallbacks(search_paths):\n \"\"\"\n Serve a static file by iterating over search_paths until a matching file is found.\n If a matching file is not found on any of the paths, a 404 will be raised.\n \"\"\"\n\n def serve_func(request, path, document_root=None):\n\n for search_path in search_paths:\n try:\n return serve(request, path, document_root=search_path)\n except Http404:\n pass\n\n # allow the Http404 to be raised, since we couldn't find the file anywhere\n return serve(request, path, document_root=search_paths[0])\n\n return serve_func\n", "path": "kolibri/core/views.py"}]} | 2,554 | 279 |
gh_patches_debug_15954 | rasdani/github-patches | git_diff | Nitrate__Nitrate-352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove django 1.10 support
- Remove django 1.10 testenv from tox.ini
- Update django version in setup.py. Minimum django version is `1.11`.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 with open('VERSION.txt', 'r') as f:
9 pkg_version = f.read().strip()
10
11
12 def get_long_description():
13 with open('README.rst', 'r') as f:
14 return f.read()
15
16
17 install_requires = [
18 'PyMySQL == 0.7.11',
19 'beautifulsoup4 >= 4.1.1',
20 'celery == 4.1.0',
21 'django >= 1.10,<2.0',
22 'django-contrib-comments == 1.8.0',
23 'django-tinymce == 2.7.0',
24 'django-uuslug == 1.1.8',
25 'html2text',
26 'kobo == 0.7.0',
27 'odfpy >= 0.9.6',
28 'python-bugzilla',
29 'six',
30 'xmltodict',
31 ]
32
33 if sys.version_info.major < 3:
34 install_requires += [
35 'enum34',
36 ]
37
38 extras_require = {
39 # Required for tcms.core.contrib.auth.backends.KerberosBackend
40 'krbauth': [
41 'kerberos == 1.2.5'
42 ],
43
44 # Packages for building documentation
45 'docs': [
46 'Sphinx >= 1.1.2',
47 'sphinx_rtd_theme',
48 ],
49
50 # Necessary packages for running tests
51 'tests': [
52 'coverage',
53 'factory_boy',
54 'flake8',
55 'mock',
56 'pytest',
57 'pytest-cov',
58 'pytest-django',
59 ],
60
61 # Contain tools that assists the development
62 'devtools': [
63 'django-debug-toolbar == 1.7',
64 'tox',
65 'django-extensions',
66 'pygraphviz',
67 'future-breakpoint',
68 ]
69 }
70
71
72 setup(
73 name='Nitrate',
74 version=pkg_version,
75 description='Test Case Management System',
76 long_description=get_long_description(),
77 author='Nitrate Team',
78 maintainer='Chenxiong Qi',
79 maintainer_email='[email protected]',
80 url='https://github.com/Nitrate/Nitrate/',
81 license='GPLv2+',
82 keywords='test case',
83 install_requires=install_requires,
84 extras_require=extras_require,
85 packages=find_packages(),
86 include_package_data=True,
87 classifiers=[
88 'Framework :: Django',
89 'Framework :: Django :: 1.10',
90 'Framework :: Django :: 1.11',
91 'Intended Audience :: Developers',
92 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
93 'Programming Language :: Python :: 2',
94 'Programming Language :: Python :: 2.7',
95 'Programming Language :: Python :: 3',
96 'Programming Language :: Python :: 3.6',
97 'Topic :: Software Development :: Quality Assurance',
98 'Topic :: Software Development :: Testing',
99 ],
100 )
101
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
'PyMySQL == 0.7.11',
'beautifulsoup4 >= 4.1.1',
'celery == 4.1.0',
- 'django >= 1.10,<2.0',
+ 'django >= 1.11,<2.0',
'django-contrib-comments == 1.8.0',
'django-tinymce == 2.7.0',
'django-uuslug == 1.1.8',
@@ -86,7 +86,6 @@
include_package_data=True,
classifiers=[
'Framework :: Django',
- 'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n- 'django >= 1.10,<2.0',\n+ 'django >= 1.11,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n@@ -86,7 +86,6 @@\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n- 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n", "issue": "Remove django 1.10 support\n- Remove django 1.10 testenv from tox.ini\r\n- Update django version in setup.py. Minimum django version is `1.11`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django >= 1.10,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]} | 1,428 | 222 |
gh_patches_debug_50088 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1216 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in comment in conversationbot2.py
<!--
Thanks for reporting issues of python-telegram-bot!
Use this template to notify us if you found a bug, or if you want to request a new feature.
If you're looking for help with programming your bot using our library, feel free to ask your
questions in our telegram group at: https://t.me/pythontelegrambotgroup
To make it easier for us to help you please enter detailed information below.
Please note, we only support the latest version of python-telegram-bot and
master branch. Please make sure to upgrade & recreate the issue on the latest
version prior to opening an issue.
-->
### Steps to reproduce
1. Not really a bug... wrong comment line in conversationbot2.py :)
### Expected behaviour
Should be: # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
### Actual behaviour
Actual comment in conversationbot2.py (leftover from conversationbot.py I guess :))
# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
### Configuration
**Operating System:**
Windows
**Version of Python, python-telegram-bot & dependencies:**
3.6
``$ python -m telegram``
### Logs
Insert logs here (if necessary)
</issue>
<code>
[start of examples/conversationbot2.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Simple Bot to reply to Telegram messages
5 # This program is dedicated to the public domain under the CC0 license.
6 """
7 This Bot uses the Updater class to handle the bot.
8
9 First, a few callback functions are defined. Then, those functions are passed to
10 the Dispatcher and registered at their respective places.
11 Then, the bot is started and runs until we press Ctrl-C on the command line.
12
13 Usage:
14 Example of a bot-user conversation using ConversationHandler.
15 Send /start to initiate the conversation.
16 Press Ctrl-C on the command line or send a signal to the process to stop the
17 bot.
18 """
19
20 from telegram import ReplyKeyboardMarkup
21 from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
22 ConversationHandler)
23
24 import logging
25
26 # Enable logging
27 logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
28 level=logging.INFO)
29
30 logger = logging.getLogger(__name__)
31
32 CHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)
33
34 reply_keyboard = [['Age', 'Favourite colour'],
35 ['Number of siblings', 'Something else...'],
36 ['Done']]
37 markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
38
39
40 def facts_to_str(user_data):
41 facts = list()
42
43 for key, value in user_data.items():
44 facts.append('{} - {}'.format(key, value))
45
46 return "\n".join(facts).join(['\n', '\n'])
47
48
49 def start(bot, update):
50 update.message.reply_text(
51 "Hi! My name is Doctor Botter. I will hold a more complex conversation with you. "
52 "Why don't you tell me something about yourself?",
53 reply_markup=markup)
54
55 return CHOOSING
56
57
58 def regular_choice(bot, update, user_data):
59 text = update.message.text
60 user_data['choice'] = text
61 update.message.reply_text(
62 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))
63
64 return TYPING_REPLY
65
66
67 def custom_choice(bot, update):
68 update.message.reply_text('Alright, please send me the category first, '
69 'for example "Most impressive skill"')
70
71 return TYPING_CHOICE
72
73
74 def received_information(bot, update, user_data):
75 text = update.message.text
76 category = user_data['choice']
77 user_data[category] = text
78 del user_data['choice']
79
80 update.message.reply_text("Neat! Just so you know, this is what you already told me:"
81 "{}"
82 "You can tell me more, or change your opinion on something.".format(
83 facts_to_str(user_data)), reply_markup=markup)
84
85 return CHOOSING
86
87
88 def done(bot, update, user_data):
89 if 'choice' in user_data:
90 del user_data['choice']
91
92 update.message.reply_text("I learned these facts about you:"
93 "{}"
94 "Until next time!".format(facts_to_str(user_data)))
95
96 user_data.clear()
97 return ConversationHandler.END
98
99
100 def error(bot, update, error):
101 """Log Errors caused by Updates."""
102 logger.warning('Update "%s" caused error "%s"', update, error)
103
104
105 def main():
106 # Create the Updater and pass it your bot's token.
107 updater = Updater("TOKEN")
108
109 # Get the dispatcher to register handlers
110 dp = updater.dispatcher
111
112 # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
113 conv_handler = ConversationHandler(
114 entry_points=[CommandHandler('start', start)],
115
116 states={
117 CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',
118 regular_choice,
119 pass_user_data=True),
120 RegexHandler('^Something else...$',
121 custom_choice),
122 ],
123
124 TYPING_CHOICE: [MessageHandler(Filters.text,
125 regular_choice,
126 pass_user_data=True),
127 ],
128
129 TYPING_REPLY: [MessageHandler(Filters.text,
130 received_information,
131 pass_user_data=True),
132 ],
133 },
134
135 fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]
136 )
137
138 dp.add_handler(conv_handler)
139
140 # log all errors
141 dp.add_error_handler(error)
142
143 # Start the Bot
144 updater.start_polling()
145
146 # Run the bot until you press Ctrl-C or the process receives SIGINT,
147 # SIGTERM or SIGABRT. This should be used most of the time, since
148 # start_polling() is non-blocking and will stop the bot gracefully.
149 updater.idle()
150
151
152 if __name__ == '__main__':
153 main()
154
[end of examples/conversationbot2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/conversationbot2.py b/examples/conversationbot2.py
--- a/examples/conversationbot2.py
+++ b/examples/conversationbot2.py
@@ -109,7 +109,7 @@
# Get the dispatcher to register handlers
dp = updater.dispatcher
- # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
+ # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
| {"golden_diff": "diff --git a/examples/conversationbot2.py b/examples/conversationbot2.py\n--- a/examples/conversationbot2.py\n+++ b/examples/conversationbot2.py\n@@ -109,7 +109,7 @@\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n \n- # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\n+ # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n", "issue": "Typo in comment in conversationbot2.py\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug, or if you want to request a new feature.\r\nIf you're looking for help with programming your bot using our library, feel free to ask your\r\nquestions in out telegram group at: https://t.me/pythontelegrambotgroup\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1. Not really a bug... wrong comment line in conversationbot2.py :)\r\n\r\n### Expected behaviour\r\nShould be: # Add conversation handler with the states CHOOSING, TYPING_CHOICE and TYPING_REPLY\r\n\r\n### Actual behaviour\r\nActual comment in conversationbot2.py (leftover from conversationbot.py I guess :))\r\n# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\r\n\r\n### Configuration\r\n**Operating System:**\r\nWindows\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n3.6\r\n\r\n``$ python -m telegram``\r\n\r\n### Logs\r\nInsert logs here (if necessary)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Simple Bot to reply to Telegram messages\n# This program is dedicated to the public domain under the CC0 license.\n\"\"\"\nThis Bot uses the Updater class to handle the bot.\n\nFirst, a few callback functions are defined. Then, those functions are passed to\nthe Dispatcher and registered at their respective places.\nThen, the bot is started and runs until we press Ctrl-C on the command line.\n\nUsage:\nExample of a bot-user conversation using ConversationHandler.\nSend /start to initiate the conversation.\nPress Ctrl-C on the command line or send a signal to the process to stop the\nbot.\n\"\"\"\n\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler)\n\nimport logging\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nCHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)\n\nreply_keyboard = [['Age', 'Favourite colour'],\n ['Number of siblings', 'Something else...'],\n ['Done']]\nmarkup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)\n\n\ndef facts_to_str(user_data):\n facts = list()\n\n for key, value in user_data.items():\n facts.append('{} - {}'.format(key, value))\n\n return \"\\n\".join(facts).join(['\\n', '\\n'])\n\n\ndef start(bot, update):\n update.message.reply_text(\n \"Hi! My name is Doctor Botter. I will hold a more complex conversation with you. 
\"\n \"Why don't you tell me something about yourself?\",\n reply_markup=markup)\n\n return CHOOSING\n\n\ndef regular_choice(bot, update, user_data):\n text = update.message.text\n user_data['choice'] = text\n update.message.reply_text(\n 'Your {}? Yes, I would love to hear about that!'.format(text.lower()))\n\n return TYPING_REPLY\n\n\ndef custom_choice(bot, update):\n update.message.reply_text('Alright, please send me the category first, '\n 'for example \"Most impressive skill\"')\n\n return TYPING_CHOICE\n\n\ndef received_information(bot, update, user_data):\n text = update.message.text\n category = user_data['choice']\n user_data[category] = text\n del user_data['choice']\n\n update.message.reply_text(\"Neat! Just so you know, this is what you already told me:\"\n \"{}\"\n \"You can tell me more, or change your opinion on something.\".format(\n facts_to_str(user_data)), reply_markup=markup)\n\n return CHOOSING\n\n\ndef done(bot, update, user_data):\n if 'choice' in user_data:\n del user_data['choice']\n\n update.message.reply_text(\"I learned these facts about you:\"\n \"{}\"\n \"Until next time!\".format(facts_to_str(user_data)))\n\n user_data.clear()\n return ConversationHandler.END\n\n\ndef error(bot, update, error):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef main():\n # Create the Updater and pass it your bot's token.\n updater = Updater(\"TOKEN\")\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n\n states={\n CHOOSING: [RegexHandler('^(Age|Favourite colour|Number of siblings)$',\n regular_choice,\n pass_user_data=True),\n RegexHandler('^Something else...$',\n custom_choice),\n ],\n\n TYPING_CHOICE: [MessageHandler(Filters.text,\n regular_choice,\n pass_user_data=True),\n ],\n\n TYPING_REPLY: [MessageHandler(Filters.text,\n received_information,\n pass_user_data=True),\n ],\n },\n\n fallbacks=[RegexHandler('^Done$', done, pass_user_data=True)]\n )\n\n dp.add_handler(conv_handler)\n\n # log all errors\n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/conversationbot2.py"}]} | 2,179 | 125 |
gh_patches_debug_18263 | rasdani/github-patches | git_diff | kubeflow__pipelines-4130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
allow output artifact store configuration (vs hard coded)
it seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`).
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
it would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.
i suggest making it configurable, i can do such PR if we agree its needed.
flexible pipeline service (host) path in client SDK
when creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
also note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug
if its acceptable i can submit a PR for the line change above
</issue>
<code>
[start of sdk/python/kfp/components/_python_to_graph_component.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 __all__ = [
16 'create_graph_component_from_pipeline_func',
17 ]
18
19
20 import inspect
21 from collections import OrderedDict
22 from typing import Callable
23
24 from . import _components
25 from ._structures import TaskSpec, ComponentSpec, OutputSpec, GraphInputReference, TaskOutputArgument, GraphImplementation, GraphSpec
26 from ._naming import _make_name_unique_by_adding_index
27 from ._python_op import _extract_component_interface
28 from ._components import _create_task_factory_from_component_spec
29
30
31 def create_graph_component_from_pipeline_func(
32 pipeline_func: Callable,
33 output_component_file: str = None,
34 embed_component_specs: bool = False,
35 ) -> Callable:
36 '''Experimental! Creates graph component definition from a python pipeline function. The component file can be published for sharing.
37 Pipeline function is a function that only calls component functions and passes outputs to inputs.
38 This feature is experimental and lacks support for some of the DSL features like conditions and loops.
39 Only pipelines consisting of loaded components or python components are currently supported (no manually created ContainerOps or ResourceOps).
40
41 Args:
42 pipeline_func: Python function to convert
43 output_component_file: Path of the file where the component definition will be written. The `component.yaml` file can then be published for sharing.
44 embed_component_specs: Whether to embed component definitions or just reference them. Embedding makes the graph component self-contained. Default is False.
45
46 Returns:
47 A function representing the graph component. The component spec can be accessed using the .component_spec attribute.
48 The function will have the same parameters as the original function.
49 When called, the function will return a task object, corresponding to the graph component.
50 To reference the outputs of the task, use task.outputs["Output name"].
51
52 Example:
53
54 producer_op = load_component_from_file('producer/component.yaml')
55 processor_op = load_component_from_file('processor/component.yaml')
56
57 def pipeline1(pipeline_param_1: int):
58 producer_task = producer_op()
59 processor_task = processor_op(pipeline_param_1, producer_task.outputs['Output 2'])
60
61 return OrderedDict([
62 ('Pipeline output 1', producer_task.outputs['Output 1']),
63 ('Pipeline output 2', processor_task.outputs['Output 2']),
64 ])
65
66 create_graph_component_from_pipeline_func(pipeline1, output_component_file='pipeline.component.yaml')
67 '''
68 component_spec = create_graph_component_spec_from_pipeline_func(pipeline_func, embed_component_specs)
69 if output_component_file:
70 from pathlib import Path
71 from ._yaml_utils import dump_yaml
72 component_dict = component_spec.to_dict()
73 component_yaml = dump_yaml(component_dict)
74 Path(output_component_file).write_text(component_yaml)
75
76 return _create_task_factory_from_component_spec(component_spec)
77
78 def create_graph_component_spec_from_pipeline_func(pipeline_func: Callable, embed_component_specs: bool = False) -> ComponentSpec:
79
80 component_spec = _extract_component_interface(pipeline_func)
81 # Checking the function parameters - they should not have file passing annotations.
82 input_specs = component_spec.inputs or []
83 for input in input_specs:
84 if input._passing_style:
85 raise TypeError('Graph component function parameter "{}" cannot have file-passing annotation "{}".'.format(input.name, input._passing_style))
86
87 task_map = OrderedDict() #Preserving task order
88
89 from ._components import _create_task_spec_from_component_and_arguments
90 def task_construction_handler(
91 component_spec,
92 arguments,
93 component_ref,
94 ):
95 task = _create_task_spec_from_component_and_arguments(
96 component_spec=component_spec,
97 arguments=arguments,
98 component_ref=component_ref,
99 )
100
101 #Rewriting task ids so that they're same every time
102 task_id = task.component_ref.spec.name or "Task"
103 task_id = _make_name_unique_by_adding_index(task_id, task_map.keys(), ' ')
104 for output_ref in task.outputs.values():
105 output_ref.task_output.task_id = task_id
106 output_ref.task_output.task = None
107 task_map[task_id] = task
108 # Remove the component spec from component reference unless it will make the reference empty or unless explicitly asked by the user
109 if not embed_component_specs and any([task.component_ref.name, task.component_ref.url, task.component_ref.digest]):
110 task.component_ref.spec = None
111
112 return task #The handler is a transformation function, so it must pass the task through.
113
114 # Preparing the pipeline_func arguments
115 # TODO: The key should be original parameter name if different
116 pipeline_func_args = {input.name: GraphInputReference(input_name=input.name).as_argument() for input in input_specs}
117
118 try:
119 #Setting the handler to fix and catch the tasks.
120 # FIX: The handler only hooks container component creation
121 old_handler = _components._container_task_constructor
122 _components._container_task_constructor = task_construction_handler
123
124 #Calling the pipeline_func with GraphInputArgument instances as arguments
125 pipeline_func_result = pipeline_func(**pipeline_func_args)
126 finally:
127 _components._container_task_constructor = old_handler
128
129
130 # Getting graph outputs
131 output_names = [output.name for output in (component_spec.outputs or [])]
132
133 if len(output_names) == 1 and output_names[0] == 'Output': # TODO: Check whether the NamedTuple syntax was used
134 pipeline_func_result = [pipeline_func_result]
135
136 if isinstance(pipeline_func_result, tuple) and hasattr(pipeline_func_result, '_asdict'): # collections.namedtuple and typing.NamedTuple
137 pipeline_func_result = pipeline_func_result._asdict()
138
139 if isinstance(pipeline_func_result, dict):
140 if output_names:
141 if set(output_names) != set(pipeline_func_result.keys()):
142 raise ValueError('Returned outputs do not match outputs specified in the function signature: {} = {}'.format(str(set(pipeline_func_result.keys())), str(set(output_names))))
143
144 if pipeline_func_result is None:
145 graph_output_value_map = {}
146 elif isinstance(pipeline_func_result, dict):
147 graph_output_value_map = OrderedDict(pipeline_func_result)
148 elif isinstance(pipeline_func_result, (list, tuple)):
149 if output_names:
150 if len(pipeline_func_result) != len(output_names):
151 raise ValueError('Expected {} values from pipeline function, but got {}.'.format(len(output_names), len(pipeline_func_result)))
152 graph_output_value_map = OrderedDict((name_value[0], name_value[1]) for name_value in zip(output_names, pipeline_func_result))
153 else:
154 graph_output_value_map = OrderedDict((output_value.task_output.output_name, output_value) for output_value in pipeline_func_result) # TODO: Fix possible name non-uniqueness (e.g. use task id as prefix or add index to non-unique names)
155 else:
156 raise TypeError('Pipeline must return outputs as tuple or OrderedDict.')
157
158 #Checking the pipeline_func output object types
159 for output_name, output_value in graph_output_value_map.items():
160 if not isinstance(output_value, TaskOutputArgument):
161 raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got "{output_name}" = "{}".'.format(output_name, str(output_value)))
162
163 if not component_spec.outputs and graph_output_value_map:
164 component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]
165
166 component_spec.implementation = GraphImplementation(
167 graph=GraphSpec(
168 tasks=task_map,
169 output_values=graph_output_value_map,
170 )
171 )
172 return component_spec
173
[end of sdk/python/kfp/components/_python_to_graph_component.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/kfp/components/_python_to_graph_component.py b/sdk/python/kfp/components/_python_to_graph_component.py
--- a/sdk/python/kfp/components/_python_to_graph_component.py
+++ b/sdk/python/kfp/components/_python_to_graph_component.py
@@ -158,7 +158,7 @@
#Checking the pipeline_func output object types
for output_name, output_value in graph_output_value_map.items():
if not isinstance(output_value, TaskOutputArgument):
- raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got "{output_name}" = "{}".'.format(output_name, str(output_value)))
+ raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got "{}" = "{}".'.format(output_name, str(output_value)))
if not component_spec.outputs and graph_output_value_map:
component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]
| {"golden_diff": "diff --git a/sdk/python/kfp/components/_python_to_graph_component.py b/sdk/python/kfp/components/_python_to_graph_component.py\n--- a/sdk/python/kfp/components/_python_to_graph_component.py\n+++ b/sdk/python/kfp/components/_python_to_graph_component.py\n@@ -158,7 +158,7 @@\n #Checking the pipeline_func output object types\n for output_name, output_value in graph_output_value_map.items():\n if not isinstance(output_value, TaskOutputArgument):\n- raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got \"{output_name}\" = \"{}\".'.format(output_name, str(output_value)))\n+ raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got \"{}\" = \"{}\".'.format(output_name, str(output_value)))\n \n if not component_spec.outputs and graph_output_value_map:\n component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n 'create_graph_component_from_pipeline_func',\n]\n\n\nimport inspect\nfrom collections import OrderedDict\nfrom typing import Callable\n\nfrom . 
import _components\nfrom ._structures import TaskSpec, ComponentSpec, OutputSpec, GraphInputReference, TaskOutputArgument, GraphImplementation, GraphSpec\nfrom ._naming import _make_name_unique_by_adding_index\nfrom ._python_op import _extract_component_interface\nfrom ._components import _create_task_factory_from_component_spec\n\n\ndef create_graph_component_from_pipeline_func(\n pipeline_func: Callable,\n output_component_file: str = None,\n embed_component_specs: bool = False,\n) -> Callable:\n '''Experimental! Creates graph component definition from a python pipeline function. The component file can be published for sharing.\n Pipeline function is a function that only calls component functions and passes outputs to inputs.\n This feature is experimental and lacks support for some of the DSL features like conditions and loops.\n Only pipelines consisting of loaded components or python components are currently supported (no manually created ContainerOps or ResourceOps).\n\n Args:\n pipeline_func: Python function to convert\n output_component_file: Path of the file where the component definition will be written. The `component.yaml` file can then be published for sharing.\n embed_component_specs: Whether to embed component definitions or just reference them. Embedding makes the graph component self-contained. Default is False.\n\n Returns:\n A function representing the graph component. The component spec can be accessed using the .component_spec attribute.\n The function will have the same parameters as the original function.\n When called, the function will return a task object, corresponding to the graph component.\n To reference the outputs of the task, use task.outputs[\"Output name\"].\n\n Example:\n\n producer_op = load_component_from_file('producer/component.yaml')\n processor_op = load_component_from_file('processor/component.yaml')\n\n def pipeline1(pipeline_param_1: int):\n producer_task = producer_op()\n processor_task = processor_op(pipeline_param_1, producer_task.outputs['Output 2'])\n\n return OrderedDict([\n ('Pipeline output 1', producer_task.outputs['Output 1']),\n ('Pipeline output 2', processor_task.outputs['Output 2']),\n ])\n \n create_graph_component_from_pipeline_func(pipeline1, output_component_file='pipeline.component.yaml')\n '''\n component_spec = create_graph_component_spec_from_pipeline_func(pipeline_func, embed_component_specs)\n if output_component_file:\n from pathlib import Path\n from ._yaml_utils import dump_yaml\n component_dict = component_spec.to_dict()\n component_yaml = dump_yaml(component_dict)\n Path(output_component_file).write_text(component_yaml)\n\n return _create_task_factory_from_component_spec(component_spec)\n\ndef create_graph_component_spec_from_pipeline_func(pipeline_func: Callable, embed_component_specs: bool = False) -> ComponentSpec:\n\n component_spec = _extract_component_interface(pipeline_func)\n # Checking the function parameters - they should not have file passing annotations.\n input_specs = component_spec.inputs or []\n for input in input_specs:\n if input._passing_style:\n raise TypeError('Graph component function parameter \"{}\" cannot have file-passing annotation \"{}\".'.format(input.name, input._passing_style))\n\n task_map = OrderedDict() #Preserving task order\n\n from ._components import _create_task_spec_from_component_and_arguments\n def task_construction_handler(\n component_spec,\n arguments,\n component_ref,\n ):\n task = _create_task_spec_from_component_and_arguments(\n component_spec=component_spec,\n 
arguments=arguments,\n component_ref=component_ref,\n )\n\n #Rewriting task ids so that they're same every time\n task_id = task.component_ref.spec.name or \"Task\"\n task_id = _make_name_unique_by_adding_index(task_id, task_map.keys(), ' ')\n for output_ref in task.outputs.values():\n output_ref.task_output.task_id = task_id\n output_ref.task_output.task = None\n task_map[task_id] = task\n # Remove the component spec from component reference unless it will make the reference empty or unless explicitly asked by the user\n if not embed_component_specs and any([task.component_ref.name, task.component_ref.url, task.component_ref.digest]):\n task.component_ref.spec = None\n\n return task #The handler is a transformation function, so it must pass the task through.\n\n # Preparing the pipeline_func arguments\n # TODO: The key should be original parameter name if different\n pipeline_func_args = {input.name: GraphInputReference(input_name=input.name).as_argument() for input in input_specs}\n\n try:\n #Setting the handler to fix and catch the tasks.\n # FIX: The handler only hooks container component creation\n old_handler = _components._container_task_constructor\n _components._container_task_constructor = task_construction_handler\n \n #Calling the pipeline_func with GraphInputArgument instances as arguments \n pipeline_func_result = pipeline_func(**pipeline_func_args)\n finally:\n _components._container_task_constructor = old_handler\n\n\n # Getting graph outputs\n output_names = [output.name for output in (component_spec.outputs or [])]\n\n if len(output_names) == 1 and output_names[0] == 'Output': # TODO: Check whether the NamedTuple syntax was used\n pipeline_func_result = [pipeline_func_result]\n\n if isinstance(pipeline_func_result, tuple) and hasattr(pipeline_func_result, '_asdict'): # collections.namedtuple and typing.NamedTuple\n pipeline_func_result = pipeline_func_result._asdict()\n\n if isinstance(pipeline_func_result, dict):\n if output_names:\n if set(output_names) != set(pipeline_func_result.keys()):\n raise ValueError('Returned outputs do not match outputs specified in the function signature: {} = {}'.format(str(set(pipeline_func_result.keys())), str(set(output_names))))\n\n if pipeline_func_result is None:\n graph_output_value_map = {}\n elif isinstance(pipeline_func_result, dict):\n graph_output_value_map = OrderedDict(pipeline_func_result)\n elif isinstance(pipeline_func_result, (list, tuple)):\n if output_names:\n if len(pipeline_func_result) != len(output_names):\n raise ValueError('Expected {} values from pipeline function, but got {}.'.format(len(output_names), len(pipeline_func_result)))\n graph_output_value_map = OrderedDict((name_value[0], name_value[1]) for name_value in zip(output_names, pipeline_func_result))\n else:\n graph_output_value_map = OrderedDict((output_value.task_output.output_name, output_value) for output_value in pipeline_func_result) # TODO: Fix possible name non-uniqueness (e.g. 
use task id as prefix or add index to non-unique names)\n else:\n raise TypeError('Pipeline must return outputs as tuple or OrderedDict.')\n\n #Checking the pipeline_func output object types\n for output_name, output_value in graph_output_value_map.items():\n if not isinstance(output_value, TaskOutputArgument):\n raise TypeError('Only TaskOutputArgument instances should be returned from graph component, but got \"{output_name}\" = \"{}\".'.format(output_name, str(output_value)))\n\n if not component_spec.outputs and graph_output_value_map:\n component_spec.outputs = [OutputSpec(name=output_name, type=output_value.task_output.type) for output_name, output_value in graph_output_value_map.items()]\n\n component_spec.implementation = GraphImplementation(\n graph=GraphSpec(\n tasks=task_map,\n output_values=graph_output_value_map,\n )\n )\n return component_spec\n", "path": "sdk/python/kfp/components/_python_to_graph_component.py"}]} | 3,040 | 218 |
gh_patches_debug_13918 | rasdani/github-patches | git_diff | falconry__falcon-61 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename Request.body to Request.stream
Mirros Response. And it really is a stream.
</issue>
<code>
[start of falcon/request.py]
1 """Defines the Request class.
2
3 Copyright 2013 by Rackspace Hosting, Inc.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 """
18
19 from datetime import datetime
20
21 import six
22
23 from falcon.request_helpers import *
24 from falcon.exceptions import *
25
26 DEFAULT_ERROR_LOG_FORMAT = ('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR]'
27 ' {1} {2}?{3} => {4}\n')
28
29
30 class Request(object):
31 """Represents a client's HTTP request"""
32
33 __slots__ = (
34 'app',
35 'body',
36 '_headers',
37 'method',
38 '_params',
39 'path',
40 'protocol',
41 'query_string',
42 '_wsgierrors'
43 )
44
45 def __init__(self, env):
46 """Initialize attributes based on a WSGI environment dict
47
48 Note: Request is not meant to be instantiated directory by responders.
49
50 Args:
51 env: A WSGI environment dict passed in from the server. See also
52 the PEP-333 spec.
53
54 """
55
56 self._wsgierrors = env['wsgi.errors']
57 self.body = env['wsgi.input']
58
59 self.protocol = env['wsgi.url_scheme']
60 self.app = env['SCRIPT_NAME']
61 self.method = env['REQUEST_METHOD']
62 self.path = env['PATH_INFO'] or '/'
63 self.query_string = query_string = env['QUERY_STRING']
64
65 self._params = parse_query_string(query_string)
66 self._headers = parse_headers(env)
67
68 def log_error(self, message):
69 """Log an error to wsgi.error
70
71 Prepends timestamp and request info to message, and writes the
72 result out to the WSGI server's error stream (wsgi.error).
73
74 Args:
75 message: A string describing the problem. If a byte-string and
76 running under Python 2, the string is assumed to be encoded
77 as UTF-8.
78
79 """
80 if not six.PY3 and isinstance(message, unicode):
81 message = message.encode('utf-8')
82
83 log_line = (
84 DEFAULT_ERROR_LOG_FORMAT.
85 format(datetime.now(), self.method, self.path,
86 self.query_string, message)
87 )
88
89 self._wsgierrors.write(log_line)
90
91 def client_accepts_json(self):
92 """Return True if the Accept header indicates JSON support"""
93
94 accept = self.get_header('Accept')
95 if accept is not None:
96 return ('application/json' in accept) or ('*/*' in accept)
97
98 return False
99
100 def get_header(self, name, default=None, required=False):
101 """Return a header value as a string
102
103 Args:
104 name: Header name, case-insensitive (e.g., 'Content-Type')
105 default: Value to return in case the header is not
106 found (default None)
107 required: Set to True to raise HttpBadRequest instead
108 of returning gracefully when the header is not found
109 (default False)
110
111 """
112
113 # Use try..except to optimize for the header existing in most cases
114 try:
115 # Don't take the time to cache beforehand, using HTTP naming.
116 # This will be faster, assuming that most headers are looked
117 # up only once, and not all headers will be requested.
118 return self._headers[name.upper().replace('-', '_')]
119 except KeyError:
120 if not required:
121 return default
122
123 raise HTTPBadRequest('Missing header',
124 'The "' + name + '" header is required.')
125
126 def get_param(self, name, default=None, required=False):
127 """Return the value of a query string parameter as a string
128
129 Args:
130 name: Parameter name, case-sensitive (e.g., 'sort')
131 default: Value to return in case the parameter is not found in the
132 query string (default None)
133 required: Set to True to raise HTTPBadRequest instead of returning
134 gracefully when the parameter is not found (default False)
135
136 Returns:
137 The value of the param as a byte string, or the default value if
138 param is not found and is not required.
139
140 Raises
141 HTTPBadRequest: The param was not found in the request, but was
142 required.
143
144 """
145
146 # PERF: Use if..in since it is a good all-around performer; we don't
147 # know how likely params are to be specified by clients.
148 if name in self._params:
149 return self._params[name]
150
151 if not required:
152 return default
153
154 raise HTTPBadRequest('Missing query parameter',
155 'The "' + name + '" query parameter is required.')
156
157 def get_param_as_int(self, name, default=None, required=False):
158 """Return the value of a query string parameter as an int
159
160 Args:
161 name: Parameter name, case-sensitive (e.g., 'limit')
162 default: Value to return in case the parameter is not found in the
163 query string, or it is not an integer (default None)
164 required: Set to True to raise HTTPBadRequest instead of returning
165 gracefully when the parameter is not found or is not an
166 integer (default False)
167
168 Returns:
169 The value of the param if it is found and can be converted to an
170 integer. Otherwise, returns the default value unless required is
171 True.
172
173 Raises
174 HTTPBadRequest: The param was not found in the request, but was
175 required.
176
177 """
178
179 # PERF: Use if..in since it is a good all-around performer; we don't
180 # know how likely params are to be specified by clients.
181 if name in self._params:
182 val = self._params[name]
183 try:
184 return int(val)
185 except ValueError:
186 pass
187
188 if not required:
189 return default
190
191 raise HTTPBadRequest('Missing query parameter',
192 'The "' + name + '" query parameter is required.')
193
[end of falcon/request.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/falcon/request.py b/falcon/request.py
--- a/falcon/request.py
+++ b/falcon/request.py
@@ -32,13 +32,13 @@
__slots__ = (
'app',
- 'body',
'_headers',
'method',
'_params',
'path',
'protocol',
'query_string',
+ 'stream',
'_wsgierrors'
)
@@ -54,7 +54,7 @@
"""
self._wsgierrors = env['wsgi.errors']
- self.body = env['wsgi.input']
+ self.stream = env['wsgi.input']
self.protocol = env['wsgi.url_scheme']
self.app = env['SCRIPT_NAME']
| {"golden_diff": "diff --git a/falcon/request.py b/falcon/request.py\n--- a/falcon/request.py\n+++ b/falcon/request.py\n@@ -32,13 +32,13 @@\n \n __slots__ = (\n 'app',\n- 'body',\n '_headers',\n 'method',\n '_params',\n 'path',\n 'protocol',\n 'query_string',\n+ 'stream',\n '_wsgierrors'\n )\n \n@@ -54,7 +54,7 @@\n \"\"\"\n \n self._wsgierrors = env['wsgi.errors']\n- self.body = env['wsgi.input']\n+ self.stream = env['wsgi.input']\n \n self.protocol = env['wsgi.url_scheme']\n self.app = env['SCRIPT_NAME']\n", "issue": "Rename Request.body to Request.stream\nMirros Response. And it really is a stream.\n\n", "before_files": [{"content": "\"\"\"Defines the Request class.\n\nCopyright 2013 by Rackspace Hosting, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom datetime import datetime\n\nimport six\n\nfrom falcon.request_helpers import *\nfrom falcon.exceptions import *\n\nDEFAULT_ERROR_LOG_FORMAT = ('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR]'\n ' {1} {2}?{3} => {4}\\n')\n\n\nclass Request(object):\n \"\"\"Represents a client's HTTP request\"\"\"\n\n __slots__ = (\n 'app',\n 'body',\n '_headers',\n 'method',\n '_params',\n 'path',\n 'protocol',\n 'query_string',\n '_wsgierrors'\n )\n\n def __init__(self, env):\n \"\"\"Initialize attributes based on a WSGI environment dict\n\n Note: Request is not meant to be instantiated directory by responders.\n\n Args:\n env: A WSGI environment dict passed in from the server. See also\n the PEP-333 spec.\n\n \"\"\"\n\n self._wsgierrors = env['wsgi.errors']\n self.body = env['wsgi.input']\n\n self.protocol = env['wsgi.url_scheme']\n self.app = env['SCRIPT_NAME']\n self.method = env['REQUEST_METHOD']\n self.path = env['PATH_INFO'] or '/'\n self.query_string = query_string = env['QUERY_STRING']\n\n self._params = parse_query_string(query_string)\n self._headers = parse_headers(env)\n\n def log_error(self, message):\n \"\"\"Log an error to wsgi.error\n\n Prepends timestamp and request info to message, and writes the\n result out to the WSGI server's error stream (wsgi.error).\n\n Args:\n message: A string describing the problem. 
If a byte-string and\n running under Python 2, the string is assumed to be encoded\n as UTF-8.\n\n \"\"\"\n if not six.PY3 and isinstance(message, unicode):\n message = message.encode('utf-8')\n\n log_line = (\n DEFAULT_ERROR_LOG_FORMAT.\n format(datetime.now(), self.method, self.path,\n self.query_string, message)\n )\n\n self._wsgierrors.write(log_line)\n\n def client_accepts_json(self):\n \"\"\"Return True if the Accept header indicates JSON support\"\"\"\n\n accept = self.get_header('Accept')\n if accept is not None:\n return ('application/json' in accept) or ('*/*' in accept)\n\n return False\n\n def get_header(self, name, default=None, required=False):\n \"\"\"Return a header value as a string\n\n Args:\n name: Header name, case-insensitive (e.g., 'Content-Type')\n default: Value to return in case the header is not\n found (default None)\n required: Set to True to raise HttpBadRequest instead\n of returning gracefully when the header is not found\n (default False)\n\n \"\"\"\n\n # Use try..except to optimize for the header existing in most cases\n try:\n # Don't take the time to cache beforehand, using HTTP naming.\n # This will be faster, assuming that most headers are looked\n # up only once, and not all headers will be requested.\n return self._headers[name.upper().replace('-', '_')]\n except KeyError:\n if not required:\n return default\n\n raise HTTPBadRequest('Missing header',\n 'The \"' + name + '\" header is required.')\n\n def get_param(self, name, default=None, required=False):\n \"\"\"Return the value of a query string parameter as a string\n\n Args:\n name: Parameter name, case-sensitive (e.g., 'sort')\n default: Value to return in case the parameter is not found in the\n query string (default None)\n required: Set to True to raise HTTPBadRequest instead of returning\n gracefully when the parameter is not found (default False)\n\n Returns:\n The value of the param as a byte string, or the default value if\n param is not found and is not required.\n\n Raises\n HTTPBadRequest: The param was not found in the request, but was\n required.\n\n \"\"\"\n\n # PERF: Use if..in since it is a good all-around performer; we don't\n # know how likely params are to be specified by clients.\n if name in self._params:\n return self._params[name]\n\n if not required:\n return default\n\n raise HTTPBadRequest('Missing query parameter',\n 'The \"' + name + '\" query parameter is required.')\n\n def get_param_as_int(self, name, default=None, required=False):\n \"\"\"Return the value of a query string parameter as an int\n\n Args:\n name: Parameter name, case-sensitive (e.g., 'limit')\n default: Value to return in case the parameter is not found in the\n query string, or it is not an integer (default None)\n required: Set to True to raise HTTPBadRequest instead of returning\n gracefully when the parameter is not found or is not an\n integer (default False)\n\n Returns:\n The value of the param if it is found and can be converted to an\n integer. 
Otherwise, returns the default value unless required is\n True.\n\n Raises\n HTTPBadRequest: The param was not found in the request, but was\n required.\n\n \"\"\"\n\n # PERF: Use if..in since it is a good all-around performer; we don't\n # know how likely params are to be specified by clients.\n if name in self._params:\n val = self._params[name]\n try:\n return int(val)\n except ValueError:\n pass\n\n if not required:\n return default\n\n raise HTTPBadRequest('Missing query parameter',\n 'The \"' + name + '\" query parameter is required.')\n", "path": "falcon/request.py"}]} | 2,402 | 168 |
gh_patches_debug_39573 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-674 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Jinja 3.x
Jinja 3.x was released last week and it has broken some functionality within the agent. The following tests are currently failing:
```
tests/integration/instruments/test_jinja2_py36plus.py::test_async_render
tests/integration/instruments/test_jinja2_py36plus.py::test_async_render_name
tests/integration/instruments/test_urllib3.py::test_request - pytest.PytestUnraisableExceptionWarning
tests/integration/instruments/test_urllib3.py::test_request_no_absolute_url
```
</issue>
<code>
[start of src/scout_apm/instruments/jinja2.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 import sys
6
7 import wrapt
8
9 from scout_apm.core.tracked_request import TrackedRequest
10
11 try:
12 from jinja2 import Environment
13 except ImportError: # pragma: no cover
14 Environment = None
15
16 try:
17 from jinja2 import Template
18 except ImportError: # pragma: no cover
19 Template = None
20
21 # The async_ module can only be shipped on Python 3.6+
22 try:
23 from scout_apm.async_.instruments.jinja2 import wrapped_render_async
24 except ImportError:
25 wrapped_render_async = None
26
27
28 logger = logging.getLogger(__name__)
29
30
31 have_patched_environment_init = False
32 have_patched_template_render = False
33 have_patched_template_render_async = False
34
35
36 def ensure_installed():
37 global have_patched_environment_init
38 global have_patched_template_render
39
40 logger.debug("Instrumenting Jinja2.")
41
42 if Template is None:
43 logger.debug("Couldn't import jinja2.Template - probably not installed.")
44 return
45
46 if not have_patched_environment_init:
47 try:
48 Environment.__init__ = wrapped_environment_init(Environment.__init__)
49 except Exception as exc:
50 logger.warning(
51 "Failed to instrument jinja2.Environment.__init__: %r",
52 exc,
53 exc_info=exc,
54 )
55 else:
56 have_patched_environment_init = True
57
58 if not have_patched_template_render:
59 try:
60 Template.render = wrapped_render(Template.render)
61 except Exception as exc:
62 logger.warning(
63 "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc
64 )
65 else:
66 have_patched_template_render = True
67
68
69 @wrapt.decorator
70 def wrapped_render(wrapped, instance, args, kwargs):
71 tracked_request = TrackedRequest.instance()
72 with tracked_request.span(operation="Template/Render") as span:
73 span.tag("name", instance.name)
74 return wrapped(*args, **kwargs)
75
76
77 @wrapt.decorator
78 def wrapped_environment_init(wrapped, instance, args, kwargs):
79 """
80 Delayed wrapping of render_async(), since Template won't have this method
81 until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is
82 done conditionally in Environment.__init__:
83 https://github.com/pallets/jinja/issues/765
84 """
85 global have_patched_template_render_async
86 result = wrapped(*args, **kwargs)
87
88 if (
89 wrapped_render_async is not None
90 and not have_patched_template_render_async
91 and "jinja2.asyncsupport" in sys.modules
92 ):
93 try:
94 Template.render_async = wrapped_render_async(Template.render_async)
95 except Exception as exc:
96 logger.warning(
97 "Failed to instrument jinja2.Template.render_async: %r",
98 exc,
99 exc_info=exc,
100 )
101 else:
102 have_patched_template_render_async = True
103
104 return result
105
[end of src/scout_apm/instruments/jinja2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py
--- a/src/scout_apm/instruments/jinja2.py
+++ b/src/scout_apm/instruments/jinja2.py
@@ -34,7 +34,6 @@
def ensure_installed():
- global have_patched_environment_init
global have_patched_template_render
logger.debug("Instrumenting Jinja2.")
@@ -43,9 +42,31 @@
logger.debug("Couldn't import jinja2.Template - probably not installed.")
return
- if not have_patched_environment_init:
+ instrument_render_async()
+
+ if not have_patched_template_render:
+ try:
+ Template.render = wrapped_render(Template.render)
+ except Exception as exc:
+ logger.warning(
+ "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc
+ )
+ else:
+ have_patched_template_render = True
+
+
+def instrument_render_async():
+ global have_patched_environment_init
+ global have_patched_template_render_async
+
+ if wrapped_render_async is None:
+ return
+
+ if not have_patched_environment_init and not hasattr(Template, "render_async"):
try:
- Environment.__init__ = wrapped_environment_init(Environment.__init__)
+ Environment.__init__ = wrapped_environment_init_jinja_v2(
+ Environment.__init__
+ )
except Exception as exc:
logger.warning(
"Failed to instrument jinja2.Environment.__init__: %r",
@@ -54,16 +75,17 @@
)
else:
have_patched_environment_init = True
-
- if not have_patched_template_render:
+ elif hasattr(Template, "render_async") and not have_patched_template_render_async:
try:
- Template.render = wrapped_render(Template.render)
+ Template.render_async = wrapped_render_async(Template.render_async)
except Exception as exc:
logger.warning(
- "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc
+ "Failed to instrument jinja2.Template.render_async: %r",
+ exc,
+ exc_info=exc,
)
else:
- have_patched_template_render = True
+ have_patched_template_render_async = True
@wrapt.decorator
@@ -75,12 +97,14 @@
@wrapt.decorator
-def wrapped_environment_init(wrapped, instance, args, kwargs):
+def wrapped_environment_init_jinja_v2(wrapped, instance, args, kwargs):
"""
Delayed wrapping of render_async(), since Template won't have this method
until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is
done conditionally in Environment.__init__:
https://github.com/pallets/jinja/issues/765
+
+ This is no longer needed since Jinja2 v3.0.0
"""
global have_patched_template_render_async
result = wrapped(*args, **kwargs)
| {"golden_diff": "diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py\n--- a/src/scout_apm/instruments/jinja2.py\n+++ b/src/scout_apm/instruments/jinja2.py\n@@ -34,7 +34,6 @@\n \n \n def ensure_installed():\n- global have_patched_environment_init\n global have_patched_template_render\n \n logger.debug(\"Instrumenting Jinja2.\")\n@@ -43,9 +42,31 @@\n logger.debug(\"Couldn't import jinja2.Template - probably not installed.\")\n return\n \n- if not have_patched_environment_init:\n+ instrument_render_async()\n+\n+ if not have_patched_template_render:\n+ try:\n+ Template.render = wrapped_render(Template.render)\n+ except Exception as exc:\n+ logger.warning(\n+ \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ )\n+ else:\n+ have_patched_template_render = True\n+\n+\n+def instrument_render_async():\n+ global have_patched_environment_init\n+ global have_patched_template_render_async\n+\n+ if wrapped_render_async is None:\n+ return\n+\n+ if not have_patched_environment_init and not hasattr(Template, \"render_async\"):\n try:\n- Environment.__init__ = wrapped_environment_init(Environment.__init__)\n+ Environment.__init__ = wrapped_environment_init_jinja_v2(\n+ Environment.__init__\n+ )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Environment.__init__: %r\",\n@@ -54,16 +75,17 @@\n )\n else:\n have_patched_environment_init = True\n-\n- if not have_patched_template_render:\n+ elif hasattr(Template, \"render_async\") and not have_patched_template_render_async:\n try:\n- Template.render = wrapped_render(Template.render)\n+ Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n- \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ \"Failed to instrument jinja2.Template.render_async: %r\",\n+ exc,\n+ exc_info=exc,\n )\n else:\n- have_patched_template_render = True\n+ have_patched_template_render_async = True\n \n \n @wrapt.decorator\n@@ -75,12 +97,14 @@\n \n \n @wrapt.decorator\n-def wrapped_environment_init(wrapped, instance, args, kwargs):\n+def wrapped_environment_init_jinja_v2(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n+\n+ This is no longer needed since Jinja2 v3.0.0\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n", "issue": "Support Jinja 3.x\nJinja 3.x was released last week and it has broken some functionality within the agent. 
The following tests are currently failing:\r\n\r\n```\r\ntests/integration/instruments/test_jinja2_py36plus.py::test_async_render\r\ntests/integration/instruments/test_jinja2_py36plus.py::test_async_render_name\r\ntests/integration/instruments/test_urllib3.py::test_request - pytest.PytestUnraisableExceptionWarning\r\ntests/integration/instruments/test_urllib3.py::test_request_no_absolute_url\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Environment\nexcept ImportError: # pragma: no cover\n Environment = None\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_environment_init = False\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_environment_init\n global have_patched_template_render\n\n logger.debug(\"Instrumenting Jinja2.\")\n\n if Template is None:\n logger.debug(\"Couldn't import jinja2.Template - probably not installed.\")\n return\n\n if not have_patched_environment_init:\n try:\n Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Environment.__init__: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_environment_init = True\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=\"Template/Render\") as span:\n span.tag(\"name\", instance.name)\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrapped_environment_init(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n\n if (\n wrapped_render_async is not None\n and not have_patched_template_render_async\n and \"jinja2.asyncsupport\" in sys.modules\n ):\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n return result\n", "path": "src/scout_apm/instruments/jinja2.py"}]} | 1,534 | 692 |
gh_patches_debug_12015 | rasdani/github-patches | git_diff | iterative__dvc-8505 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`exp run`: data gets re-imported on every call
# Bug Report
## Description
When a pipeline uses an imported data file (with `dvc import`), the data gets cloned(?) and hashed every time `dvc exp run` is called.
### Reproduce
1. dvc import [email protected]:iterative/dataset-registry.git use-cases/cats-dogs
2. dvc stage add -n foo -d cats-dogs echo foo
3. dvc exp run
### Expected
When using `dvc repro` the imported data doesn't get re-hashed. I would expect `dvc exp run` to behave the same.
### Environment information
**Output of `dvc doctor`:**
```console
$ dvc doctor
DVC version: 2.6.3 (pip)
---------------------------------
Platform: Python 3.9.6 on macOS-10.16-x86_64-i386-64bit
Supports:
gdrive (pydrive2 = 1.9.1),
http (requests = 2.26.0),
https (requests = 2.26.0)
Cache types: reflink, hardlink, symlink
Cache directory: apfs on /dev/disk1s1s1
Caches: local
Remotes: None
Workspace directory: apfs on /dev/disk1s1s1
Repo: dvc, git
```
**Additional Information (if any):**
```console
$ dvc repro -v
2021-08-25 11:11:55,186 DEBUG: Computed stage: 'cats-dogs.dvc' md5: '5a135b297ee3c96465ce4b320f44fb8b'
'cats-dogs.dvc' didn't change, skipping
Stage 'foo' didn't change, skipping
Data and pipelines are up to date.
```
```console
$ dvc exp run -v
2021-08-25 11:12:15,672 DEBUG: Detaching HEAD at 'HEAD'
2021-08-25 11:12:15,690 DEBUG: Stashing workspace
2021-08-25 11:12:15,700 DEBUG: No changes to stash
2021-08-25 11:12:15,749 DEBUG: Creating external repo [email protected]:iterative/dataset-registry.git@ca140591a21c6d75a7057d1e2eb3f51d3115c5f5
2021-08-25 11:12:15,749 DEBUG: erepo: git clone '[email protected]:iterative/dataset-registry.git' to a temporary dir
Computing file/dir hashes (only done once)
. . .
```
</issue>
<code>
[start of dvc/repo/commit.py]
1 from dvc import prompt
2
3 from . import locked
4
5
6 def _prepare_message(stage, changes):
7 changed_deps, changed_outs, changed_stage = changes
8 if changed_deps and changed_outs:
9 msg = "dependencies {deps} and outputs {outs} of {stage} changed."
10 elif changed_deps:
11 msg = "dependencies {deps} of {stage} changed."
12 elif changed_outs:
13 msg = "outputs {outs} of {stage} changed."
14 else:
15 msg = "{stage_changed}"
16
17 msg += " Are you sure you want to commit it?"
18
19 kw = {
20 "stage": stage,
21 "deps": changed_deps,
22 "outs": changed_outs,
23 "stage_changed": changed_stage,
24 }
25 return msg.format_map(kw)
26
27
28 def prompt_to_commit(stage, changes, force=False):
29 from dvc.stage.exceptions import StageCommitError
30
31 if not (force or prompt.confirm(_prepare_message(stage, changes))):
32 raise StageCommitError(
33 "unable to commit changed {}. Use `-f|--force` to "
34 "force.".format(stage)
35 )
36
37
38 @locked
39 def commit(
40 self,
41 target,
42 with_deps=False,
43 recursive=False,
44 force=False,
45 allow_missing=False,
46 data_only=False,
47 ):
48 from dvc.dvcfile import Dvcfile
49
50 stages_info = [
51 info
52 for info in self.stage.collect_granular(
53 target, with_deps=with_deps, recursive=recursive
54 )
55 if not data_only or info.stage.is_data_source
56 ]
57 for stage_info in stages_info:
58 stage = stage_info.stage
59 changes = stage.changed_entries()
60 if any(changes):
61 prompt_to_commit(stage, changes, force=force)
62 stage.save(allow_missing=allow_missing)
63 stage.commit(
64 filter_info=stage_info.filter_info, allow_missing=allow_missing
65 )
66
67 Dvcfile(self, stage.path).dump(stage, update_pipeline=False)
68 return [s.stage for s in stages_info]
69
[end of dvc/repo/commit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/repo/commit.py b/dvc/repo/commit.py
--- a/dvc/repo/commit.py
+++ b/dvc/repo/commit.py
@@ -56,10 +56,13 @@
]
for stage_info in stages_info:
stage = stage_info.stage
- changes = stage.changed_entries()
- if any(changes):
- prompt_to_commit(stage, changes, force=force)
+ if force:
stage.save(allow_missing=allow_missing)
+ else:
+ changes = stage.changed_entries()
+ if any(changes):
+ prompt_to_commit(stage, changes, force=force)
+ stage.save(allow_missing=allow_missing)
stage.commit(
filter_info=stage_info.filter_info, allow_missing=allow_missing
)
| {"golden_diff": "diff --git a/dvc/repo/commit.py b/dvc/repo/commit.py\n--- a/dvc/repo/commit.py\n+++ b/dvc/repo/commit.py\n@@ -56,10 +56,13 @@\n ]\n for stage_info in stages_info:\n stage = stage_info.stage\n- changes = stage.changed_entries()\n- if any(changes):\n- prompt_to_commit(stage, changes, force=force)\n+ if force:\n stage.save(allow_missing=allow_missing)\n+ else:\n+ changes = stage.changed_entries()\n+ if any(changes):\n+ prompt_to_commit(stage, changes, force=force)\n+ stage.save(allow_missing=allow_missing)\n stage.commit(\n filter_info=stage_info.filter_info, allow_missing=allow_missing\n )\n", "issue": "`exp run`: data gets re-imported on every call\n# Bug Report\r\n\r\n## Description\r\n\r\nWhen a pipeline uses an imported data file (with `dvc import`), the data gets cloned(?) and hashed every time `dvc exp run` is called.\r\n\r\n### Reproduce\r\n\r\n1. dvc import [email protected]:iterative/dataset-registry.git use-cases/cats-dogs\r\n2. dvc stage add -n foo -d cats-dogs echo foo\r\n3. dvc exp run\r\n\r\n### Expected\r\n\r\nWhen using `dvc repro` the imported data doesn't get re-hashed. I would expect `dvc exp run` to behave the same.\r\n\r\n### Environment information\r\n\r\n**Output of `dvc doctor`:**\r\n\r\n```console\r\n$ dvc doctor\r\nDVC version: 2.6.3 (pip)\r\n---------------------------------\r\nPlatform: Python 3.9.6 on macOS-10.16-x86_64-i386-64bit\r\nSupports:\r\n gdrive (pydrive2 = 1.9.1),\r\n http (requests = 2.26.0),\r\n https (requests = 2.26.0)\r\nCache types: reflink, hardlink, symlink\r\nCache directory: apfs on /dev/disk1s1s1\r\nCaches: local\r\nRemotes: None\r\nWorkspace directory: apfs on /dev/disk1s1s1\r\nRepo: dvc, git\r\n```\r\n\r\n**Additional Information (if any):**\r\n\r\n```console\r\n$ dvc repro -v\r\n2021-08-25 11:11:55,186 DEBUG: Computed stage: 'cats-dogs.dvc' md5: '5a135b297ee3c96465ce4b320f44fb8b'\r\n'cats-dogs.dvc' didn't change, skipping\r\nStage 'foo' didn't change, skipping\r\nData and pipelines are up to date.\r\n```\r\n\r\n```console\r\n$ dvc exp run -v\r\n2021-08-25 11:12:15,672 DEBUG: Detaching HEAD at 'HEAD' \r\n2021-08-25 11:12:15,690 DEBUG: Stashing workspace\r\n2021-08-25 11:12:15,700 DEBUG: No changes to stash\r\n2021-08-25 11:12:15,749 DEBUG: Creating external repo [email protected]:iterative/dataset-registry.git@ca140591a21c6d75a7057d1e2eb3f51d3115c5f5\r\n2021-08-25 11:12:15,749 DEBUG: erepo: git clone '[email protected]:iterative/dataset-registry.git' to a temporary dir\r\nComputing file/dir hashes (only done once) \r\n. . . \r\n```\r\n\r\n\n", "before_files": [{"content": "from dvc import prompt\n\nfrom . import locked\n\n\ndef _prepare_message(stage, changes):\n changed_deps, changed_outs, changed_stage = changes\n if changed_deps and changed_outs:\n msg = \"dependencies {deps} and outputs {outs} of {stage} changed.\"\n elif changed_deps:\n msg = \"dependencies {deps} of {stage} changed.\"\n elif changed_outs:\n msg = \"outputs {outs} of {stage} changed.\"\n else:\n msg = \"{stage_changed}\"\n\n msg += \" Are you sure you want to commit it?\"\n\n kw = {\n \"stage\": stage,\n \"deps\": changed_deps,\n \"outs\": changed_outs,\n \"stage_changed\": changed_stage,\n }\n return msg.format_map(kw)\n\n\ndef prompt_to_commit(stage, changes, force=False):\n from dvc.stage.exceptions import StageCommitError\n\n if not (force or prompt.confirm(_prepare_message(stage, changes))):\n raise StageCommitError(\n \"unable to commit changed {}. 
Use `-f|--force` to \"\n \"force.\".format(stage)\n )\n\n\n@locked\ndef commit(\n self,\n target,\n with_deps=False,\n recursive=False,\n force=False,\n allow_missing=False,\n data_only=False,\n):\n from dvc.dvcfile import Dvcfile\n\n stages_info = [\n info\n for info in self.stage.collect_granular(\n target, with_deps=with_deps, recursive=recursive\n )\n if not data_only or info.stage.is_data_source\n ]\n for stage_info in stages_info:\n stage = stage_info.stage\n changes = stage.changed_entries()\n if any(changes):\n prompt_to_commit(stage, changes, force=force)\n stage.save(allow_missing=allow_missing)\n stage.commit(\n filter_info=stage_info.filter_info, allow_missing=allow_missing\n )\n\n Dvcfile(self, stage.path).dump(stage, update_pipeline=False)\n return [s.stage for s in stages_info]\n", "path": "dvc/repo/commit.py"}]} | 1,796 | 176 |
gh_patches_debug_31064 | rasdani/github-patches | git_diff | SeldonIO__MLServer-916 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue in aggregating batched requests parameters
When sending batched requests with custom parameters (using the extra fields allowed [here](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/types/dataplane.py#L27)), like the following:
```python
# request 1
payload = types.InferenceRequest(
    inputs=[
        types.RequestInput(
            name="parameters-np",
            shape=[1],
            datatype="BYTES",
            data=[],
            parameters=types.Parameters(**{"custom-param": "value-1"}),
        )
    ]
)
# request 2
payload = types.InferenceRequest(
    inputs=[
        types.RequestInput(
            name="parameters-np",
            shape=[1],
            datatype="BYTES",
            data=[],
            parameters=types.Parameters(**{"custom-param": "value-2"}),
        )
    ]
)
```
the resulting parameters field for the batched inference request will only contain a single `custom-param` entry holding the value from the last request. This is because a [dictionary is used](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/batching/requests.py#L112) to aggregate parameters, so when the same parameter appears with different values within one batch, only one of those values is kept. E.g. in the case above we will only get `custom-param` equal to `value-2`. A better solution would be to store all the values of a shared request parameter in a list, e.g. `{"custom-param": ["value-1", "value-2"]}`.
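To make the overwrite behaviour concrete, here is a minimal sketch using plain Python dicts (independent of MLServer's actual types) that contrasts the current dict-style merge with a list-accumulating merge:

```python
params_per_request = [{"custom-param": "value-1"}, {"custom-param": "value-2"}]

# Current behaviour: a plain dict merge keeps only the last value seen.
merged = {}
for params in params_per_request:
    merged = {**merged, **params}
print(merged)  # {'custom-param': 'value-2'}

# Proposed behaviour: accumulate every value of a repeated key in a list.
accumulated = {}
for params in params_per_request:
    for key, value in params.items():
        accumulated.setdefault(key, []).append(value)
print(accumulated)  # {'custom-param': ['value-1', 'value-2']}
```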
</issue>
<code>
[start of mlserver/batching/requests.py]
1 from collections import defaultdict, OrderedDict
2 from typing import Dict, List, Optional, Union
3
4 from ..types import (
5 InferenceRequest,
6 InferenceResponse,
7 Parameters,
8 RequestInput,
9 RequestOutput,
10 ResponseOutput,
11 )
12 from .shape import Shape
13
14
15 def _get_data(payload: Union[RequestInput, ResponseOutput]):
16 return getattr(payload.data, "__root__", payload.data)
17
18
19 def _merge_parameters(
20 all_params: dict,
21 parametrised_obj: Union[
22 InferenceRequest, InferenceResponse, RequestInput, RequestOutput
23 ],
24 ) -> dict:
25 if not parametrised_obj.parameters:
26 return all_params
27
28 obj_params = parametrised_obj.parameters.dict()
29 return {**all_params, **obj_params}
30
31
32 def _merge_data(
33 all_data: Union[list, List[str], List[bytes]]
34 ) -> Union[list, str, bytes]:
35 sampled_datum = all_data[0]
36
37 if isinstance(sampled_datum, str):
38 return "".join(all_data) # type: ignore
39
40 if isinstance(sampled_datum, bytes):
41 return b"".join(all_data) # type: ignore
42
43 if isinstance(sampled_datum, list):
44 return sum(all_data, [])
45
46 # TODO: Should we raise an error if we couldn't merge the data?
47 return all_data
48
49
50 class BatchedRequests:
51 def __init__(self, inference_requests: Dict[str, InferenceRequest] = {}):
52 self.inference_requests = inference_requests
53
54 # External IDs represent the incoming prediction IDs that need to match
55 # 1:1 between request and response.
56 # Since we can't ensure the uniqueness (or even presence) of the
57 # external IDs, we'll also maintain our own list of internal IDs.
58 self._ids_mapping: Dict[str, Optional[str]] = OrderedDict()
59
60 # Minibatch here refers to the individual batch size of the input head
61 # of each input request (i.e. the number of datapoints on each input
62 # request)
63 self._minibatch_sizes: Dict[str, int] = OrderedDict()
64
65 self.merged_request = self._merge_requests()
66
67 def _merge_requests(self) -> InferenceRequest:
68 inputs_index: Dict[str, Dict[str, RequestInput]] = defaultdict(OrderedDict)
69 outputs_index: Dict[str, Dict[str, RequestOutput]] = defaultdict(OrderedDict)
70 all_params: dict = {}
71 has_outputs = False # if no outputs are defined, then outputs=None
72
73 for internal_id, inference_request in self.inference_requests.items():
74 self._ids_mapping[internal_id] = inference_request.id
75 all_params = _merge_parameters(all_params, inference_request)
76 for request_input in inference_request.inputs:
77 inputs_index[request_input.name][internal_id] = request_input
78
79 if inference_request.outputs is not None:
80 has_outputs = True
81 for request_output in inference_request.outputs:
82 outputs_index[request_output.name][internal_id] = request_output
83
84 inputs = [
85 self._merge_request_inputs(request_inputs)
86 for request_inputs in inputs_index.values()
87 ]
88
89 outputs = (
90 [
91 self._merge_request_outputs(request_outputs)
92 for request_outputs in outputs_index.values()
93 ]
94 if has_outputs
95 else None
96 )
97
98 # TODO: Should we add a 'fake' request ID?
99 params = Parameters(**all_params) if all_params else None
100 return InferenceRequest(inputs=inputs, outputs=outputs, parameters=params)
101
102 def _merge_request_inputs(
103 self, request_inputs: Dict[str, RequestInput]
104 ) -> RequestInput:
105 # Note that minibatch sizes could be different on each input head,
106 # however, to simplify the implementation, here we assume that it will
107 # be the same across all of them
108 batch_size = 0
109 all_data = []
110 all_params: dict = {}
111 for internal_id, request_input in request_inputs.items():
112 all_params = _merge_parameters(all_params, request_input)
113 all_data.append(_get_data(request_input))
114 minibatch_shape = Shape(request_input.shape)
115 self._minibatch_sizes[internal_id] = minibatch_shape.batch_size
116 batch_size += minibatch_shape.batch_size
117
118 data = _merge_data(all_data)
119 parameters = Parameters(**all_params) if all_params else None
120
121 # TODO: What should we do if list is empty?
122 sampled = next(iter(request_inputs.values()))
123 shape = Shape(sampled.shape)
124 shape.batch_size = batch_size
125
126 return RequestInput(
127 name=sampled.name,
128 datatype=sampled.datatype,
129 shape=shape.to_list(),
130 data=data,
131 parameters=parameters,
132 )
133
134 def _merge_request_outputs(
135 self, request_outputs: Dict[str, RequestOutput]
136 ) -> RequestOutput:
137 all_params: dict = {}
138 for internal_id, request_output in request_outputs.items():
139 all_params = _merge_parameters(all_params, request_output)
140
141 parameters = Parameters(**all_params) if all_params else None
142
143 # TODO: What should we do if list is empty?
144 sampled = next(iter(request_outputs.values()))
145
146 return RequestOutput(name=sampled.name, parameters=parameters)
147
148 def split_response(
149 self, batched_response: InferenceResponse
150 ) -> Dict[str, InferenceResponse]:
151 responses: Dict[str, InferenceResponse] = {}
152
153 for response_output in batched_response.outputs:
154 response_outputs = self._split_response_output(response_output)
155
156 for internal_id, response_output in response_outputs.items():
157 if internal_id not in responses:
158 responses[internal_id] = InferenceResponse(
159 id=self._ids_mapping[internal_id],
160 model_name=batched_response.model_name,
161 model_version=batched_response.model_version,
162 outputs=[],
163 parameters=batched_response.parameters,
164 )
165
166 responses[internal_id].outputs.append(response_output)
167
168 return responses
169
170 def _split_response_output(
171 self, response_output: ResponseOutput
172 ) -> Dict[str, ResponseOutput]:
173
174 all_data = self._split_data(response_output)
175 response_outputs = {}
176 for internal_id, data in all_data.items():
177 shape = Shape(response_output.shape)
178 shape.batch_size = self._minibatch_sizes[internal_id]
179 response_outputs[internal_id] = ResponseOutput(
180 name=response_output.name,
181 shape=shape.to_list(),
182 data=data,
183 datatype=response_output.datatype,
184 parameters=response_output.parameters,
185 )
186
187 return response_outputs
188
189 def _split_data(self, response_output: ResponseOutput) -> Dict[str, ResponseOutput]:
190 merged_shape = Shape(response_output.shape)
191 element_size = merged_shape.elem_size
192 merged_data = _get_data(response_output)
193 idx = 0
194
195 all_data = {}
196 # TODO: Don't rely on array to have been flattened
197 for internal_id, minibatch_size in self._minibatch_sizes.items():
198 data = merged_data[idx : idx + minibatch_size * element_size]
199 idx += minibatch_size * element_size
200 all_data[internal_id] = data
201
202 return all_data
203
[end of mlserver/batching/requests.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/batching/requests.py b/mlserver/batching/requests.py
--- a/mlserver/batching/requests.py
+++ b/mlserver/batching/requests.py
@@ -29,6 +29,40 @@
return {**all_params, **obj_params}
+def _merge_input_parameters(
+ all_params: dict,
+ parametrised_obj: Union[
+ InferenceRequest, InferenceResponse, RequestInput, RequestOutput
+ ],
+) -> dict:
+ if not parametrised_obj.parameters:
+ return all_params
+ obj_params = parametrised_obj.parameters.dict()
+ if all_params == {}:
+ return obj_params
+ else:
+ common_keys = set(all_params).intersection(set(obj_params)) - {
+ "content_type",
+ "headers",
+ }
+ uncommon_keys = set(all_params).union(set(obj_params)) - common_keys
+ new_all_params = {}
+ for key in common_keys:
+ if type(all_params[key]) == list:
+ new_value = all_params[key] + [obj_params[key]]
+ new_all_params[key] = new_value
+ else:
+ new_value = [all_params[key]]
+ new_value.append(obj_params[key])
+ new_all_params[key] = new_value
+ for key in uncommon_keys:
+ if key in all_params.keys():
+ new_all_params[key] = all_params[key]
+ if key in obj_params.keys():
+ new_all_params[key] = obj_params[key]
+ return new_all_params
+
+
def _merge_data(
all_data: Union[list, List[str], List[bytes]]
) -> Union[list, str, bytes]:
@@ -109,7 +143,7 @@
all_data = []
all_params: dict = {}
for internal_id, request_input in request_inputs.items():
- all_params = _merge_parameters(all_params, request_input)
+ all_params = _merge_input_parameters(all_params, request_input)
all_data.append(_get_data(request_input))
minibatch_shape = Shape(request_input.shape)
self._minibatch_sizes[internal_id] = minibatch_shape.batch_size
| {"golden_diff": "diff --git a/mlserver/batching/requests.py b/mlserver/batching/requests.py\n--- a/mlserver/batching/requests.py\n+++ b/mlserver/batching/requests.py\n@@ -29,6 +29,40 @@\n return {**all_params, **obj_params}\n \n \n+def _merge_input_parameters(\n+ all_params: dict,\n+ parametrised_obj: Union[\n+ InferenceRequest, InferenceResponse, RequestInput, RequestOutput\n+ ],\n+) -> dict:\n+ if not parametrised_obj.parameters:\n+ return all_params\n+ obj_params = parametrised_obj.parameters.dict()\n+ if all_params == {}:\n+ return obj_params\n+ else:\n+ common_keys = set(all_params).intersection(set(obj_params)) - {\n+ \"content_type\",\n+ \"headers\",\n+ }\n+ uncommon_keys = set(all_params).union(set(obj_params)) - common_keys\n+ new_all_params = {}\n+ for key in common_keys:\n+ if type(all_params[key]) == list:\n+ new_value = all_params[key] + [obj_params[key]]\n+ new_all_params[key] = new_value\n+ else:\n+ new_value = [all_params[key]]\n+ new_value.append(obj_params[key])\n+ new_all_params[key] = new_value\n+ for key in uncommon_keys:\n+ if key in all_params.keys():\n+ new_all_params[key] = all_params[key]\n+ if key in obj_params.keys():\n+ new_all_params[key] = obj_params[key]\n+ return new_all_params\n+\n+\n def _merge_data(\n all_data: Union[list, List[str], List[bytes]]\n ) -> Union[list, str, bytes]:\n@@ -109,7 +143,7 @@\n all_data = []\n all_params: dict = {}\n for internal_id, request_input in request_inputs.items():\n- all_params = _merge_parameters(all_params, request_input)\n+ all_params = _merge_input_parameters(all_params, request_input)\n all_data.append(_get_data(request_input))\n minibatch_shape = Shape(request_input.shape)\n self._minibatch_sizes[internal_id] = minibatch_shape.batch_size\n", "issue": "Issue in aggregating batched requests parameters\nWhen sending batched requests with custom parameters (using extra field in [here](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/types/dataplane.py#L27)) like the following:\r\n\r\n```python\r\n# request 1\r\npayload = types.InferenceRequest(\r\n inputs=[\r\n types.RequestInput(\r\n name=\"parameters-np\",\r\n shape=[1],\r\n datatype=\"BYTES\",\r\n data=[],\r\n parameters=types.Parameters(\r\n custom-param='value-1,\r\n )\r\n ]\r\n)\r\n# request 2\r\npayload = types.InferenceRequest(\r\n inputs=[\r\n types.RequestInput(\r\n name=\"parameters-np\",\r\n shape=[1],\r\n datatype=\"BYTES\",\r\n data=[],\r\n parameters=types.Parameters(\r\n custom-param='value-2',\r\n )\r\n ]\r\n)\r\n```\r\nthe results param field for the batched inference request will only contain a single `custom-param` field with the last request value for that, this is because a [dictionary is used](https://github.com/SeldonIO/MLServer/blob/749cd02bc5f3ee0b915151eedeff50075ac946ad/mlserver/batching/requests.py#L112) to aggregate parameters and for the case of having a same parameter with different values in the same batch only one value will be stored for them. E.g. in the case above we will only have `custom-param` equals to `value-2`. A better solution would be able to store all the values of common request parameter separately in a list e.g. 
`{custom-param=['value-1', 'value-2']`\r\n\n", "before_files": [{"content": "from collections import defaultdict, OrderedDict\nfrom typing import Dict, List, Optional, Union\n\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n Parameters,\n RequestInput,\n RequestOutput,\n ResponseOutput,\n)\nfrom .shape import Shape\n\n\ndef _get_data(payload: Union[RequestInput, ResponseOutput]):\n return getattr(payload.data, \"__root__\", payload.data)\n\n\ndef _merge_parameters(\n all_params: dict,\n parametrised_obj: Union[\n InferenceRequest, InferenceResponse, RequestInput, RequestOutput\n ],\n) -> dict:\n if not parametrised_obj.parameters:\n return all_params\n\n obj_params = parametrised_obj.parameters.dict()\n return {**all_params, **obj_params}\n\n\ndef _merge_data(\n all_data: Union[list, List[str], List[bytes]]\n) -> Union[list, str, bytes]:\n sampled_datum = all_data[0]\n\n if isinstance(sampled_datum, str):\n return \"\".join(all_data) # type: ignore\n\n if isinstance(sampled_datum, bytes):\n return b\"\".join(all_data) # type: ignore\n\n if isinstance(sampled_datum, list):\n return sum(all_data, [])\n\n # TODO: Should we raise an error if we couldn't merge the data?\n return all_data\n\n\nclass BatchedRequests:\n def __init__(self, inference_requests: Dict[str, InferenceRequest] = {}):\n self.inference_requests = inference_requests\n\n # External IDs represent the incoming prediction IDs that need to match\n # 1:1 between request and response.\n # Since we can't ensure the uniqueness (or even presence) of the\n # external IDs, we'll also maintain our own list of internal IDs.\n self._ids_mapping: Dict[str, Optional[str]] = OrderedDict()\n\n # Minibatch here refers to the individual batch size of the input head\n # of each input request (i.e. 
the number of datapoints on each input\n # request)\n self._minibatch_sizes: Dict[str, int] = OrderedDict()\n\n self.merged_request = self._merge_requests()\n\n def _merge_requests(self) -> InferenceRequest:\n inputs_index: Dict[str, Dict[str, RequestInput]] = defaultdict(OrderedDict)\n outputs_index: Dict[str, Dict[str, RequestOutput]] = defaultdict(OrderedDict)\n all_params: dict = {}\n has_outputs = False # if no outputs are defined, then outputs=None\n\n for internal_id, inference_request in self.inference_requests.items():\n self._ids_mapping[internal_id] = inference_request.id\n all_params = _merge_parameters(all_params, inference_request)\n for request_input in inference_request.inputs:\n inputs_index[request_input.name][internal_id] = request_input\n\n if inference_request.outputs is not None:\n has_outputs = True\n for request_output in inference_request.outputs:\n outputs_index[request_output.name][internal_id] = request_output\n\n inputs = [\n self._merge_request_inputs(request_inputs)\n for request_inputs in inputs_index.values()\n ]\n\n outputs = (\n [\n self._merge_request_outputs(request_outputs)\n for request_outputs in outputs_index.values()\n ]\n if has_outputs\n else None\n )\n\n # TODO: Should we add a 'fake' request ID?\n params = Parameters(**all_params) if all_params else None\n return InferenceRequest(inputs=inputs, outputs=outputs, parameters=params)\n\n def _merge_request_inputs(\n self, request_inputs: Dict[str, RequestInput]\n ) -> RequestInput:\n # Note that minibatch sizes could be different on each input head,\n # however, to simplify the implementation, here we assume that it will\n # be the same across all of them\n batch_size = 0\n all_data = []\n all_params: dict = {}\n for internal_id, request_input in request_inputs.items():\n all_params = _merge_parameters(all_params, request_input)\n all_data.append(_get_data(request_input))\n minibatch_shape = Shape(request_input.shape)\n self._minibatch_sizes[internal_id] = minibatch_shape.batch_size\n batch_size += minibatch_shape.batch_size\n\n data = _merge_data(all_data)\n parameters = Parameters(**all_params) if all_params else None\n\n # TODO: What should we do if list is empty?\n sampled = next(iter(request_inputs.values()))\n shape = Shape(sampled.shape)\n shape.batch_size = batch_size\n\n return RequestInput(\n name=sampled.name,\n datatype=sampled.datatype,\n shape=shape.to_list(),\n data=data,\n parameters=parameters,\n )\n\n def _merge_request_outputs(\n self, request_outputs: Dict[str, RequestOutput]\n ) -> RequestOutput:\n all_params: dict = {}\n for internal_id, request_output in request_outputs.items():\n all_params = _merge_parameters(all_params, request_output)\n\n parameters = Parameters(**all_params) if all_params else None\n\n # TODO: What should we do if list is empty?\n sampled = next(iter(request_outputs.values()))\n\n return RequestOutput(name=sampled.name, parameters=parameters)\n\n def split_response(\n self, batched_response: InferenceResponse\n ) -> Dict[str, InferenceResponse]:\n responses: Dict[str, InferenceResponse] = {}\n\n for response_output in batched_response.outputs:\n response_outputs = self._split_response_output(response_output)\n\n for internal_id, response_output in response_outputs.items():\n if internal_id not in responses:\n responses[internal_id] = InferenceResponse(\n id=self._ids_mapping[internal_id],\n model_name=batched_response.model_name,\n model_version=batched_response.model_version,\n outputs=[],\n parameters=batched_response.parameters,\n )\n\n 
responses[internal_id].outputs.append(response_output)\n\n return responses\n\n def _split_response_output(\n self, response_output: ResponseOutput\n ) -> Dict[str, ResponseOutput]:\n\n all_data = self._split_data(response_output)\n response_outputs = {}\n for internal_id, data in all_data.items():\n shape = Shape(response_output.shape)\n shape.batch_size = self._minibatch_sizes[internal_id]\n response_outputs[internal_id] = ResponseOutput(\n name=response_output.name,\n shape=shape.to_list(),\n data=data,\n datatype=response_output.datatype,\n parameters=response_output.parameters,\n )\n\n return response_outputs\n\n def _split_data(self, response_output: ResponseOutput) -> Dict[str, ResponseOutput]:\n merged_shape = Shape(response_output.shape)\n element_size = merged_shape.elem_size\n merged_data = _get_data(response_output)\n idx = 0\n\n all_data = {}\n # TODO: Don't rely on array to have been flattened\n for internal_id, minibatch_size in self._minibatch_sizes.items():\n data = merged_data[idx : idx + minibatch_size * element_size]\n idx += minibatch_size * element_size\n all_data[internal_id] = data\n\n return all_data\n", "path": "mlserver/batching/requests.py"}]} | 2,978 | 483 |
gh_patches_debug_31585 | rasdani/github-patches | git_diff | azavea__raster-vision-1079 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle edge case: reading a window that overflows the RasterSource's extent that has been artificially cropped using extent_crop can cause data leakage
If, for example, there are two `RasterSource`s with cropped extents such that they read from the left and right halves of the same underlying image respectively, there will be instances at the boundary where windows straddle both halves. This is not an issue if the two halves are both in the training split or both in the validation split, but if they are in different splits, there will be data leakage.
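To make the boundary scenario concrete, here is a small sketch with illustrative numbers (plain tuples instead of `Box` objects, but the same fractional-offset arithmetic that `get_extent` uses):

```python
# A 100x200 image split into left/right halves via
# extent_crop = (skip_top, skip_left, skip_bottom, skip_right) fractions.
h, w = 100, 200

def cropped_extent(skip_top, skip_left, skip_bottom, skip_right):
    ymin, xmin = int(h * skip_top), int(w * skip_left)
    ymax, xmax = h - int(h * skip_bottom), w - int(w * skip_right)
    return (ymin, xmin, ymax, xmax)

left_half = cropped_extent(0, 0, 0, 0.5)   # (0, 0, 100, 100)
right_half = cropped_extent(0, 0.5, 0, 0)  # (0, 100, 100, 200)

# A window sampled near the right edge of the left-half source still reads
# pixels from columns 100-130, which belong to the other (right-half) split.
window = (20, 80, 70, 130)  # (ymin, xmin, ymax, xmax)
print(window[3] > left_half[3])  # True -> the read spills across the split
```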
**Possible solution**
`RasterSource` should fill the overflowing regions in the window with NODATA values if `extent_crop` is set. This can be a configurable option so that it can be enabled only when the image is part of scenes from different data splits.
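A minimal sketch of what that fill could look like, assuming chips are `(height, width, channels)` numpy arrays and that extents/windows expose `ymin`/`xmin`/`ymax`/`xmax` attributes the way `Box` does; this is only an illustration of the idea, not the project's actual implementation:

```python
import numpy as np

def fill_window_overflow(extent, window, chip: np.ndarray, fill_value=0) -> np.ndarray:
    """Overwrite the parts of `chip` that fall outside `extent` with fill_value."""
    top = max(0, extent.ymin - window.ymin)
    bottom = max(0, window.ymax - extent.ymax)
    left = max(0, extent.xmin - window.xmin)
    right = max(0, window.xmax - extent.xmax)

    h, w = chip.shape[:2]
    chip[:top] = fill_value
    chip[h - bottom:] = fill_value
    chip[:, :left] = fill_value
    chip[:, w - right:] = fill_value
    return chip
```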
</issue>
<code>
[start of rastervision_core/rastervision/core/data/raster_source/rasterio_source.py]
1 import logging
2 import math
3 import os
4 from pyproj import Transformer
5 import subprocess
6 from decimal import Decimal
7 import tempfile
8 from typing import Optional
9
10 import numpy as np
11 import rasterio
12 from rasterio.enums import (ColorInterp, MaskFlags)
13
14 from rastervision.pipeline.file_system import download_if_needed
15 from rastervision.core.box import Box
16 from rastervision.core.data.crs_transformer import RasterioCRSTransformer
17 from rastervision.core.data.raster_source import (RasterSource, CropOffsets)
18 from rastervision.core.data import (ActivateMixin, ActivationError)
19
20 log = logging.getLogger(__name__)
21 wgs84 = 'epsg:4326'
22 meters_per_degree = 111319.5
23
24
25 def build_vrt(vrt_path, image_paths):
26 """Build a VRT for a set of TIFF files."""
27 cmd = ['gdalbuildvrt', vrt_path]
28 cmd.extend(image_paths)
29 subprocess.run(cmd)
30
31
32 def download_and_build_vrt(image_uris, tmp_dir):
33 log.info('Building VRT...')
34 image_paths = [download_if_needed(uri, tmp_dir) for uri in image_uris]
35 image_path = os.path.join(tmp_dir, 'index.vrt')
36 build_vrt(image_path, image_paths)
37 return image_path
38
39
40 def stream_and_build_vrt(images_uris, tmp_dir):
41 log.info('Building VRT...')
42 image_paths = images_uris
43 image_path = os.path.join(tmp_dir, 'index.vrt')
44 build_vrt(image_path, image_paths)
45 return image_path
46
47
48 def load_window(image_dataset, window=None, is_masked=False):
49 """Load a window of an image using Rasterio.
50
51 Args:
52 image_dataset: a Rasterio dataset
53 window: ((row_start, row_stop), (col_start, col_stop)) or
54 ((y_min, y_max), (x_min, x_max))
55 is_masked: If True, read a masked array from rasterio
56
57 Returns:
58 np.ndarray of shape (height, width, channels) where channels is the number of
59 channels in the image_dataset.
60 """
61 if is_masked:
62 im = image_dataset.read(window=window, boundless=True, masked=True)
63 im = np.ma.filled(im, fill_value=0)
64 else:
65 im = image_dataset.read(window=window, boundless=True)
66
67 # Handle non-zero NODATA values by setting the data to 0.
68 for channel, nodata in enumerate(image_dataset.nodatavals):
69 if nodata is not None and nodata != 0:
70 im[channel, im[channel] == nodata] = 0
71
72 im = np.transpose(im, axes=[1, 2, 0])
73 return im
74
75
76 class RasterioSource(ActivateMixin, RasterSource):
77 def __init__(self,
78 uris,
79 raster_transformers,
80 tmp_dir,
81 allow_streaming=False,
82 channel_order=None,
83 x_shift=0.0,
84 y_shift=0.0,
85 extent_crop: Optional[CropOffsets] = None):
86 """Constructor.
87
88 This RasterSource can read any file that can be opened by Rasterio/GDAL
89 including georeferenced formats such as GeoTIFF and non-georeferenced formats
90 such as JPG. See https://www.gdal.org/formats_list.html for more details.
91
92 If channel_order is None, then use non-alpha channels. This also sets any
93 masked or NODATA pixel values to be zeros.
94
95 Args:
96 channel_order: list of indices of channels to extract from raw imagery
97 extent_crop (CropOffsets, optional): Relative
98 offsets (top, left, bottom, right) for cropping the extent.
99 Useful for using splitting a scene into different datasets.
100 Defaults to None i.e. no cropping.
101 """
102 self.uris = uris
103 self.tmp_dir = tmp_dir
104 self.image_tmp_dir = None
105 self.image_dataset = None
106 self.x_shift = x_shift
107 self.y_shift = y_shift
108 self.do_shift = self.x_shift != 0.0 or self.y_shift != 0.0
109 self.allow_streaming = allow_streaming
110 self.extent_crop = extent_crop
111
112 num_channels = None
113
114 # Activate in order to get information out of the raster
115 with self.activate():
116 num_channels = self.image_dataset.count
117 if channel_order is None:
118 colorinterp = self.image_dataset.colorinterp
119 if colorinterp:
120 channel_order = [
121 i for i, color_interp in enumerate(colorinterp)
122 if color_interp != ColorInterp.alpha
123 ]
124 else:
125 channel_order = list(range(0, num_channels))
126 self.validate_channel_order(channel_order, num_channels)
127
128 mask_flags = self.image_dataset.mask_flag_enums
129 self.is_masked = any(
130 [m for m in mask_flags if m != MaskFlags.all_valid])
131
132 self.height = self.image_dataset.height
133 self.width = self.image_dataset.width
134
135 # Get 1x1 chip and apply raster transformers to test dtype.
136 test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))
137 test_chip = test_chip[:, :, channel_order]
138 for transformer in raster_transformers:
139 test_chip = transformer.transform(test_chip, channel_order)
140 self.dtype = test_chip.dtype
141
142 self._set_crs_transformer()
143
144 super().__init__(channel_order, num_channels, raster_transformers)
145
146 def _download_data(self, tmp_dir):
147 """Download any data needed for this Raster Source.
148
149 Return a single local path representing the image or a VRT of the data.
150 """
151 if len(self.uris) == 1:
152 if self.allow_streaming:
153 return self.uris[0]
154 else:
155 return download_if_needed(self.uris[0], tmp_dir)
156 else:
157 if self.allow_streaming:
158 return stream_and_build_vrt(self.uris, tmp_dir)
159 else:
160 return download_and_build_vrt(self.uris, tmp_dir)
161
162 def get_crs_transformer(self):
163 return self.crs_transformer
164
165 def get_extent(self):
166 h, w = self.height, self.width
167 if self.extent_crop is not None:
168 skip_top, skip_left, skip_bottom, skip_right = self.extent_crop
169 ymin, xmin = int(h * skip_top), int(w * skip_left)
170 ymax, xmax = h - int(h * skip_bottom), w - int(w * skip_right)
171 return Box(ymin, xmin, ymax, xmax)
172 return Box(0, 0, h, w)
173
174 def get_dtype(self):
175 """Return the numpy.dtype of this scene"""
176 return self.dtype
177
178 def _get_chip(self, window):
179 if self.image_dataset is None:
180 raise ActivationError('RasterSource must be activated before use')
181 shifted_window = self._get_shifted_window(window)
182 return load_window(
183 self.image_dataset,
184 window=shifted_window.rasterio_format(),
185 is_masked=self.is_masked)
186
187 def _activate(self):
188 # Download images to temporary directory and delete them when done.
189 self.image_tmp_dir = tempfile.TemporaryDirectory(dir=self.tmp_dir)
190 self.imagery_path = self._download_data(self.image_tmp_dir.name)
191 self.image_dataset = rasterio.open(self.imagery_path)
192 self._set_crs_transformer()
193
194 def _set_crs_transformer(self):
195 self.crs_transformer = RasterioCRSTransformer.from_dataset(
196 self.image_dataset)
197 crs = self.image_dataset.crs
198 self.to_wgs84 = None
199 self.from_wgs84 = None
200 if crs and self.do_shift:
201 self.to_wgs84 = Transformer.from_crs(
202 crs.wkt, wgs84, always_xy=True)
203 self.from_wgs84 = Transformer.from_crs(
204 wgs84, crs.wkt, always_xy=True)
205
206 def _deactivate(self):
207 self.image_dataset.close()
208 self.image_dataset = None
209 self.image_tmp_dir.cleanup()
210 self.image_tmp_dir = None
211
212 def _get_shifted_window(self, window):
213 do_shift = self.x_shift != 0.0 or self.y_shift != 0.0
214 if do_shift:
215 ymin, xmin, ymax, xmax = window.tuple_format()
216 width = window.get_width()
217 height = window.get_height()
218
219 # Transform image coordinates into world coordinates
220 transform = self.image_dataset.transform
221 xmin2, ymin2 = transform * (xmin, ymin)
222
223 # Transform from world coordinates to WGS84
224 if self.to_wgs84:
225 lon, lat = self.to_wgs84.transform(xmin2, ymin2)
226 else:
227 lon, lat = xmin2, ymin2
228
229 # Shift. This is performed by computing the shifts in
230 # meters to shifts in degrees. Those shifts are then
231 # applied to the WGS84 coordinate.
232 #
233 # Courtesy of https://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters # noqa
234 lat_radians = math.pi * lat / 180.0
235 dlon = Decimal(self.x_shift) / Decimal(
236 meters_per_degree * math.cos(lat_radians))
237 dlat = Decimal(self.y_shift) / Decimal(meters_per_degree)
238 lon = float(Decimal(lon) + dlon)
239 lat = float(Decimal(lat) + dlat)
240
241 # Transform from WGS84 to world coordinates
242 if self.from_wgs84:
243 xmin3, ymin3 = self.from_wgs84.transform(lon, lat)
244 else:
245 xmin3, ymin3 = lon, lat
246
247 # Trasnform from world coordinates back into image coordinates
248 xmin4, ymin4 = ~transform * (xmin3, ymin3)
249
250 window = Box(ymin4, xmin4, ymin4 + height, xmin4 + width)
251 return window
252
[end of rastervision_core/rastervision/core/data/raster_source/rasterio_source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py
--- a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py
+++ b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py
@@ -73,6 +73,26 @@
return im
+def fill_overflow(extent: Box,
+ window: Box,
+ arr: np.ndarray,
+ fill_value: int = 0) -> np.ndarray:
+ """Given a window and corresponding array of values, if the window
+ overflows the extent, fill the overflowing regions with fill_value.
+ """
+ top_overflow = max(0, extent.ymin - window.ymin)
+ bottom_overflow = max(0, window.ymax - extent.ymax)
+ left_overflow = max(0, extent.xmin - window.xmin)
+ right_overflow = max(0, window.xmax - extent.xmax)
+
+ h, w = arr.shape[:2]
+ arr[:top_overflow] = fill_value
+ arr[h - bottom_overflow:] = fill_value
+ arr[:, :left_overflow] = fill_value
+ arr[:, w - right_overflow:] = fill_value
+ return arr
+
+
class RasterioSource(ActivateMixin, RasterSource):
def __init__(self,
uris,
@@ -175,14 +195,17 @@
"""Return the numpy.dtype of this scene"""
return self.dtype
- def _get_chip(self, window):
+ def _get_chip(self, window: Box) -> np.ndarray:
if self.image_dataset is None:
raise ActivationError('RasterSource must be activated before use')
shifted_window = self._get_shifted_window(window)
- return load_window(
+ chip = load_window(
self.image_dataset,
window=shifted_window.rasterio_format(),
is_masked=self.is_masked)
+ if self.extent_crop is not None:
+ chip = fill_overflow(self.get_extent(), window, chip)
+ return chip
def _activate(self):
# Download images to temporary directory and delete them when done.
| {"golden_diff": "diff --git a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py\n--- a/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py\n+++ b/rastervision_core/rastervision/core/data/raster_source/rasterio_source.py\n@@ -73,6 +73,26 @@\n return im\n \n \n+def fill_overflow(extent: Box,\n+ window: Box,\n+ arr: np.ndarray,\n+ fill_value: int = 0) -> np.ndarray:\n+ \"\"\"Given a window and corresponding array of values, if the window\n+ overflows the extent, fill the overflowing regions with fill_value.\n+ \"\"\"\n+ top_overflow = max(0, extent.ymin - window.ymin)\n+ bottom_overflow = max(0, window.ymax - extent.ymax)\n+ left_overflow = max(0, extent.xmin - window.xmin)\n+ right_overflow = max(0, window.xmax - extent.xmax)\n+\n+ h, w = arr.shape[:2]\n+ arr[:top_overflow] = fill_value\n+ arr[h - bottom_overflow:] = fill_value\n+ arr[:, :left_overflow] = fill_value\n+ arr[:, w - right_overflow:] = fill_value\n+ return arr\n+\n+\n class RasterioSource(ActivateMixin, RasterSource):\n def __init__(self,\n uris,\n@@ -175,14 +195,17 @@\n \"\"\"Return the numpy.dtype of this scene\"\"\"\n return self.dtype\n \n- def _get_chip(self, window):\n+ def _get_chip(self, window: Box) -> np.ndarray:\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n shifted_window = self._get_shifted_window(window)\n- return load_window(\n+ chip = load_window(\n self.image_dataset,\n window=shifted_window.rasterio_format(),\n is_masked=self.is_masked)\n+ if self.extent_crop is not None:\n+ chip = fill_overflow(self.get_extent(), window, chip)\n+ return chip\n \n def _activate(self):\n # Download images to temporary directory and delete them when done.\n", "issue": "Handle edge case: reading a window that overflows the RasterSource's extent that has been artificially cropped using extent_crop can cause data leakage\nIf, for example, there are two `RasterSource`'s with cropped extents such that they both respectively read from the left and right halves of the same underlying image, there will be instances at the boundary where windows straddle both halves. This is not an issue if the two halves are both in the training split or both in the validation split, but if they are in different splits, there will be data leakage.\r\n\r\n**Possible solution**\r\n`RasterSource` should fill the overflowing regions in the window with NODATA values if `extent_crop` is set. 
This can be a configurable option so that it can be enabled only when the image is part of scenes from different data splits.\n", "before_files": [{"content": "import logging\nimport math\nimport os\nfrom pyproj import Transformer\nimport subprocess\nfrom decimal import Decimal\nimport tempfile\nfrom typing import Optional\n\nimport numpy as np\nimport rasterio\nfrom rasterio.enums import (ColorInterp, MaskFlags)\n\nfrom rastervision.pipeline.file_system import download_if_needed\nfrom rastervision.core.box import Box\nfrom rastervision.core.data.crs_transformer import RasterioCRSTransformer\nfrom rastervision.core.data.raster_source import (RasterSource, CropOffsets)\nfrom rastervision.core.data import (ActivateMixin, ActivationError)\n\nlog = logging.getLogger(__name__)\nwgs84 = 'epsg:4326'\nmeters_per_degree = 111319.5\n\n\ndef build_vrt(vrt_path, image_paths):\n \"\"\"Build a VRT for a set of TIFF files.\"\"\"\n cmd = ['gdalbuildvrt', vrt_path]\n cmd.extend(image_paths)\n subprocess.run(cmd)\n\n\ndef download_and_build_vrt(image_uris, tmp_dir):\n log.info('Building VRT...')\n image_paths = [download_if_needed(uri, tmp_dir) for uri in image_uris]\n image_path = os.path.join(tmp_dir, 'index.vrt')\n build_vrt(image_path, image_paths)\n return image_path\n\n\ndef stream_and_build_vrt(images_uris, tmp_dir):\n log.info('Building VRT...')\n image_paths = images_uris\n image_path = os.path.join(tmp_dir, 'index.vrt')\n build_vrt(image_path, image_paths)\n return image_path\n\n\ndef load_window(image_dataset, window=None, is_masked=False):\n \"\"\"Load a window of an image using Rasterio.\n\n Args:\n image_dataset: a Rasterio dataset\n window: ((row_start, row_stop), (col_start, col_stop)) or\n ((y_min, y_max), (x_min, x_max))\n is_masked: If True, read a masked array from rasterio\n\n Returns:\n np.ndarray of shape (height, width, channels) where channels is the number of\n channels in the image_dataset.\n \"\"\"\n if is_masked:\n im = image_dataset.read(window=window, boundless=True, masked=True)\n im = np.ma.filled(im, fill_value=0)\n else:\n im = image_dataset.read(window=window, boundless=True)\n\n # Handle non-zero NODATA values by setting the data to 0.\n for channel, nodata in enumerate(image_dataset.nodatavals):\n if nodata is not None and nodata != 0:\n im[channel, im[channel] == nodata] = 0\n\n im = np.transpose(im, axes=[1, 2, 0])\n return im\n\n\nclass RasterioSource(ActivateMixin, RasterSource):\n def __init__(self,\n uris,\n raster_transformers,\n tmp_dir,\n allow_streaming=False,\n channel_order=None,\n x_shift=0.0,\n y_shift=0.0,\n extent_crop: Optional[CropOffsets] = None):\n \"\"\"Constructor.\n\n This RasterSource can read any file that can be opened by Rasterio/GDAL\n including georeferenced formats such as GeoTIFF and non-georeferenced formats\n such as JPG. See https://www.gdal.org/formats_list.html for more details.\n\n If channel_order is None, then use non-alpha channels. This also sets any\n masked or NODATA pixel values to be zeros.\n\n Args:\n channel_order: list of indices of channels to extract from raw imagery\n extent_crop (CropOffsets, optional): Relative\n offsets (top, left, bottom, right) for cropping the extent.\n Useful for using splitting a scene into different datasets.\n Defaults to None i.e. 
no cropping.\n \"\"\"\n self.uris = uris\n self.tmp_dir = tmp_dir\n self.image_tmp_dir = None\n self.image_dataset = None\n self.x_shift = x_shift\n self.y_shift = y_shift\n self.do_shift = self.x_shift != 0.0 or self.y_shift != 0.0\n self.allow_streaming = allow_streaming\n self.extent_crop = extent_crop\n\n num_channels = None\n\n # Activate in order to get information out of the raster\n with self.activate():\n num_channels = self.image_dataset.count\n if channel_order is None:\n colorinterp = self.image_dataset.colorinterp\n if colorinterp:\n channel_order = [\n i for i, color_interp in enumerate(colorinterp)\n if color_interp != ColorInterp.alpha\n ]\n else:\n channel_order = list(range(0, num_channels))\n self.validate_channel_order(channel_order, num_channels)\n\n mask_flags = self.image_dataset.mask_flag_enums\n self.is_masked = any(\n [m for m in mask_flags if m != MaskFlags.all_valid])\n\n self.height = self.image_dataset.height\n self.width = self.image_dataset.width\n\n # Get 1x1 chip and apply raster transformers to test dtype.\n test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))\n test_chip = test_chip[:, :, channel_order]\n for transformer in raster_transformers:\n test_chip = transformer.transform(test_chip, channel_order)\n self.dtype = test_chip.dtype\n\n self._set_crs_transformer()\n\n super().__init__(channel_order, num_channels, raster_transformers)\n\n def _download_data(self, tmp_dir):\n \"\"\"Download any data needed for this Raster Source.\n\n Return a single local path representing the image or a VRT of the data.\n \"\"\"\n if len(self.uris) == 1:\n if self.allow_streaming:\n return self.uris[0]\n else:\n return download_if_needed(self.uris[0], tmp_dir)\n else:\n if self.allow_streaming:\n return stream_and_build_vrt(self.uris, tmp_dir)\n else:\n return download_and_build_vrt(self.uris, tmp_dir)\n\n def get_crs_transformer(self):\n return self.crs_transformer\n\n def get_extent(self):\n h, w = self.height, self.width\n if self.extent_crop is not None:\n skip_top, skip_left, skip_bottom, skip_right = self.extent_crop\n ymin, xmin = int(h * skip_top), int(w * skip_left)\n ymax, xmax = h - int(h * skip_bottom), w - int(w * skip_right)\n return Box(ymin, xmin, ymax, xmax)\n return Box(0, 0, h, w)\n\n def get_dtype(self):\n \"\"\"Return the numpy.dtype of this scene\"\"\"\n return self.dtype\n\n def _get_chip(self, window):\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n shifted_window = self._get_shifted_window(window)\n return load_window(\n self.image_dataset,\n window=shifted_window.rasterio_format(),\n is_masked=self.is_masked)\n\n def _activate(self):\n # Download images to temporary directory and delete them when done.\n self.image_tmp_dir = tempfile.TemporaryDirectory(dir=self.tmp_dir)\n self.imagery_path = self._download_data(self.image_tmp_dir.name)\n self.image_dataset = rasterio.open(self.imagery_path)\n self._set_crs_transformer()\n\n def _set_crs_transformer(self):\n self.crs_transformer = RasterioCRSTransformer.from_dataset(\n self.image_dataset)\n crs = self.image_dataset.crs\n self.to_wgs84 = None\n self.from_wgs84 = None\n if crs and self.do_shift:\n self.to_wgs84 = Transformer.from_crs(\n crs.wkt, wgs84, always_xy=True)\n self.from_wgs84 = Transformer.from_crs(\n wgs84, crs.wkt, always_xy=True)\n\n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n self.image_tmp_dir.cleanup()\n self.image_tmp_dir = None\n\n def _get_shifted_window(self, window):\n do_shift = 
self.x_shift != 0.0 or self.y_shift != 0.0\n if do_shift:\n ymin, xmin, ymax, xmax = window.tuple_format()\n width = window.get_width()\n height = window.get_height()\n\n # Transform image coordinates into world coordinates\n transform = self.image_dataset.transform\n xmin2, ymin2 = transform * (xmin, ymin)\n\n # Transform from world coordinates to WGS84\n if self.to_wgs84:\n lon, lat = self.to_wgs84.transform(xmin2, ymin2)\n else:\n lon, lat = xmin2, ymin2\n\n # Shift. This is performed by computing the shifts in\n # meters to shifts in degrees. Those shifts are then\n # applied to the WGS84 coordinate.\n #\n # Courtesy of https://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters # noqa\n lat_radians = math.pi * lat / 180.0\n dlon = Decimal(self.x_shift) / Decimal(\n meters_per_degree * math.cos(lat_radians))\n dlat = Decimal(self.y_shift) / Decimal(meters_per_degree)\n lon = float(Decimal(lon) + dlon)\n lat = float(Decimal(lat) + dlat)\n\n # Transform from WGS84 to world coordinates\n if self.from_wgs84:\n xmin3, ymin3 = self.from_wgs84.transform(lon, lat)\n else:\n xmin3, ymin3 = lon, lat\n\n # Trasnform from world coordinates back into image coordinates\n xmin4, ymin4 = ~transform * (xmin3, ymin3)\n\n window = Box(ymin4, xmin4, ymin4 + height, xmin4 + width)\n return window\n", "path": "rastervision_core/rastervision/core/data/raster_source/rasterio_source.py"}]} | 3,602 | 511 |
gh_patches_debug_5228 | rasdani/github-patches | git_diff | pyodide__pyodide-3074 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Export version from pyodide JS module
## 🚀 Feature
In `pyodide.d.ts` I see `declare let version: string;` but it's not exported. It'd be great if it were exported so that I can do this:
```ts
import {version} from "pyodide";
```
### Motivation
I have [some code](https://github.com/alexmojaki/pyodide-worker-runner/blob/e7dd3d0ee1dff457bf9d6104944477840a83e5a7/lib/index.ts#L16) that roughly looks like this:
```ts
import {loadPyodide, PyodideInterface} from "pyodide";
const version = "0.21.1";
const indexURL = `https://cdn.jsdelivr.net/pyodide/v${version}/full/`;
const pyodide: PyodideInterface = await loadPyodide({indexURL});
if (pyodide.version !== version) {
throw new Error(
`loadPyodide loaded version ${pyodide.version} instead of ${version}`,
);
}
```
I'd like to import `version` instead of setting it manually, so that it always automatically matches whatever version of pyodide is installed.
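For illustration, if `version` were exported from the package entry point (which is exactly what this request asks for, so treat the import below as hypothetical until it lands), the snippet above could collapse to:

```ts
import { loadPyodide, version, PyodideInterface } from "pyodide";

const indexURL = `https://cdn.jsdelivr.net/pyodide/v${version}/full/`;
const pyodide: PyodideInterface = await loadPyodide({ indexURL });

if (pyodide.version !== version) {
  throw new Error(
    `loadPyodide loaded version ${pyodide.version} instead of ${version}`,
  );
}
```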
</issue>
<code>
[start of tools/bump_version.py]
1 #!/usr/bin/env python3
2
3 import argparse
4 import difflib
5 import functools
6 import itertools
7 import pathlib
8 import re
9 from ast import Str
10 from collections import namedtuple
11 from collections.abc import Callable
12
13 CORE_VERSION_REGEX = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
14
15 PYTHON_VERSION_REGEX = CORE_VERSION_REGEX + (
16 r"((?P<pre>a|b|rc)(?P<preversion>\d+))?" r"(\.(?P<dev>dev)(?P<devversion>\d+))?"
17 )
18
19 JS_VERSION_REGEX = CORE_VERSION_REGEX + (
20 r"(\-(?P<pre>alpha|beta|rc)\.(?P<preversion>\d+))?"
21 r"(\-(?P<dev>dev)\.(?P<devversion>\d+))?"
22 )
23
24
25 def build_version_pattern(pattern):
26 return re.compile(
27 pattern.format(
28 python_version=f"(?P<version>{PYTHON_VERSION_REGEX})",
29 js_version=f"(?P<version>{JS_VERSION_REGEX})",
30 )
31 )
32
33
34 ROOT = pathlib.Path(__file__).resolve().parent.parent
35 Target = namedtuple("target", ("file", "pattern", "prerelease"))
36 PYTHON_TARGETS = [
37 Target(
38 file=ROOT / "src/py/pyodide/__init__.py",
39 pattern=build_version_pattern('__version__ = "{python_version}"'),
40 prerelease=True,
41 ),
42 Target(
43 file=ROOT / "src/py/setup.cfg",
44 pattern=build_version_pattern("version = {python_version}"),
45 prerelease=True,
46 ),
47 Target(
48 ROOT / "pyodide-build/pyodide_build/__init__.py",
49 pattern=build_version_pattern('__version__ = "{python_version}"'),
50 prerelease=True,
51 ),
52 Target(
53 ROOT / "docs/conf.py",
54 build_version_pattern('pyodide_version = "{python_version}"'),
55 prerelease=True,
56 ),
57 Target(
58 ROOT / "run_docker",
59 build_version_pattern('PYODIDE_PREBUILT_IMAGE_TAG="{python_version}"'),
60 prerelease=False,
61 ),
62 Target(
63 ROOT / "docs/project/about.md",
64 build_version_pattern(r"version\s*=\s*{{{python_version}}}"),
65 prerelease=False,
66 ),
67 ]
68
69 JS_TARGETS = [
70 Target(
71 ROOT / "src/js/package.json",
72 build_version_pattern(r'"pyodide",\s*"version": "{js_version}"'),
73 prerelease=True,
74 ),
75 Target(
76 ROOT / "src/js/package-lock.json",
77 build_version_pattern(r'"pyodide",\s*"version": "{js_version}"'),
78 prerelease=True,
79 ),
80 ]
81
82
83 @functools.lru_cache
84 def python_version_to_js_version(version: str) -> Str:
85 """
86 Convert Python version name to JS version name
87 These two are different in prerelease or dev versions.
88 e.g. 1.2.3a0 <==> 1.2.3-alpha.0
89 4.5.6.dev2 <==> 4.5.6-dev.2
90 """
91 match = re.match(PYTHON_VERSION_REGEX, version)
92 matches = match.groupdict()
93
94 prerelease = matches["pre"] is not None
95 devrelease = matches["dev"] is not None
96
97 if prerelease and devrelease:
98 raise ValueError("Cannot have both prerelease and devrelease")
99 elif prerelease:
100 matches["pre"] = matches["pre"].replace("a", "alpha").replace("b", "beta")
101 return "{major}.{minor}.{patch}-{pre}.{preversion}".format(**matches)
102 elif devrelease:
103 return "{major}.{minor}.{patch}-{dev}.{devversion}".format(**matches)
104 else:
105 return "{major}.{minor}.{patch}".format(**matches)
106
107
108 @functools.lru_cache
109 def is_core_version(version: str) -> bool:
110 match = re.fullmatch(CORE_VERSION_REGEX, version)
111 if match is None:
112 return False
113
114 return True
115
116
117 def parse_current_version(target: Target) -> str:
118 """Parse current version"""
119 content = target.file.read_text()
120 match = target.pattern.search(content)
121
122 if match is None:
123 raise ValueError(f"Unabled to detect version string: {target.file}")
124
125 return match.groupdict()["version"]
126
127
128 def generate_updated_content(
129 target: Target, current_version: str, new_version: str
130 ) -> Callable:
131 file = target.file
132 pattern = target.pattern
133 content = file.read_text()
134
135 if current_version == new_version:
136 return None
137
138 # Some files only required to be bumped on core version release.
139 # For example, we don't deploy prebuilt docker images for dev release.
140 if not target.prerelease:
141 if not is_core_version(new_version):
142 print(f"[*] {file}: Skipped (not targeting a core version)")
143 return None
144
145 new_content = content
146 startpos = 0
147 while match := pattern.search(new_content, pos=startpos):
148 version = match.groupdict()["version"]
149 if version == current_version:
150 start, end = match.span()
151 new_span = new_content[start:end].replace(current_version, new_version)
152 new_content = new_content[:start] + new_span + new_content[end:]
153 startpos = end
154 elif version == new_version:
155 break
156 else:
157 raise ValueError(
158 f"'{file}' contains invalid version: expected '{current_version}' but found '{version}'"
159 )
160
161 show_diff(content, new_content, file)
162
163 return new_content
164
165
166 def show_diff(before: str, after: str, file: pathlib.Path):
167 diffs = list(
168 difflib.unified_diff(
169 before.splitlines(keepends=True), after.splitlines(keepends=True), n=0
170 )
171 )[2:]
172 print(f"[*] Diff of '{file}':\n")
173 print("".join(diffs))
174
175
176 def parse_args():
177 parser = argparse.ArgumentParser("Bump version strings in the Pyodide repository")
178 parser.add_argument("--new-version", help="New version")
179 parser.add_argument(
180 "--dry-run", action="store_true", help="Don't actually write anything"
181 )
182
183 return parser.parse_args()
184
185
186 def main():
187 args = parse_args()
188
189 if args.new_version is None:
190 new_version = input("New version (e.g. 0.22.0, 0.22.0a0, 0.22.0.dev0): ")
191 else:
192 new_version = args.new_version
193
194 if re.fullmatch(PYTHON_VERSION_REGEX, new_version) is None:
195 raise ValueError(f"Invalid new version: {new_version}")
196
197 new_version_py = new_version
198 new_version_js = python_version_to_js_version(new_version)
199
200 # We want to update files in all-or-nothing strategy,
201 # so we keep the queue of update functions
202 update_queue = []
203
204 targets = itertools.chain(
205 zip(PYTHON_TARGETS, [new_version_py] * len(PYTHON_TARGETS)),
206 zip(JS_TARGETS, [new_version_js] * len(JS_TARGETS)),
207 )
208 for target, new_version in targets:
209 current_version = parse_current_version(target)
210 new_content = generate_updated_content(target, current_version, new_version)
211 if new_content is not None:
212 update_queue.append((target, new_content))
213
214 if args.dry_run:
215 return
216
217 for target, content in update_queue:
218 target.file.write_text(content)
219
220
221 if __name__ == "__main__":
222 main()
223
[end of tools/bump_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/bump_version.py b/tools/bump_version.py
--- a/tools/bump_version.py
+++ b/tools/bump_version.py
@@ -64,6 +64,16 @@
build_version_pattern(r"version\s*=\s*{{{python_version}}}"),
prerelease=False,
),
+ Target(
+ ROOT / "src/js/version.ts",
+ build_version_pattern('version: string = "{python_version}"'),
+ prerelease=True,
+ ),
+ Target(
+ ROOT / "src/core/pre.js",
+ build_version_pattern('API.version = "{python_version}"'),
+ prerelease=True,
+ ),
]
JS_TARGETS = [
| {"golden_diff": "diff --git a/tools/bump_version.py b/tools/bump_version.py\n--- a/tools/bump_version.py\n+++ b/tools/bump_version.py\n@@ -64,6 +64,16 @@\n build_version_pattern(r\"version\\s*=\\s*{{{python_version}}}\"),\n prerelease=False,\n ),\n+ Target(\n+ ROOT / \"src/js/version.ts\",\n+ build_version_pattern('version: string = \"{python_version}\"'),\n+ prerelease=True,\n+ ),\n+ Target(\n+ ROOT / \"src/core/pre.js\",\n+ build_version_pattern('API.version = \"{python_version}\"'),\n+ prerelease=True,\n+ ),\n ]\n \n JS_TARGETS = [\n", "issue": "Export version from pyodide JS module\n## \ud83d\ude80 Feature\r\n\r\nIn `pyodide.d.ts` I see `declare let version: string;` but it's not exported. It'd be great if it was exported so that I can do this:\r\n\r\n```ts\r\nimport {version} from \"pyodide\";\r\n```\r\n\r\n### Motivation\r\n\r\nI have [some code](https://github.com/alexmojaki/pyodide-worker-runner/blob/e7dd3d0ee1dff457bf9d6104944477840a83e5a7/lib/index.ts#L16) that roughly looks like this:\r\n\r\n```ts\r\nimport {loadPyodide, PyodideInterface} from \"pyodide\";\r\n\r\nconst version = \"0.21.1\";\r\nconst indexURL = `https://cdn.jsdelivr.net/pyodide/v${version}/full/`;\r\nconst pyodide: PyodideInterface = await loadPyodide({indexURL});\r\nif (pyodide.version !== version) {\r\n throw new Error(\r\n `loadPyodide loaded version ${pyodide.version} instead of ${version}`,\r\n );\r\n}\r\n```\r\n\r\nI'd like to import `version` instead of setting it manually, so that it always automatically matches whatever version of pyodide is installed.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport difflib\nimport functools\nimport itertools\nimport pathlib\nimport re\nfrom ast import Str\nfrom collections import namedtuple\nfrom collections.abc import Callable\n\nCORE_VERSION_REGEX = r\"(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)\"\n\nPYTHON_VERSION_REGEX = CORE_VERSION_REGEX + (\n r\"((?P<pre>a|b|rc)(?P<preversion>\\d+))?\" r\"(\\.(?P<dev>dev)(?P<devversion>\\d+))?\"\n)\n\nJS_VERSION_REGEX = CORE_VERSION_REGEX + (\n r\"(\\-(?P<pre>alpha|beta|rc)\\.(?P<preversion>\\d+))?\"\n r\"(\\-(?P<dev>dev)\\.(?P<devversion>\\d+))?\"\n)\n\n\ndef build_version_pattern(pattern):\n return re.compile(\n pattern.format(\n python_version=f\"(?P<version>{PYTHON_VERSION_REGEX})\",\n js_version=f\"(?P<version>{JS_VERSION_REGEX})\",\n )\n )\n\n\nROOT = pathlib.Path(__file__).resolve().parent.parent\nTarget = namedtuple(\"target\", (\"file\", \"pattern\", \"prerelease\"))\nPYTHON_TARGETS = [\n Target(\n file=ROOT / \"src/py/pyodide/__init__.py\",\n pattern=build_version_pattern('__version__ = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n file=ROOT / \"src/py/setup.cfg\",\n pattern=build_version_pattern(\"version = {python_version}\"),\n prerelease=True,\n ),\n Target(\n ROOT / \"pyodide-build/pyodide_build/__init__.py\",\n pattern=build_version_pattern('__version__ = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"docs/conf.py\",\n build_version_pattern('pyodide_version = \"{python_version}\"'),\n prerelease=True,\n ),\n Target(\n ROOT / \"run_docker\",\n build_version_pattern('PYODIDE_PREBUILT_IMAGE_TAG=\"{python_version}\"'),\n prerelease=False,\n ),\n Target(\n ROOT / \"docs/project/about.md\",\n build_version_pattern(r\"version\\s*=\\s*{{{python_version}}}\"),\n prerelease=False,\n ),\n]\n\nJS_TARGETS = [\n Target(\n ROOT / \"src/js/package.json\",\n build_version_pattern(r'\"pyodide\",\\s*\"version\": \"{js_version}\"'),\n prerelease=True,\n 
),\n Target(\n ROOT / \"src/js/package-lock.json\",\n build_version_pattern(r'\"pyodide\",\\s*\"version\": \"{js_version}\"'),\n prerelease=True,\n ),\n]\n\n\[email protected]_cache\ndef python_version_to_js_version(version: str) -> Str:\n \"\"\"\n Convert Python version name to JS version name\n These two are different in prerelease or dev versions.\n e.g. 1.2.3a0 <==> 1.2.3-alpha.0\n 4.5.6.dev2 <==> 4.5.6-dev.2\n \"\"\"\n match = re.match(PYTHON_VERSION_REGEX, version)\n matches = match.groupdict()\n\n prerelease = matches[\"pre\"] is not None\n devrelease = matches[\"dev\"] is not None\n\n if prerelease and devrelease:\n raise ValueError(\"Cannot have both prerelease and devrelease\")\n elif prerelease:\n matches[\"pre\"] = matches[\"pre\"].replace(\"a\", \"alpha\").replace(\"b\", \"beta\")\n return \"{major}.{minor}.{patch}-{pre}.{preversion}\".format(**matches)\n elif devrelease:\n return \"{major}.{minor}.{patch}-{dev}.{devversion}\".format(**matches)\n else:\n return \"{major}.{minor}.{patch}\".format(**matches)\n\n\[email protected]_cache\ndef is_core_version(version: str) -> bool:\n match = re.fullmatch(CORE_VERSION_REGEX, version)\n if match is None:\n return False\n\n return True\n\n\ndef parse_current_version(target: Target) -> str:\n \"\"\"Parse current version\"\"\"\n content = target.file.read_text()\n match = target.pattern.search(content)\n\n if match is None:\n raise ValueError(f\"Unabled to detect version string: {target.file}\")\n\n return match.groupdict()[\"version\"]\n\n\ndef generate_updated_content(\n target: Target, current_version: str, new_version: str\n) -> Callable:\n file = target.file\n pattern = target.pattern\n content = file.read_text()\n\n if current_version == new_version:\n return None\n\n # Some files only required to be bumped on core version release.\n # For example, we don't deploy prebuilt docker images for dev release.\n if not target.prerelease:\n if not is_core_version(new_version):\n print(f\"[*] {file}: Skipped (not targeting a core version)\")\n return None\n\n new_content = content\n startpos = 0\n while match := pattern.search(new_content, pos=startpos):\n version = match.groupdict()[\"version\"]\n if version == current_version:\n start, end = match.span()\n new_span = new_content[start:end].replace(current_version, new_version)\n new_content = new_content[:start] + new_span + new_content[end:]\n startpos = end\n elif version == new_version:\n break\n else:\n raise ValueError(\n f\"'{file}' contains invalid version: expected '{current_version}' but found '{version}'\"\n )\n\n show_diff(content, new_content, file)\n\n return new_content\n\n\ndef show_diff(before: str, after: str, file: pathlib.Path):\n diffs = list(\n difflib.unified_diff(\n before.splitlines(keepends=True), after.splitlines(keepends=True), n=0\n )\n )[2:]\n print(f\"[*] Diff of '{file}':\\n\")\n print(\"\".join(diffs))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Bump version strings in the Pyodide repository\")\n parser.add_argument(\"--new-version\", help=\"New version\")\n parser.add_argument(\n \"--dry-run\", action=\"store_true\", help=\"Don't actually write anything\"\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n if args.new_version is None:\n new_version = input(\"New version (e.g. 
0.22.0, 0.22.0a0, 0.22.0.dev0): \")\n else:\n new_version = args.new_version\n\n if re.fullmatch(PYTHON_VERSION_REGEX, new_version) is None:\n raise ValueError(f\"Invalid new version: {new_version}\")\n\n new_version_py = new_version\n new_version_js = python_version_to_js_version(new_version)\n\n # We want to update files in all-or-nothing strategy,\n # so we keep the queue of update functions\n update_queue = []\n\n targets = itertools.chain(\n zip(PYTHON_TARGETS, [new_version_py] * len(PYTHON_TARGETS)),\n zip(JS_TARGETS, [new_version_js] * len(JS_TARGETS)),\n )\n for target, new_version in targets:\n current_version = parse_current_version(target)\n new_content = generate_updated_content(target, current_version, new_version)\n if new_content is not None:\n update_queue.append((target, new_content))\n\n if args.dry_run:\n return\n\n for target, content in update_queue:\n target.file.write_text(content)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/bump_version.py"}]} | 3,050 | 151 |
gh_patches_debug_35163 | rasdani/github-patches | git_diff | StackStorm__st2-4174 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incomplete help for: st2 action-alias match
```
$ st2 action-alias match --help
usage: st2 action-alias match [-t TOKEN] [--api-key API_KEY] [-j] [-y]
[-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]]
command
st2 action-alias match: error: too few arguments
```
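
As a point of comparison, a plain argparse setup where the positional argument carries a real help string produces the expected `--help` behaviour. This is a standalone illustration of the desired outcome, not the actual st2 client code; the prog name and help strings are made up for the example:

```python
import argparse

# Standalone illustration only -- not the st2 client implementation.
parser = argparse.ArgumentParser(prog="st2 action-alias")
subparsers = parser.add_subparsers(dest="subcommand")

match = subparsers.add_parser("match", help="Match command text against aliases.")
match.add_argument("match_text", metavar="command",
                   help="Command text to match against registered aliases.")

args = parser.parse_args(["match", "run date on localhost"])
print(args.match_text)  # -> run date on localhost

# "st2 action-alias match --help" corresponds to parse_args(["match", "--help"]),
# which prints the usage text (including the argument description) and exits
# cleanly instead of failing with "too few arguments".
```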
</issue>
<code>
[start of st2client/st2client/commands/action_alias.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import absolute_import
17
18 from st2client.models import core
19 from st2client.models.action_alias import ActionAlias
20 from st2client.models.action_alias import ActionAliasMatch
21 from st2client.commands import resource
22 from st2client.formatters import table
23
24
25 __all__ = [
26 'ActionAliasBranch',
27 'ActionAliasMatchCommand',
28 'ActionAliasExecuteCommand'
29 ]
30
31
32 class ActionAliasBranch(resource.ResourceBranch):
33 def __init__(self, description, app, subparsers, parent_parser=None):
34 super(ActionAliasBranch, self).__init__(
35 ActionAlias, description, app, subparsers,
36 parent_parser=parent_parser, read_only=False,
37 commands={
38 'list': ActionAliasListCommand,
39 'get': ActionAliasGetCommand
40 })
41
42 self.commands['match'] = ActionAliasMatchCommand(
43 self.resource, self.app, self.subparsers,
44 add_help=False)
45 self.commands['execute'] = ActionAliasExecuteCommand(
46 self.resource, self.app, self.subparsers,
47 add_help=False)
48
49
50 class ActionAliasListCommand(resource.ContentPackResourceListCommand):
51 display_attributes = ['ref', 'pack', 'description', 'enabled']
52
53
54 class ActionAliasGetCommand(resource.ContentPackResourceGetCommand):
55 display_attributes = ['all']
56 attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',
57 'enabled', 'action_ref', 'formats']
58
59
60 class ActionAliasMatchCommand(resource.ResourceCommand):
61 display_attributes = ['name', 'description']
62
63 def __init__(self, resource, *args, **kwargs):
64 super(ActionAliasMatchCommand, self).__init__(
65 resource, 'match',
66 'Get the list of %s that match the command text.' %
67 resource.get_plural_display_name().lower(),
68 *args, **kwargs)
69
70 self.parser.add_argument('match_text',
71 metavar='command',
72 help=help)
73 self.parser.add_argument('-h', '--help',
74 action='store_true', dest='help',
75 help='Print usage for the given action.')
76 self.parser.add_argument('-a', '--attr', nargs='+',
77 default=self.display_attributes,
78 help=('List of attributes to include in the '
79 'output. "all" will return all '
80 'attributes.'))
81 self.parser.add_argument('-w', '--width', nargs='+', type=int,
82 default=None,
83 help=('Set the width of columns in output.'))
84
85 @resource.add_auth_token_to_kwargs_from_cli
86 def run(self, args, **kwargs):
87 alias_match = ActionAliasMatch()
88 alias_match.command = args.match_text
89
90 match, _ = self.manager.match(alias_match, **kwargs)
91 return [match]
92
93 def run_and_print(self, args, **kwargs):
94 instances = self.run(args, **kwargs)
95 self.print_output(instances, table.MultiColumnTable,
96 attributes=args.attr, widths=args.width,
97 json=args.json, yaml=args.yaml)
98
99
100 class ActionAliasExecuteCommand(resource.ResourceCommand):
101 display_attributes = ['name']
102
103 def __init__(self, resource, *args, **kwargs):
104 super(ActionAliasExecuteCommand, self).__init__(
105 resource, 'execute',
106 ('Execute the command text by finding a matching %s.' %
107 resource.get_display_name().lower()), *args, **kwargs)
108
109 self.parser.add_argument('command_text',
110 metavar='command',
111 help=help)
112 self.parser.add_argument('-h', '--help',
113 action='store_true', dest='help',
114 help='Print usage for the given action.')
115 self.parser.add_argument('-u', '--user', type=str, default=None,
116 help='User under which to run the action (admins only).')
117
118 @resource.add_auth_token_to_kwargs_from_cli
119 def run(self, args, **kwargs):
120 payload = core.Resource()
121 payload.command = args.command_text
122 payload.user = args.user
123 payload.source_channel = 'cli'
124
125 alias_execution_mgr = self.app.client.managers['ActionAliasExecution']
126 execution = alias_execution_mgr.match_and_execute(payload)
127 return execution
128
129 def run_and_print(self, args, **kwargs):
130 execution = self.run(args, **kwargs)
131 print("Matching Action-alias: '%s'" % execution.actionalias['ref'])
132 print("To get the results, execute:\n st2 execution get %s" %
133 (execution.execution['id']))
134
[end of st2client/st2client/commands/action_alias.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py
--- a/st2client/st2client/commands/action_alias.py
+++ b/st2client/st2client/commands/action_alias.py
@@ -41,10 +41,10 @@
self.commands['match'] = ActionAliasMatchCommand(
self.resource, self.app, self.subparsers,
- add_help=False)
+ add_help=True)
self.commands['execute'] = ActionAliasExecuteCommand(
self.resource, self.app, self.subparsers,
- add_help=False)
+ add_help=True)
class ActionAliasListCommand(resource.ContentPackResourceListCommand):
@@ -69,10 +69,8 @@
self.parser.add_argument('match_text',
metavar='command',
- help=help)
- self.parser.add_argument('-h', '--help',
- action='store_true', dest='help',
- help='Print usage for the given action.')
+ help=('Get the list of %s that match the command text.' %
+ resource.get_display_name().lower()))
self.parser.add_argument('-a', '--attr', nargs='+',
default=self.display_attributes,
help=('List of attributes to include in the '
@@ -108,10 +106,8 @@
self.parser.add_argument('command_text',
metavar='command',
- help=help)
- self.parser.add_argument('-h', '--help',
- action='store_true', dest='help',
- help='Print usage for the given action.')
+ help=('Execute the command text by finding a matching %s.' %
+ resource.get_display_name().lower()))
self.parser.add_argument('-u', '--user', type=str, default=None,
help='User under which to run the action (admins only).')
| {"golden_diff": "diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py\n--- a/st2client/st2client/commands/action_alias.py\n+++ b/st2client/st2client/commands/action_alias.py\n@@ -41,10 +41,10 @@\n \n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n- add_help=False)\n+ add_help=True)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n- add_help=False)\n+ add_help=True)\n \n \n class ActionAliasListCommand(resource.ContentPackResourceListCommand):\n@@ -69,10 +69,8 @@\n \n self.parser.add_argument('match_text',\n metavar='command',\n- help=help)\n- self.parser.add_argument('-h', '--help',\n- action='store_true', dest='help',\n- help='Print usage for the given action.')\n+ help=('Get the list of %s that match the command text.' %\n+ resource.get_display_name().lower()))\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n@@ -108,10 +106,8 @@\n \n self.parser.add_argument('command_text',\n metavar='command',\n- help=help)\n- self.parser.add_argument('-h', '--help',\n- action='store_true', dest='help',\n- help='Print usage for the given action.')\n+ help=('Execute the command text by finding a matching %s.' %\n+ resource.get_display_name().lower()))\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n", "issue": "Incomplete help for: st2 action-alias match\n```\r\n$ st2 action-alias match --help\r\nusage: st2 action-alias match [-t TOKEN] [--api-key API_KEY] [-j] [-y]\r\n [-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]]\r\n command\r\nst2 action-alias match: error: too few arguments\r\n```\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2client.models import core\nfrom st2client.models.action_alias import ActionAlias\nfrom st2client.models.action_alias import ActionAliasMatch\nfrom st2client.commands import resource\nfrom st2client.formatters import table\n\n\n__all__ = [\n 'ActionAliasBranch',\n 'ActionAliasMatchCommand',\n 'ActionAliasExecuteCommand'\n]\n\n\nclass ActionAliasBranch(resource.ResourceBranch):\n def __init__(self, description, app, subparsers, parent_parser=None):\n super(ActionAliasBranch, self).__init__(\n ActionAlias, description, app, subparsers,\n parent_parser=parent_parser, read_only=False,\n commands={\n 'list': ActionAliasListCommand,\n 'get': ActionAliasGetCommand\n })\n\n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n add_help=False)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n add_help=False)\n\n\nclass ActionAliasListCommand(resource.ContentPackResourceListCommand):\n display_attributes = ['ref', 'pack', 'description', 'enabled']\n\n\nclass ActionAliasGetCommand(resource.ContentPackResourceGetCommand):\n display_attributes = ['all']\n attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',\n 'enabled', 'action_ref', 'formats']\n\n\nclass ActionAliasMatchCommand(resource.ResourceCommand):\n display_attributes = ['name', 'description']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasMatchCommand, self).__init__(\n resource, 'match',\n 'Get the list of %s that match the command text.' %\n resource.get_plural_display_name().lower(),\n *args, **kwargs)\n\n self.parser.add_argument('match_text',\n metavar='command',\n help=help)\n self.parser.add_argument('-h', '--help',\n action='store_true', dest='help',\n help='Print usage for the given action.')\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n 'output. \"all\" will return all '\n 'attributes.'))\n self.parser.add_argument('-w', '--width', nargs='+', type=int,\n default=None,\n help=('Set the width of columns in output.'))\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n alias_match = ActionAliasMatch()\n alias_match.command = args.match_text\n\n match, _ = self.manager.match(alias_match, **kwargs)\n return [match]\n\n def run_and_print(self, args, **kwargs):\n instances = self.run(args, **kwargs)\n self.print_output(instances, table.MultiColumnTable,\n attributes=args.attr, widths=args.width,\n json=args.json, yaml=args.yaml)\n\n\nclass ActionAliasExecuteCommand(resource.ResourceCommand):\n display_attributes = ['name']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasExecuteCommand, self).__init__(\n resource, 'execute',\n ('Execute the command text by finding a matching %s.' 
%\n resource.get_display_name().lower()), *args, **kwargs)\n\n self.parser.add_argument('command_text',\n metavar='command',\n help=help)\n self.parser.add_argument('-h', '--help',\n action='store_true', dest='help',\n help='Print usage for the given action.')\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n payload = core.Resource()\n payload.command = args.command_text\n payload.user = args.user\n payload.source_channel = 'cli'\n\n alias_execution_mgr = self.app.client.managers['ActionAliasExecution']\n execution = alias_execution_mgr.match_and_execute(payload)\n return execution\n\n def run_and_print(self, args, **kwargs):\n execution = self.run(args, **kwargs)\n print(\"Matching Action-alias: '%s'\" % execution.actionalias['ref'])\n print(\"To get the results, execute:\\n st2 execution get %s\" %\n (execution.execution['id']))\n", "path": "st2client/st2client/commands/action_alias.py"}]} | 2,036 | 406 |
gh_patches_debug_17804 | rasdani/github-patches | git_diff | pypa__pip-6616 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace overly specific function definition with more general alternative
In `outdated.py`, there is a function definition for `was_installed_by_pip` that can be simplified (or gotten rid of) by using `get_installer`, which is defined in another file.
https://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/outdated.py#L79
https://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/packaging.py#L86
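
A rough sketch of what the simplified function could look like — this just mirrors the direction suggested above rather than a finalized implementation, and the exact `"pip"` string comparison is an assumption about what `get_installer` returns for pip-installed distributions:

```python
from pip._vendor import pkg_resources

from pip._internal.utils.packaging import get_installer


def was_installed_by_pip(pkg):
    # type: (str) -> bool
    """Check whether pkg was installed by pip.

    Delegates the INSTALLER metadata lookup to get_installer() instead of
    reading the metadata lines directly.
    """
    try:
        dist = pkg_resources.get_distribution(pkg)
        return "pip" == get_installer(dist)
    except pkg_resources.DistributionNotFound:
        return False
```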
</issue>
<code>
[start of src/pip/_internal/utils/outdated.py]
1 from __future__ import absolute_import
2
3 import datetime
4 import json
5 import logging
6 import os.path
7 import sys
8
9 from pip._vendor import lockfile, pkg_resources
10 from pip._vendor.packaging import version as packaging_version
11
12 from pip._internal.index import PackageFinder
13 from pip._internal.utils.compat import WINDOWS
14 from pip._internal.utils.filesystem import check_path_owner
15 from pip._internal.utils.misc import ensure_dir, get_installed_version
16 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
17
18 if MYPY_CHECK_RUNNING:
19 import optparse
20 from typing import Any, Dict
21 from pip._internal.download import PipSession
22
23
24 SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
25
26
27 logger = logging.getLogger(__name__)
28
29
30 class SelfCheckState(object):
31 def __init__(self, cache_dir):
32 # type: (str) -> None
33 self.state = {} # type: Dict[str, Any]
34 self.statefile_path = None
35
36 # Try to load the existing state
37 if cache_dir:
38 self.statefile_path = os.path.join(cache_dir, "selfcheck.json")
39 try:
40 with open(self.statefile_path) as statefile:
41 self.state = json.load(statefile)[sys.prefix]
42 except (IOError, ValueError, KeyError):
43 # Explicitly suppressing exceptions, since we don't want to
44 # error out if the cache file is invalid.
45 pass
46
47 def save(self, pypi_version, current_time):
48 # type: (str, datetime.datetime) -> None
49 # If we do not have a path to cache in, don't bother saving.
50 if not self.statefile_path:
51 return
52
53 # Check to make sure that we own the directory
54 if not check_path_owner(os.path.dirname(self.statefile_path)):
55 return
56
57 # Now that we've ensured the directory is owned by this user, we'll go
58 # ahead and make sure that all our directories are created.
59 ensure_dir(os.path.dirname(self.statefile_path))
60
61 # Attempt to write out our version check file
62 with lockfile.LockFile(self.statefile_path):
63 if os.path.exists(self.statefile_path):
64 with open(self.statefile_path) as statefile:
65 state = json.load(statefile)
66 else:
67 state = {}
68
69 state[sys.prefix] = {
70 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
71 "pypi_version": pypi_version,
72 }
73
74 with open(self.statefile_path, "w") as statefile:
75 json.dump(state, statefile, sort_keys=True,
76 separators=(",", ":"))
77
78
79 def was_installed_by_pip(pkg):
80 # type: (str) -> bool
81 """Checks whether pkg was installed by pip
82
83 This is used not to display the upgrade message when pip is in fact
84 installed by system package manager, such as dnf on Fedora.
85 """
86 try:
87 dist = pkg_resources.get_distribution(pkg)
88 return (dist.has_metadata('INSTALLER') and
89 'pip' in dist.get_metadata_lines('INSTALLER'))
90 except pkg_resources.DistributionNotFound:
91 return False
92
93
94 def pip_version_check(session, options):
95 # type: (PipSession, optparse.Values) -> None
96 """Check for an update for pip.
97
98 Limit the frequency of checks to once per week. State is stored either in
99 the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
100 of the pip script path.
101 """
102 installed_version = get_installed_version("pip")
103 if not installed_version:
104 return
105
106 pip_version = packaging_version.parse(installed_version)
107 pypi_version = None
108
109 try:
110 state = SelfCheckState(cache_dir=options.cache_dir)
111
112 current_time = datetime.datetime.utcnow()
113 # Determine if we need to refresh the state
114 if "last_check" in state.state and "pypi_version" in state.state:
115 last_check = datetime.datetime.strptime(
116 state.state["last_check"],
117 SELFCHECK_DATE_FMT
118 )
119 if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
120 pypi_version = state.state["pypi_version"]
121
122 # Refresh the version if we need to or just see if we need to warn
123 if pypi_version is None:
124 # Lets use PackageFinder to see what the latest pip version is
125 finder = PackageFinder.create(
126 find_links=options.find_links,
127 index_urls=[options.index_url] + options.extra_index_urls,
128 allow_all_prereleases=False, # Explicitly set to False
129 trusted_hosts=options.trusted_hosts,
130 session=session,
131 )
132 candidate = finder.find_candidates("pip").get_best()
133 if candidate is None:
134 return
135 pypi_version = str(candidate.version)
136
137 # save that we've performed a check
138 state.save(pypi_version, current_time)
139
140 remote_version = packaging_version.parse(pypi_version)
141
142 local_version_is_older = (
143 pip_version < remote_version and
144 pip_version.base_version != remote_version.base_version and
145 was_installed_by_pip('pip')
146 )
147
148 # Determine if our pypi_version is older
149 if not local_version_is_older:
150 return
151
152 # Advise "python -m pip" on Windows to avoid issues
153 # with overwriting pip.exe.
154 if WINDOWS:
155 pip_cmd = "python -m pip"
156 else:
157 pip_cmd = "pip"
158 logger.warning(
159 "You are using pip version %s, however version %s is "
160 "available.\nYou should consider upgrading via the "
161 "'%s install --upgrade pip' command.",
162 pip_version, pypi_version, pip_cmd
163 )
164 except Exception:
165 logger.debug(
166 "There was an error checking the latest version of pip",
167 exc_info=True,
168 )
169
[end of src/pip/_internal/utils/outdated.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/utils/outdated.py b/src/pip/_internal/utils/outdated.py
--- a/src/pip/_internal/utils/outdated.py
+++ b/src/pip/_internal/utils/outdated.py
@@ -13,6 +13,7 @@
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.misc import ensure_dir, get_installed_version
+from pip._internal.utils.packaging import get_installer
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
@@ -85,8 +86,7 @@
"""
try:
dist = pkg_resources.get_distribution(pkg)
- return (dist.has_metadata('INSTALLER') and
- 'pip' in dist.get_metadata_lines('INSTALLER'))
+ return "pip" == get_installer(dist)
except pkg_resources.DistributionNotFound:
return False
| {"golden_diff": "diff --git a/src/pip/_internal/utils/outdated.py b/src/pip/_internal/utils/outdated.py\n--- a/src/pip/_internal/utils/outdated.py\n+++ b/src/pip/_internal/utils/outdated.py\n@@ -13,6 +13,7 @@\n from pip._internal.utils.compat import WINDOWS\n from pip._internal.utils.filesystem import check_path_owner\n from pip._internal.utils.misc import ensure_dir, get_installed_version\n+from pip._internal.utils.packaging import get_installer\n from pip._internal.utils.typing import MYPY_CHECK_RUNNING\n \n if MYPY_CHECK_RUNNING:\n@@ -85,8 +86,7 @@\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(pkg)\n- return (dist.has_metadata('INSTALLER') and\n- 'pip' in dist.get_metadata_lines('INSTALLER'))\n+ return \"pip\" == get_installer(dist)\n except pkg_resources.DistributionNotFound:\n return False\n", "issue": "Replace overly specific function definition with more general alternative\nIn `outdated.py`, there is a function definition for `was_installed_by_pip` which can be simplified (or gotten rid of), by using `get_installer` defined in another file.\r\n\r\nhttps://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/outdated.py#L79\r\n\r\nhttps://github.com/pypa/pip/blob/ba539093754bc96dcdb7f4a48911deffcbcc8725/src/pip/_internal/utils/packaging.py#L86\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile, pkg_resources\nfrom pip._vendor.packaging import version as packaging_version\n\nfrom pip._internal.index import PackageFinder\nfrom pip._internal.utils.compat import WINDOWS\nfrom pip._internal.utils.filesystem import check_path_owner\nfrom pip._internal.utils.misc import ensure_dir, get_installed_version\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n import optparse\n from typing import Any, Dict\n from pip._internal.download import PipSession\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelfCheckState(object):\n def __init__(self, cache_dir):\n # type: (str) -> None\n self.state = {} # type: Dict[str, Any]\n self.statefile_path = None\n\n # Try to load the existing state\n if cache_dir:\n self.statefile_path = os.path.join(cache_dir, \"selfcheck.json\")\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n # Explicitly suppressing exceptions, since we don't want to\n # error out if the cache file is invalid.\n pass\n\n def save(self, pypi_version, current_time):\n # type: (str, datetime.datetime) -> None\n # If we do not have a path to cache in, don't bother saving.\n if not self.statefile_path:\n return\n\n # Check to make sure that we own the directory\n if not check_path_owner(os.path.dirname(self.statefile_path)):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n ensure_dir(os.path.dirname(self.statefile_path))\n\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n if os.path.exists(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n else:\n state = {}\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as 
statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef was_installed_by_pip(pkg):\n # type: (str) -> bool\n \"\"\"Checks whether pkg was installed by pip\n\n This is used not to display the upgrade message when pip is in fact\n installed by system package manager, such as dnf on Fedora.\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(pkg)\n return (dist.has_metadata('INSTALLER') and\n 'pip' in dist.get_metadata_lines('INSTALLER'))\n except pkg_resources.DistributionNotFound:\n return False\n\n\ndef pip_version_check(session, options):\n # type: (PipSession, optparse.Values) -> None\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n installed_version = get_installed_version(\"pip\")\n if not installed_version:\n return\n\n pip_version = packaging_version.parse(installed_version)\n pypi_version = None\n\n try:\n state = SelfCheckState(cache_dir=options.cache_dir)\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n # Lets use PackageFinder to see what the latest pip version is\n finder = PackageFinder.create(\n find_links=options.find_links,\n index_urls=[options.index_url] + options.extra_index_urls,\n allow_all_prereleases=False, # Explicitly set to False\n trusted_hosts=options.trusted_hosts,\n session=session,\n )\n candidate = finder.find_candidates(\"pip\").get_best()\n if candidate is None:\n return\n pypi_version = str(candidate.version)\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n remote_version = packaging_version.parse(pypi_version)\n\n local_version_is_older = (\n pip_version < remote_version and\n pip_version.base_version != remote_version.base_version and\n was_installed_by_pip('pip')\n )\n\n # Determine if our pypi_version is older\n if not local_version_is_older:\n return\n\n # Advise \"python -m pip\" on Windows to avoid issues\n # with overwriting pip.exe.\n if WINDOWS:\n pip_cmd = \"python -m pip\"\n else:\n pip_cmd = \"pip\"\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'%s install --upgrade pip' command.\",\n pip_version, pypi_version, pip_cmd\n )\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "src/pip/_internal/utils/outdated.py"}]} | 2,392 | 206 |
gh_patches_debug_9126 | rasdani/github-patches | git_diff | sunpy__sunpy-5186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`HMISynopticMap` is missing a docstring
`HMISynopticMap` has no docstring. At minimum, it should have something analogous to what `MDISynopticMap` has:
https://github.com/sunpy/sunpy/blob/824f72026403001c65e23f24ced3ff5eb7d13a68/sunpy/map/sources/soho.py#L227-L229
However, it'd be better if it had more meat to it (e.g., explanation and references).
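
For illustration, a minimal docstring along those lines might read something like the following. The wording and references are a suggestion rather than final text, and the class fragment assumes `HMIMap` from the same `sunpy/map/sources/sdo.py` module quoted below:

```python
class HMISynopticMap(HMIMap):
    """
    SDO/HMI Synoptic Map.

    Synoptic maps are constructed from HMI 720s line-of-sight magnetograms
    collected over a 27-day solar rotation.

    See `~sunpy.map.sources.sdo.HMIMap` for information on the HMI instrument.

    References
    ----------
    * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`__
    * `JSOC's HMI Synoptic Charts <http://jsoc.stanford.edu/HMI/LOS_Synoptic_charts.html>`__
    """
```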
</issue>
<code>
[start of sunpy/map/sources/sdo.py]
1 """SDO Map subclass definitions"""
2
3 import numpy as np
4
5 import astropy.units as u
6 from astropy.coordinates import CartesianRepresentation, HeliocentricMeanEcliptic
7 from astropy.visualization import AsinhStretch
8 from astropy.visualization.mpl_normalize import ImageNormalize
9
10 from sunpy import log
11 from sunpy.map import GenericMap
12 from sunpy.map.sources.source_type import source_stretch
13
14 __all__ = ['AIAMap', 'HMIMap', 'HMISynopticMap']
15
16
17 class AIAMap(GenericMap):
18 """AIA Image Map.
19
20 The Atmospheric Imaging Assembly is a set of four telescopes that employ
21 normal-incidence, multi-layer coated optics to provide narrow-band imaging
22 of the Sun. It provides high resolution full-disk images of the corona and
23 transition region up to 0.5 solar radii above the solar limb with 1.5
24 arcsecond angular resolution and 12-second temporal resolution. It observes
25 the Sun in the following seven extreme ultraviolet bandpasses: 94 A
26 (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV),
27 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes
28 in the visible 1600 A (C IV) and the nearby continuum (1700 A).
29
30 Notes
31 -----
32 Observer location: The standard AIA FITS header provides the spacecraft location in multiple
33 coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst
34 (HGS). SunPy uses the provided HAE coordinates due to accuracy concerns with the provided
35 HGS coordinates, but other software packages may make different choices.
36
37 References
38 ----------
39 * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_
40 * `Instrument Page <https://aia.lmsal.com>`_
41 * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_
42 * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_
43 * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_
44 * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_
45 """
46
47 def __init__(self, data, header, **kwargs):
48 if 'bunit' not in header and 'pixlunit' in header:
49 # PIXLUNIT is not a FITS standard keyword
50 header['bunit'] = header['pixlunit']
51
52 super().__init__(data, header, **kwargs)
53
54 # Fill in some missing info
55 self.meta['detector'] = self.meta.get('detector', "AIA")
56 self._nickname = self.detector
57 self.plot_settings['cmap'] = self._get_cmap_name()
58 self.plot_settings['norm'] = ImageNormalize(
59 stretch=source_stretch(self.meta, AsinhStretch(0.01)), clip=False)
60
61 @property
62 def _supported_observer_coordinates(self):
63 return [(('haex_obs', 'haey_obs', 'haez_obs'), {'x': self.meta.get('haex_obs'),
64 'y': self.meta.get('haey_obs'),
65 'z': self.meta.get('haez_obs'),
66 'unit': u.m,
67 'representation_type': CartesianRepresentation,
68 'frame': HeliocentricMeanEcliptic})
69 ] + super()._supported_observer_coordinates
70
71 @property
72 def observatory(self):
73 """
74 Returns the observatory.
75 """
76 return self.meta.get('telescop', '').split('/')[0]
77
78 @classmethod
79 def is_datasource_for(cls, data, header, **kwargs):
80 """Determines if header corresponds to an AIA image"""
81 return str(header.get('instrume', '')).startswith('AIA')
82
83
84 class HMIMap(GenericMap):
85 """HMI Image Map.
86
87 HMI consists of a refracting telescope, a polarization selector,
88 an image stabilization system, a narrow band tunable filter
89 and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I
90 absorption line at 6173 Angstrom with a resolution of 1 arc-second.
91 HMI takes images in a sequence of tuning and polarizations at a 4-second
92 cadence for each camera. One camera is dedicated to a 45 s Doppler and
93 line-of-sight field sequence while the other to a 90 s vector field
94 sequence.
95
96 References
97 ----------
98 * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_
99 * `Instrument Page <http://hmi.stanford.edu>`_
100 * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_
101 """
102
103 def __init__(self, data, header, **kwargs):
104
105 super().__init__(data, header, **kwargs)
106
107 self.meta['detector'] = self.meta.get('detector', "HMI")
108 self._nickname = self.detector
109
110 @property
111 def measurement(self):
112 """
113 Returns the measurement type.
114 """
115 return self.meta.get('content', '').split(" ")[0].lower()
116
117 @property
118 def observatory(self):
119 """
120 Returns the observatory.
121 """
122 return self.meta.get('telescop', '').split('/')[0]
123
124 @classmethod
125 def is_datasource_for(cls, data, header, **kwargs):
126 """Determines if header corresponds to an HMI image"""
127 return (str(header.get('TELESCOP', '')).endswith('HMI') and
128 not HMISynopticMap.is_datasource_for(data, header))
129
130
131 class HMISynopticMap(HMIMap):
132
133 def __init__(self, data, header, **kwargs):
134 super().__init__(data, header, **kwargs)
135
136 if self.meta['cunit1'] == 'Degree':
137 self.meta['cunit1'] = 'deg'
138
139 if self.meta['cunit2'] == 'Sine Latitude':
140 log.debug("Editing CUNIT2, CDELT1, CDLET2 keywords to the correct "
141 "values for a CEA projection.")
142 self.meta['cunit2'] = 'deg'
143
144 # Since, this map uses the cylindrical equal-area (CEA) projection,
145 # the spacing should be modified to 180/pi times the original value
146 # Reference: Section 5.5, Thompson 2006
147 self.meta['cdelt2'] = 180 / np.pi * self.meta['cdelt2']
148 self.meta['cdelt1'] = np.abs(self.meta['cdelt1'])
149
150 if 'date-obs' not in self.meta and 't_obs' in self.meta:
151 log.debug('Setting "DATE-OBS" keyword from "T_OBS"')
152 self.meta['date-obs'] = self.meta['t_obs']
153
154 @classmethod
155 def is_datasource_for(cls, data, header, **kwargs):
156 """
157 Determines if header corresponds to an HMI synoptic map.
158 """
159 return (str(header.get('TELESCOP', '')).endswith('HMI') and
160 str(header.get('CONTENT', '')) ==
161 'Carrington Synoptic Chart Of Br Field')
162
[end of sunpy/map/sources/sdo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/map/sources/sdo.py b/sunpy/map/sources/sdo.py
--- a/sunpy/map/sources/sdo.py
+++ b/sunpy/map/sources/sdo.py
@@ -129,7 +129,19 @@
class HMISynopticMap(HMIMap):
+ """
+ SDO/HMI Synoptic Map.
+
+ Synoptic maps are constructed from HMI 720s line-of-sight magnetograms
+ collected over a 27-day solar rotation.
+
+ See `~sunpy.map.sources.sdo.HMIMap` for information on the HMI instrument.
+ References
+ ----------
+ * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`__
+ * `JSOC's HMI Synoptic Charts <http://jsoc.stanford.edu/HMI/LOS_Synoptic_charts.html>`__
+ """
def __init__(self, data, header, **kwargs):
super().__init__(data, header, **kwargs)
| {"golden_diff": "diff --git a/sunpy/map/sources/sdo.py b/sunpy/map/sources/sdo.py\n--- a/sunpy/map/sources/sdo.py\n+++ b/sunpy/map/sources/sdo.py\n@@ -129,7 +129,19 @@\n \n \n class HMISynopticMap(HMIMap):\n+ \"\"\"\n+ SDO/HMI Synoptic Map.\n+\n+ Synoptic maps are constructed from HMI 720s line-of-sight magnetograms\n+ collected over a 27-day solar rotation.\n+\n+ See `~sunpy.map.sources.sdo.HMIMap` for information on the HMI instrument.\n \n+ References\n+ ----------\n+ * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`__\n+ * `JSOC's HMI Synoptic Charts <http://jsoc.stanford.edu/HMI/LOS_Synoptic_charts.html>`__\n+ \"\"\"\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n", "issue": "`HMISynopticMap` is missing a docstring\n`HMISynopticMap` has no docstring. At minimum, it should at least have something analogous to what `MDISynopticMap` has:\r\nhttps://github.com/sunpy/sunpy/blob/824f72026403001c65e23f24ced3ff5eb7d13a68/sunpy/map/sources/soho.py#L227-L229\r\n\r\nHowever, it'd be better if it had more meat to it (e.g., explanation and references).\n", "before_files": [{"content": "\"\"\"SDO Map subclass definitions\"\"\"\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.coordinates import CartesianRepresentation, HeliocentricMeanEcliptic\nfrom astropy.visualization import AsinhStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\n\nfrom sunpy import log\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['AIAMap', 'HMIMap', 'HMISynopticMap']\n\n\nclass AIAMap(GenericMap):\n \"\"\"AIA Image Map.\n\n The Atmospheric Imaging Assembly is a set of four telescopes that employ\n normal-incidence, multi-layer coated optics to provide narrow-band imaging\n of the Sun. It provides high resolution full-disk images of the corona and\n transition region up to 0.5 solar radii above the solar limb with 1.5\n arcsecond angular resolution and 12-second temporal resolution. It observes\n the Sun in the following seven extreme ultraviolet bandpasses: 94 A\n (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV),\n 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes\n in the visible 1600 A (C IV) and the nearby continuum (1700 A).\n\n Notes\n -----\n Observer location: The standard AIA FITS header provides the spacecraft location in multiple\n coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst\n (HGS). 
SunPy uses the provided HAE coordinates due to accuracy concerns with the provided\n HGS coordinates, but other software packages may make different choices.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <https://aia.lmsal.com>`_\n * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_\n * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_\n * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_\n * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n if 'bunit' not in header and 'pixlunit' in header:\n # PIXLUNIT is not a FITS standard keyword\n header['bunit'] = header['pixlunit']\n\n super().__init__(data, header, **kwargs)\n\n # Fill in some missing info\n self.meta['detector'] = self.meta.get('detector', \"AIA\")\n self._nickname = self.detector\n self.plot_settings['cmap'] = self._get_cmap_name()\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, AsinhStretch(0.01)), clip=False)\n\n @property\n def _supported_observer_coordinates(self):\n return [(('haex_obs', 'haey_obs', 'haez_obs'), {'x': self.meta.get('haex_obs'),\n 'y': self.meta.get('haey_obs'),\n 'z': self.meta.get('haez_obs'),\n 'unit': u.m,\n 'representation_type': CartesianRepresentation,\n 'frame': HeliocentricMeanEcliptic})\n ] + super()._supported_observer_coordinates\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta.get('telescop', '').split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an AIA image\"\"\"\n return str(header.get('instrume', '')).startswith('AIA')\n\n\nclass HMIMap(GenericMap):\n \"\"\"HMI Image Map.\n\n HMI consists of a refracting telescope, a polarization selector,\n an image stabilization system, a narrow band tunable filter\n and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I\n absorption line at 6173 Angstrom with a resolution of 1 arc-second.\n HMI takes images in a sequence of tuning and polarizations at a 4-second\n cadence for each camera. 
One camera is dedicated to a 45 s Doppler and\n line-of-sight field sequence while the other to a 90 s vector field\n sequence.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <http://hmi.stanford.edu>`_\n * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n super().__init__(data, header, **kwargs)\n\n self.meta['detector'] = self.meta.get('detector', \"HMI\")\n self._nickname = self.detector\n\n @property\n def measurement(self):\n \"\"\"\n Returns the measurement type.\n \"\"\"\n return self.meta.get('content', '').split(\" \")[0].lower()\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta.get('telescop', '').split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an HMI image\"\"\"\n return (str(header.get('TELESCOP', '')).endswith('HMI') and\n not HMISynopticMap.is_datasource_for(data, header))\n\n\nclass HMISynopticMap(HMIMap):\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n\n if self.meta['cunit1'] == 'Degree':\n self.meta['cunit1'] = 'deg'\n\n if self.meta['cunit2'] == 'Sine Latitude':\n log.debug(\"Editing CUNIT2, CDELT1, CDLET2 keywords to the correct \"\n \"values for a CEA projection.\")\n self.meta['cunit2'] = 'deg'\n\n # Since, this map uses the cylindrical equal-area (CEA) projection,\n # the spacing should be modified to 180/pi times the original value\n # Reference: Section 5.5, Thompson 2006\n self.meta['cdelt2'] = 180 / np.pi * self.meta['cdelt2']\n self.meta['cdelt1'] = np.abs(self.meta['cdelt1'])\n\n if 'date-obs' not in self.meta and 't_obs' in self.meta:\n log.debug('Setting \"DATE-OBS\" keyword from \"T_OBS\"')\n self.meta['date-obs'] = self.meta['t_obs']\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"\n Determines if header corresponds to an HMI synoptic map.\n \"\"\"\n return (str(header.get('TELESCOP', '')).endswith('HMI') and\n str(header.get('CONTENT', '')) ==\n 'Carrington Synoptic Chart Of Br Field')\n", "path": "sunpy/map/sources/sdo.py"}]} | 2,778 | 233 |
gh_patches_debug_20315 | rasdani/github-patches | git_diff | Qiskit__qiskit-2302 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
multi-language documentation
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected behavior?
I would like to remove all the multi-language documentation from this repository and add it to github.com/Qiskit/qiskit. I am not sure of the best way to do it, and I will let everyone discuss in that repository how we should do it. But we are not going to have documentation in the individual elements (only a readme about the element, contributing guidelines and other community files).
@alfrisch could you take the lead on the German
@rraymondhp could you take the lead on the Japanese
@hanheepaik could you take the lead on the Korean
@liupibm could you take the lead on the Chinese
Thanks
</issue>
<code>
[start of docs/de/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 """
4 Language specific configuration file, inheriting from the main /doc
5 conf.py file and adjusting the variables that depend on the language.
6 """
7
8 import os
9 import sys
10
11 sys.path.insert(0, os.path.abspath('..'))
12 from conf import *
13
14 language = 'de'
15 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
16
17 templates_path = ['../_templates']
18 html_static_path = ['../theme/static/']
19 html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
20 html_favicon = '../theme/static/favicon.ico'
21
[end of docs/de/conf.py]
[start of docs/ja/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 """
4 Language specific configuration file, inheriting from the main /doc
5 conf.py file and adjusting the variables that depend on the language.
6 """
7
8 import os
9 import sys
10
11 sys.path.insert(0, os.path.abspath('..'))
12 from conf import *
13
14 language = 'ja'
15 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
16
17 templates_path = ['../_templates']
18 html_static_path = ['../theme/static/']
19 html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
20 html_favicon = '../theme/static/favicon.ico'
21
[end of docs/ja/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/de/conf.py b/docs/de/conf.py
deleted file mode 100644
--- a/docs/de/conf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Language specific configuration file, inheriting from the main /doc
-conf.py file and adjusting the variables that depend on the language.
-"""
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('..'))
-from conf import *
-
-language = 'de'
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
-
-templates_path = ['../_templates']
-html_static_path = ['../theme/static/']
-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
-html_favicon = '../theme/static/favicon.ico'
diff --git a/docs/ja/conf.py b/docs/ja/conf.py
deleted file mode 100644
--- a/docs/ja/conf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Language specific configuration file, inheriting from the main /doc
-conf.py file and adjusting the variables that depend on the language.
-"""
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('..'))
-from conf import *
-
-language = 'ja'
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']
-
-templates_path = ['../_templates']
-html_static_path = ['../theme/static/']
-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'
-html_favicon = '../theme/static/favicon.ico'
| {"golden_diff": "diff --git a/docs/de/conf.py b/docs/de/conf.py\ndeleted file mode 100644\n--- a/docs/de/conf.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#!/usr/bin/env python3\n-# -*- coding: utf-8 -*-\n-\"\"\"\n-Language specific configuration file, inheriting from the main /doc\n-conf.py file and adjusting the variables that depend on the language.\n-\"\"\"\n-\n-import os\n-import sys\n-\n-sys.path.insert(0, os.path.abspath('..'))\n-from conf import *\n-\n-language = 'de'\n-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n-\n-templates_path = ['../_templates']\n-html_static_path = ['../theme/static/']\n-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\n-html_favicon = '../theme/static/favicon.ico'\ndiff --git a/docs/ja/conf.py b/docs/ja/conf.py\ndeleted file mode 100644\n--- a/docs/ja/conf.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#!/usr/bin/env python3\n-# -*- coding: utf-8 -*-\n-\"\"\"\n-Language specific configuration file, inheriting from the main /doc\n-conf.py file and adjusting the variables that depend on the language.\n-\"\"\"\n-\n-import os\n-import sys\n-\n-sys.path.insert(0, os.path.abspath('..'))\n-from conf import *\n-\n-language = 'ja'\n-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n-\n-templates_path = ['../_templates']\n-html_static_path = ['../theme/static/']\n-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\n-html_favicon = '../theme/static/favicon.ico'\n", "issue": "multi-language documentation \n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected behavior?\r\nI would like to remove all the multi-language documentation from this repository and add it to github.com/Qiskit/qiskit. I am not sure of the best way we do it and I let all discuss in that repository how we should do it. 
But we are not going to have documentation in the individual elements (only a readme about the element, contributing guidelines and other community files)\r\n\r\n@alfrisch could you take the lead on the German\r\n@rraymondhp could you take the lead on the Japanese\r\n@hanheepaik could you take the lead on the Korean\r\n@liupibm could you take the lead on the Chinese\r\n\r\nThanks \r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLanguage specific configuration file, inheriting from the main /doc\nconf.py file and adjusting the variables that depend on the language.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\nfrom conf import *\n\nlanguage = 'de'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n\ntemplates_path = ['../_templates']\nhtml_static_path = ['../theme/static/']\nhtml_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\nhtml_favicon = '../theme/static/favicon.ico'\n", "path": "docs/de/conf.py"}, {"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLanguage specific configuration file, inheriting from the main /doc\nconf.py file and adjusting the variables that depend on the language.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\nfrom conf import *\n\nlanguage = 'ja'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n\ntemplates_path = ['../_templates']\nhtml_static_path = ['../theme/static/']\nhtml_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\nhtml_favicon = '../theme/static/favicon.ico'\n", "path": "docs/ja/conf.py"}]} | 1,090 | 395 |
gh_patches_debug_43406 | rasdani/github-patches | git_diff | sublimelsp__LSP-707 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reduce impact of LSP color listener by default.
@predragnikolic: Noticed while closing views after find-replace in pyls:
```
Traceback (most recent call last):
File "/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py", line 506, in run_async_view_listener_callback
vel.__class__.__dict__[name](vel)
File "/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py", line 47, in on_activated_async
self.schedule_request()
File "/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py", line 53, in schedule_request
current_point = self.view.sel()[0].begin()
File "/Applications/Sublime Text.app/Contents/MacOS/sublime.py", line 649, in __getitem__
raise IndexError()
IndexError
```
* We should probably fix the above issue (maybe it's rare but I ran into this edge case pretty quickly)
* We could avoid creating ViewEventListeners for a few kinds of irrelevant views: read-only, transient, unsupported syntaxes (maybe `DocumentSyncListener`'s `is_applicable` is suitable?)
* We could avoid running `LspColorListener`'s `schedule_request` until a session with colorProvider for a given view has been found.
</issue>
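A minimal sketch of the guard suggested by the first bullet above, assuming only Sublime Text's standard `View.sel()` API (which returns a possibly empty selection list); the reference patch later in this entry handles the empty-selection case in essentially the same way:

```python
# Sketch only: a defensive variant of LspColorListener.schedule_request from
# plugin/color.py below that tolerates views with no selection (e.g. views
# that are being closed), avoiding the IndexError in the traceback above.
def schedule_request(self):
    sel = self.view.sel()
    if len(sel) < 1:
        # No cursor in this view; skip scheduling a documentColor request.
        return
    current_point = sel[0].begin()
    if self._stored_point != current_point:
        self._stored_point = current_point
        sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)
```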
<code>
[start of plugin/color.py]
1 import sublime_plugin
2 import sublime
3
4 try:
5 from typing import Any, List, Dict, Callable, Optional
6 assert Any and List and Dict and Callable and Optional
7 except ImportError:
8 pass
9
10 from .core.protocol import Request
11 from .core.url import filename_to_uri
12 from .core.registry import session_for_view
13 from .core.settings import settings
14 from .core.views import range_to_region
15 from .core.protocol import Range
16
17
18 def send_color_request(view, on_response_recieved: 'Callable'):
19 session = session_for_view(view)
20 if not session or not session.has_capability('colorProvider'):
21 # the server doesn't support colors, just return
22 return
23
24 params = {
25 "textDocument": {
26 "uri": filename_to_uri(view.file_name())
27 }
28 }
29 session.client.send_request(
30 Request.documentColor(params),
31 lambda response: on_response_recieved(response))
32
33
34 class LspColorListener(sublime_plugin.ViewEventListener):
35 def __init__(self, view: sublime.View) -> None:
36 super().__init__(view)
37 self.color_phantom_set = None # type: Optional[sublime.PhantomSet]
38 self._stored_point = -1
39
40 @classmethod
41 def is_applicable(cls, _settings):
42 return 'colorProvider' not in settings.disabled_capabilities
43
44 def on_activated_async(self):
45 self.schedule_request()
46
47 def on_modified_async(self):
48 self.schedule_request()
49
50 def schedule_request(self):
51 current_point = self.view.sel()[0].begin()
52 if self._stored_point != current_point:
53 self._stored_point = current_point
54 sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)
55
56 def fire_request(self, current_point: int) -> None:
57 if current_point == self._stored_point:
58 send_color_request(self.view, self.handle_response)
59
60 def handle_response(self, response) -> None:
61 phantoms = []
62 for val in response:
63 color = val['color']
64 red = color['red'] * 255
65 green = color['green'] * 255
66 blue = color['blue'] * 255
67 alpha = color['alpha']
68
69 content = """
70 <div style='padding: 0.4em;
71 margin-top: 0.1em;
72 border: 1px solid color(var(--foreground) alpha(0.25));
73 background-color: rgba({}, {}, {}, {})'>
74 </div>""".format(red, green, blue, alpha)
75
76 range = Range.from_lsp(val['range'])
77 region = range_to_region(range, self.view)
78
79 phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE))
80
81 if phantoms:
82 if not self.color_phantom_set:
83 self.color_phantom_set = sublime.PhantomSet(self.view, "lsp_color")
84 self.color_phantom_set.update(phantoms)
85 else:
86 self.color_phantom_set = None
87
88
89 def remove_color_boxes(view):
90 view.erase_phantoms('lsp_color')
91
[end of plugin/color.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/color.py b/plugin/color.py
--- a/plugin/color.py
+++ b/plugin/color.py
@@ -9,26 +9,12 @@
from .core.protocol import Request
from .core.url import filename_to_uri
-from .core.registry import session_for_view
-from .core.settings import settings
+from .core.registry import session_for_view, config_for_scope
+from .core.settings import settings, client_configs
from .core.views import range_to_region
from .core.protocol import Range
-
-
-def send_color_request(view, on_response_recieved: 'Callable'):
- session = session_for_view(view)
- if not session or not session.has_capability('colorProvider'):
- # the server doesn't support colors, just return
- return
-
- params = {
- "textDocument": {
- "uri": filename_to_uri(view.file_name())
- }
- }
- session.client.send_request(
- Request.documentColor(params),
- lambda response: on_response_recieved(response))
+from .core.configurations import is_supported_syntax
+from .core.documents import is_transient_view
class LspColorListener(sublime_plugin.ViewEventListener):
@@ -36,26 +22,72 @@
super().__init__(view)
self.color_phantom_set = None # type: Optional[sublime.PhantomSet]
self._stored_point = -1
+ self.initialized = False
+ self.enabled = False
@classmethod
def is_applicable(cls, _settings):
- return 'colorProvider' not in settings.disabled_capabilities
+ syntax = _settings.get('syntax')
+ is_supported = syntax and is_supported_syntax(syntax, client_configs.all)
+ disabled_by_user = 'colorProvider' in settings.disabled_capabilities
+ return is_supported and not disabled_by_user
def on_activated_async(self):
- self.schedule_request()
+ if not self.initialized:
+ self.initialize()
+
+ def initialize(self, is_retry=False):
+ config = config_for_scope(self.view)
+ if not config:
+ self.initialized = True # no server enabled, re-open file to activate feature.
+
+ session = session_for_view(self.view)
+ if session:
+ self.initialized = True
+ self.enabled = session.has_capability('colorProvider')
+ if self.enabled:
+ self.send_color_request()
+ elif not is_retry:
+ # session may be starting, try again once in a second.
+ sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000)
+ else:
+ self.initialized = True # we retried but still no session available.
def on_modified_async(self):
- self.schedule_request()
+ if self.enabled:
+ self.schedule_request()
def schedule_request(self):
- current_point = self.view.sel()[0].begin()
+ sel = self.view.sel()
+ if len(sel) < 1:
+ return
+
+ current_point = sel[0].begin()
if self._stored_point != current_point:
self._stored_point = current_point
sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)
def fire_request(self, current_point: int) -> None:
if current_point == self._stored_point:
- send_color_request(self.view, self.handle_response)
+ self.send_color_request()
+
+ def send_color_request(self):
+ if is_transient_view(self.view):
+ return
+
+ session = session_for_view(self.view)
+ if not session:
+ return
+
+ params = {
+ "textDocument": {
+ "uri": filename_to_uri(self.view.file_name())
+ }
+ }
+ session.client.send_request(
+ Request.documentColor(params),
+ self.handle_response
+ )
def handle_response(self, response) -> None:
phantoms = []
@@ -68,7 +100,7 @@
content = """
<div style='padding: 0.4em;
- margin-top: 0.1em;
+ margin-top: 0.2em;
border: 1px solid color(var(--foreground) alpha(0.25));
background-color: rgba({}, {}, {}, {})'>
</div>""".format(red, green, blue, alpha)
| {"golden_diff": "diff --git a/plugin/color.py b/plugin/color.py\n--- a/plugin/color.py\n+++ b/plugin/color.py\n@@ -9,26 +9,12 @@\n \n from .core.protocol import Request\n from .core.url import filename_to_uri\n-from .core.registry import session_for_view\n-from .core.settings import settings\n+from .core.registry import session_for_view, config_for_scope\n+from .core.settings import settings, client_configs\n from .core.views import range_to_region\n from .core.protocol import Range\n-\n-\n-def send_color_request(view, on_response_recieved: 'Callable'):\n- session = session_for_view(view)\n- if not session or not session.has_capability('colorProvider'):\n- # the server doesn't support colors, just return\n- return\n-\n- params = {\n- \"textDocument\": {\n- \"uri\": filename_to_uri(view.file_name())\n- }\n- }\n- session.client.send_request(\n- Request.documentColor(params),\n- lambda response: on_response_recieved(response))\n+from .core.configurations import is_supported_syntax\n+from .core.documents import is_transient_view\n \n \n class LspColorListener(sublime_plugin.ViewEventListener):\n@@ -36,26 +22,72 @@\n super().__init__(view)\n self.color_phantom_set = None # type: Optional[sublime.PhantomSet]\n self._stored_point = -1\n+ self.initialized = False\n+ self.enabled = False\n \n @classmethod\n def is_applicable(cls, _settings):\n- return 'colorProvider' not in settings.disabled_capabilities\n+ syntax = _settings.get('syntax')\n+ is_supported = syntax and is_supported_syntax(syntax, client_configs.all)\n+ disabled_by_user = 'colorProvider' in settings.disabled_capabilities\n+ return is_supported and not disabled_by_user\n \n def on_activated_async(self):\n- self.schedule_request()\n+ if not self.initialized:\n+ self.initialize()\n+\n+ def initialize(self, is_retry=False):\n+ config = config_for_scope(self.view)\n+ if not config:\n+ self.initialized = True # no server enabled, re-open file to activate feature.\n+\n+ session = session_for_view(self.view)\n+ if session:\n+ self.initialized = True\n+ self.enabled = session.has_capability('colorProvider')\n+ if self.enabled:\n+ self.send_color_request()\n+ elif not is_retry:\n+ # session may be starting, try again once in a second.\n+ sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000)\n+ else:\n+ self.initialized = True # we retried but still no session available.\n \n def on_modified_async(self):\n- self.schedule_request()\n+ if self.enabled:\n+ self.schedule_request()\n \n def schedule_request(self):\n- current_point = self.view.sel()[0].begin()\n+ sel = self.view.sel()\n+ if len(sel) < 1:\n+ return\n+\n+ current_point = sel[0].begin()\n if self._stored_point != current_point:\n self._stored_point = current_point\n sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)\n \n def fire_request(self, current_point: int) -> None:\n if current_point == self._stored_point:\n- send_color_request(self.view, self.handle_response)\n+ self.send_color_request()\n+\n+ def send_color_request(self):\n+ if is_transient_view(self.view):\n+ return\n+\n+ session = session_for_view(self.view)\n+ if not session:\n+ return\n+\n+ params = {\n+ \"textDocument\": {\n+ \"uri\": filename_to_uri(self.view.file_name())\n+ }\n+ }\n+ session.client.send_request(\n+ Request.documentColor(params),\n+ self.handle_response\n+ )\n \n def handle_response(self, response) -> None:\n phantoms = []\n@@ -68,7 +100,7 @@\n \n content = \"\"\"\n <div style='padding: 0.4em;\n- margin-top: 0.1em;\n+ margin-top: 0.2em;\n border: 1px solid 
color(var(--foreground) alpha(0.25));\n background-color: rgba({}, {}, {}, {})'>\n </div>\"\"\".format(red, green, blue, alpha)\n", "issue": "Reduce impact of LSP color listener by default.\n@predragnikolic: Noticed while closing views after find-replace in pyls:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py\", line 506, in run_async_view_listener_callback\r\n vel.__class__.__dict__[name](vel)\r\n File \"/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py\", line 47, in on_activated_async\r\n self.schedule_request()\r\n File \"/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py\", line 53, in schedule_request\r\n current_point = self.view.sel()[0].begin()\r\n File \"/Applications/Sublime Text.app/Contents/MacOS/sublime.py\", line 649, in __getitem__\r\n raise IndexError()\r\nIndexError\r\n```\r\n\r\n* We should probably fix the above issue (maybe it's rare but I ran into this edge case pretty quickly)\r\n* We could avoid creating ViewEventListeners for a few kinds of irrelevant views: Read-only, transient, unsupported syntaxes ( Maybe `DocumentSyncListener` `is_applicable` is suitable?)\r\n* We could avoid running `LspColorListener`'s `schedule_request` until a session with colorProvider for a given view has been found.\r\n\n", "before_files": [{"content": "import sublime_plugin\nimport sublime\n\ntry:\n from typing import Any, List, Dict, Callable, Optional\n assert Any and List and Dict and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.protocol import Request\nfrom .core.url import filename_to_uri\nfrom .core.registry import session_for_view\nfrom .core.settings import settings\nfrom .core.views import range_to_region\nfrom .core.protocol import Range\n\n\ndef send_color_request(view, on_response_recieved: 'Callable'):\n session = session_for_view(view)\n if not session or not session.has_capability('colorProvider'):\n # the server doesn't support colors, just return\n return\n\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(view.file_name())\n }\n }\n session.client.send_request(\n Request.documentColor(params),\n lambda response: on_response_recieved(response))\n\n\nclass LspColorListener(sublime_plugin.ViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.color_phantom_set = None # type: Optional[sublime.PhantomSet]\n self._stored_point = -1\n\n @classmethod\n def is_applicable(cls, _settings):\n return 'colorProvider' not in settings.disabled_capabilities\n\n def on_activated_async(self):\n self.schedule_request()\n\n def on_modified_async(self):\n self.schedule_request()\n\n def schedule_request(self):\n current_point = self.view.sel()[0].begin()\n if self._stored_point != current_point:\n self._stored_point = current_point\n sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)\n\n def fire_request(self, current_point: int) -> None:\n if current_point == self._stored_point:\n send_color_request(self.view, self.handle_response)\n\n def handle_response(self, response) -> None:\n phantoms = []\n for val in response:\n color = val['color']\n red = color['red'] * 255\n green = color['green'] * 255\n blue = color['blue'] * 255\n alpha = color['alpha']\n\n content = \"\"\"\n <div style='padding: 0.4em;\n margin-top: 0.1em;\n border: 1px solid color(var(--foreground) alpha(0.25));\n background-color: rgba({}, {}, {}, {})'>\n </div>\"\"\".format(red, 
green, blue, alpha)\n\n range = Range.from_lsp(val['range'])\n region = range_to_region(range, self.view)\n\n phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE))\n\n if phantoms:\n if not self.color_phantom_set:\n self.color_phantom_set = sublime.PhantomSet(self.view, \"lsp_color\")\n self.color_phantom_set.update(phantoms)\n else:\n self.color_phantom_set = None\n\n\ndef remove_color_boxes(view):\n view.erase_phantoms('lsp_color')\n", "path": "plugin/color.py"}]} | 1,667 | 959 |
gh_patches_debug_21965 | rasdani/github-patches | git_diff | apache__tvm-10188 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] RPC Server Can't Serve Through Proxy Because of Missing Command Line Option
Now we have an RPC server, proxy, and tracker, so if the host machine can't access the RPC server directly, we can put an RPC proxy between the host machine and the RPC server, like "Host --> RPC Proxy --> RPC Server". We can do this from the Python API, but not from the command line, because "tvm.exec.rpc_server" hasn't exposed the parameter "is_proxy" of class "tvm.rpc.server.Server" as a command line option.
### Expected behavior
We should be able to register the RPC server to the RPC proxy from the command line with something like the following.
```shell
python -m tvm.exec.rpc_server --host 0.0.0.0 --port 9090 --through-proxy
```
### Actual behavior
Currently there isn't any command line option to set the parameter "is_proxy".
### Environment
No
### Steps to reproduce
Any
</issue>
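For context, a sketch of the Python-API path that the issue says already works; the host, port and key values here are placeholders, and the reference patch further down adds an equivalent `--through-proxy` flag to `tvm.exec.rpc_server`:

```python
# Sketch only: registering an RPC server through a proxy via the Python API.
# With is_proxy=True the host/port arguments are the proxy's address rather
# than a local address to bind to.
from tvm import rpc

server = rpc.Server(
    "proxy.example.com",  # placeholder proxy host
    9090,                 # placeholder proxy port
    is_proxy=True,
    key="rasp4b",         # placeholder device key announced to the proxy
)
server.proc.join()
```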
<code>
[start of python/tvm/exec/rpc_server.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 # pylint: disable=redefined-outer-name, invalid-name
18 """Start an RPC server"""
19 import argparse
20 import logging
21 from .. import rpc
22
23
24 def main(args):
25 """Main function
26
27 Parameters
28 ----------
29 args : argparse.Namespace
30 parsed args from command-line invocation
31 """
32 if args.tracker:
33 url, port = args.tracker.rsplit(":", 1)
34 port = int(port)
35 tracker_addr = (url, port)
36 if not args.key:
37 raise RuntimeError("Need key to present type of resource when tracker is available")
38 else:
39 tracker_addr = None
40
41 server = rpc.Server(
42 args.host,
43 args.port,
44 args.port_end,
45 key=args.key,
46 tracker_addr=tracker_addr,
47 load_library=args.load_library,
48 custom_addr=args.custom_addr,
49 silent=args.silent,
50 no_fork=not args.fork,
51 )
52 server.proc.join()
53
54
55 if __name__ == "__main__":
56 parser = argparse.ArgumentParser()
57 parser.add_argument(
58 "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
59 )
60 parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
61 parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
62 parser.add_argument(
63 "--tracker",
64 type=str,
65 help=("The address of RPC tracker in host:port format. " "e.g. (10.77.1.234:9190)"),
66 )
67 parser.add_argument(
68 "--key", type=str, default="", help="The key used to identify the device type in tracker."
69 )
70 parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.")
71 parser.add_argument("--load-library", type=str, help="Additional library to load")
72 parser.add_argument(
73 "--no-fork",
74 dest="fork",
75 action="store_false",
76 help="Use spawn mode to avoid fork. This option \
77 is able to avoid potential fork problems with Metal, OpenCL \
78 and ROCM compilers.",
79 )
80 parser.add_argument(
81 "--custom-addr", type=str, help="Custom IP Address to Report to RPC Tracker"
82 )
83
84 parser.set_defaults(fork=True)
85 args = parser.parse_args()
86 logging.basicConfig(level=logging.INFO)
87 if not args.fork is False and not args.silent:
88 logging.info(
89 "If you are running ROCM/Metal, fork will cause "
90 "compiler internal error. Try to launch with arg ```--no-fork```"
91 )
92 main(args)
93
[end of python/tvm/exec/rpc_server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/tvm/exec/rpc_server.py b/python/tvm/exec/rpc_server.py
--- a/python/tvm/exec/rpc_server.py
+++ b/python/tvm/exec/rpc_server.py
@@ -42,6 +42,7 @@
args.host,
args.port,
args.port_end,
+ is_proxy=args.through_proxy,
key=args.key,
tracker_addr=tracker_addr,
load_library=args.load_library,
@@ -58,6 +59,15 @@
"--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
)
parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
+ parser.add_argument(
+ "--through-proxy",
+ dest="through_proxy",
+ action="store_true",
+ help=(
+ "Whether this server provide service through a proxy. If this is true, the host and"
+ "port actually is the address of the proxy."
+ ),
+ )
parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
parser.add_argument(
"--tracker",
| {"golden_diff": "diff --git a/python/tvm/exec/rpc_server.py b/python/tvm/exec/rpc_server.py\n--- a/python/tvm/exec/rpc_server.py\n+++ b/python/tvm/exec/rpc_server.py\n@@ -42,6 +42,7 @@\n args.host,\n args.port,\n args.port_end,\n+ is_proxy=args.through_proxy,\n key=args.key,\n tracker_addr=tracker_addr,\n load_library=args.load_library,\n@@ -58,6 +59,15 @@\n \"--host\", type=str, default=\"0.0.0.0\", help=\"The host IP address the tracker binds to\"\n )\n parser.add_argument(\"--port\", type=int, default=9090, help=\"The port of the RPC\")\n+ parser.add_argument(\n+ \"--through-proxy\",\n+ dest=\"through_proxy\",\n+ action=\"store_true\",\n+ help=(\n+ \"Whether this server provide service through a proxy. If this is true, the host and\"\n+ \"port actually is the address of the proxy.\"\n+ ),\n+ )\n parser.add_argument(\"--port-end\", type=int, default=9199, help=\"The end search port of the RPC\")\n parser.add_argument(\n \"--tracker\",\n", "issue": "[Bug] RPC Server Can't Serve Through Proxy Because of Missing Command Line Option\nNow we have RPC server, proxy, and tracker, so if the host machine can't access the RPC server directly, then we can put RPC proxy between host machine and RPC server like \"Host --> RPC Proxy --> RPC Server\", we can do it from Python API, but we can't do it through command line. Because \"tvm.exec.rpc_server\" haven't exposed the parameter \"is_proxy\" of class \"tvm.rpc.server.Server\" through command line option.\r\n\r\n### Expected behavior\r\n\r\nCan register the RPC server to RPC proxy from command line like something below.\r\n```shell\r\npython -m tvm.exec.rpc_server --host 0.0.0.0 --port 9090 --through-proxy\r\n```\r\n\r\n### Actual behavior\r\n\r\nCurrently haven't any command line option to set the parameter \"is_proxy\".\r\n\r\n### Environment\r\n\r\nNo\r\n\r\n### Steps to reproduce\r\n\r\nAny\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=redefined-outer-name, invalid-name\n\"\"\"Start an RPC server\"\"\"\nimport argparse\nimport logging\nfrom .. 
import rpc\n\n\ndef main(args):\n \"\"\"Main function\n\n Parameters\n ----------\n args : argparse.Namespace\n parsed args from command-line invocation\n \"\"\"\n if args.tracker:\n url, port = args.tracker.rsplit(\":\", 1)\n port = int(port)\n tracker_addr = (url, port)\n if not args.key:\n raise RuntimeError(\"Need key to present type of resource when tracker is available\")\n else:\n tracker_addr = None\n\n server = rpc.Server(\n args.host,\n args.port,\n args.port_end,\n key=args.key,\n tracker_addr=tracker_addr,\n load_library=args.load_library,\n custom_addr=args.custom_addr,\n silent=args.silent,\n no_fork=not args.fork,\n )\n server.proc.join()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--host\", type=str, default=\"0.0.0.0\", help=\"The host IP address the tracker binds to\"\n )\n parser.add_argument(\"--port\", type=int, default=9090, help=\"The port of the RPC\")\n parser.add_argument(\"--port-end\", type=int, default=9199, help=\"The end search port of the RPC\")\n parser.add_argument(\n \"--tracker\",\n type=str,\n help=(\"The address of RPC tracker in host:port format. \" \"e.g. (10.77.1.234:9190)\"),\n )\n parser.add_argument(\n \"--key\", type=str, default=\"\", help=\"The key used to identify the device type in tracker.\"\n )\n parser.add_argument(\"--silent\", action=\"store_true\", help=\"Whether run in silent mode.\")\n parser.add_argument(\"--load-library\", type=str, help=\"Additional library to load\")\n parser.add_argument(\n \"--no-fork\",\n dest=\"fork\",\n action=\"store_false\",\n help=\"Use spawn mode to avoid fork. This option \\\n is able to avoid potential fork problems with Metal, OpenCL \\\n and ROCM compilers.\",\n )\n parser.add_argument(\n \"--custom-addr\", type=str, help=\"Custom IP Address to Report to RPC Tracker\"\n )\n\n parser.set_defaults(fork=True)\n args = parser.parse_args()\n logging.basicConfig(level=logging.INFO)\n if not args.fork is False and not args.silent:\n logging.info(\n \"If you are running ROCM/Metal, fork will cause \"\n \"compiler internal error. Try to launch with arg ```--no-fork```\"\n )\n main(args)\n", "path": "python/tvm/exec/rpc_server.py"}]} | 1,676 | 271 |
gh_patches_debug_31182 | rasdani/github-patches | git_diff | pypa__pip-10145 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use sysconfig.get_preferred_scheme etc.
Todo to self. This has been implemented for 3.10 and we should support it in 21.2.
</issue>
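A minimal sketch of the idea, assuming only the standard library: `sysconfig.get_preferred_scheme()` exists on Python 3.10+ and takes one of "prefix", "home" or "user"; on older interpreters pip would keep falling back to heuristics like the `_infer_*` helpers shown in the module below (the reference patch later in this entry wires this check into those helpers):

```python
# Sketch only: prefer the interpreter-provided scheme name on 3.10+,
# otherwise fall back to a heuristic scheme name such as "posix_prefix".
import sys
import sysconfig

_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10)


def preferred_scheme(key: str, fallback: str) -> str:
    # key is one of "prefix", "home" or "user".
    if _HAS_PREFERRED_SCHEME_API:
        return sysconfig.get_preferred_scheme(key)
    return fallback
```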
<code>
[start of src/pip/_internal/locations/_sysconfig.py]
1 import distutils.util # FIXME: For change_root.
2 import logging
3 import os
4 import sys
5 import sysconfig
6 import typing
7
8 from pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid
9 from pip._internal.models.scheme import SCHEME_KEYS, Scheme
10 from pip._internal.utils.virtualenv import running_under_virtualenv
11
12 from .base import get_major_minor_version, is_osx_framework
13
14 logger = logging.getLogger(__name__)
15
16
17 # Notes on _infer_* functions.
18 # Unfortunately ``_get_default_scheme()`` is private, so there's no way to
19 # ask things like "what is the '_prefix' scheme on this platform". These
20 # functions try to answer that with some heuristics while accounting for ad-hoc
21 # platforms not covered by CPython's default sysconfig implementation. If the
22 # ad-hoc implementation does not fully implement sysconfig, we'll fall back to
23 # a POSIX scheme.
24
25 _AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())
26
27
28 def _infer_prefix() -> str:
29 """Try to find a prefix scheme for the current platform.
30
31 This tries:
32
33 * A special ``osx_framework_library`` for Python distributed by Apple's
34 Command Line Tools, when not running in a virtual environment.
35 * Implementation + OS, used by PyPy on Windows (``pypy_nt``).
36 * Implementation without OS, used by PyPy on POSIX (``pypy``).
37 * OS + "prefix", used by CPython on POSIX (``posix_prefix``).
38 * Just the OS name, used by CPython on Windows (``nt``).
39
40 If none of the above works, fall back to ``posix_prefix``.
41 """
42 os_framework_global = is_osx_framework() and not running_under_virtualenv()
43 if os_framework_global and "osx_framework_library" in _AVAILABLE_SCHEMES:
44 return "osx_framework_library"
45 implementation_suffixed = f"{sys.implementation.name}_{os.name}"
46 if implementation_suffixed in _AVAILABLE_SCHEMES:
47 return implementation_suffixed
48 if sys.implementation.name in _AVAILABLE_SCHEMES:
49 return sys.implementation.name
50 suffixed = f"{os.name}_prefix"
51 if suffixed in _AVAILABLE_SCHEMES:
52 return suffixed
53 if os.name in _AVAILABLE_SCHEMES: # On Windows, prefx is just called "nt".
54 return os.name
55 return "posix_prefix"
56
57
58 def _infer_user() -> str:
59 """Try to find a user scheme for the current platform."""
60 if is_osx_framework() and not running_under_virtualenv():
61 suffixed = "osx_framework_user"
62 else:
63 suffixed = f"{os.name}_user"
64 if suffixed in _AVAILABLE_SCHEMES:
65 return suffixed
66 if "posix_user" not in _AVAILABLE_SCHEMES: # User scheme unavailable.
67 raise UserInstallationInvalid()
68 return "posix_user"
69
70
71 def _infer_home() -> str:
72 """Try to find a home for the current platform."""
73 suffixed = f"{os.name}_home"
74 if suffixed in _AVAILABLE_SCHEMES:
75 return suffixed
76 return "posix_home"
77
78
79 # Update these keys if the user sets a custom home.
80 _HOME_KEYS = [
81 "installed_base",
82 "base",
83 "installed_platbase",
84 "platbase",
85 "prefix",
86 "exec_prefix",
87 ]
88 if sysconfig.get_config_var("userbase") is not None:
89 _HOME_KEYS.append("userbase")
90
91
92 def get_scheme(
93 dist_name: str,
94 user: bool = False,
95 home: typing.Optional[str] = None,
96 root: typing.Optional[str] = None,
97 isolated: bool = False,
98 prefix: typing.Optional[str] = None,
99 ) -> Scheme:
100 """
101 Get the "scheme" corresponding to the input parameters.
102
103 :param dist_name: the name of the package to retrieve the scheme for, used
104 in the headers scheme path
105 :param user: indicates to use the "user" scheme
106 :param home: indicates to use the "home" scheme
107 :param root: root under which other directories are re-based
108 :param isolated: ignored, but kept for distutils compatibility (where
109 this controls whether the user-site pydistutils.cfg is honored)
110 :param prefix: indicates to use the "prefix" scheme and provides the
111 base directory for the same
112 """
113 if user and prefix:
114 raise InvalidSchemeCombination("--user", "--prefix")
115 if home and prefix:
116 raise InvalidSchemeCombination("--home", "--prefix")
117
118 if home is not None:
119 scheme_name = _infer_home()
120 elif user:
121 scheme_name = _infer_user()
122 else:
123 scheme_name = _infer_prefix()
124
125 if home is not None:
126 variables = {k: home for k in _HOME_KEYS}
127 elif prefix is not None:
128 variables = {k: prefix for k in _HOME_KEYS}
129 else:
130 variables = {}
131
132 paths = sysconfig.get_paths(scheme=scheme_name, vars=variables)
133
134 # Logic here is very arbitrary, we're doing it for compatibility, don't ask.
135 # 1. Pip historically uses a special header path in virtual environments.
136 # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We
137 # only do the same when not running in a virtual environment because
138 # pip's historical header path logic (see point 1) did not do this.
139 if running_under_virtualenv():
140 if user:
141 base = variables.get("userbase", sys.prefix)
142 else:
143 base = variables.get("base", sys.prefix)
144 python_xy = f"python{get_major_minor_version()}"
145 paths["include"] = os.path.join(base, "include", "site", python_xy)
146 elif not dist_name:
147 dist_name = "UNKNOWN"
148
149 scheme = Scheme(
150 platlib=paths["platlib"],
151 purelib=paths["purelib"],
152 headers=os.path.join(paths["include"], dist_name),
153 scripts=paths["scripts"],
154 data=paths["data"],
155 )
156 if root is not None:
157 for key in SCHEME_KEYS:
158 value = distutils.util.change_root(root, getattr(scheme, key))
159 setattr(scheme, key, value)
160 return scheme
161
162
163 def get_bin_prefix() -> str:
164 # Forcing to use /usr/local/bin for standard macOS framework installs.
165 if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/":
166 return "/usr/local/bin"
167 return sysconfig.get_paths()["scripts"]
168
169
170 def get_purelib() -> str:
171 return sysconfig.get_paths()["purelib"]
172
173
174 def get_platlib() -> str:
175 return sysconfig.get_paths()["platlib"]
176
177
178 def get_prefixed_libs(prefix: str) -> typing.Tuple[str, str]:
179 paths = sysconfig.get_paths(vars={"base": prefix, "platbase": prefix})
180 return (paths["purelib"], paths["platlib"])
181
[end of src/pip/_internal/locations/_sysconfig.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/locations/_sysconfig.py b/src/pip/_internal/locations/_sysconfig.py
--- a/src/pip/_internal/locations/_sysconfig.py
+++ b/src/pip/_internal/locations/_sysconfig.py
@@ -24,6 +24,8 @@
_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())
+_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10)
+
def _infer_prefix() -> str:
"""Try to find a prefix scheme for the current platform.
@@ -39,6 +41,8 @@
If none of the above works, fall back to ``posix_prefix``.
"""
+ if _HAS_PREFERRED_SCHEME_API:
+ return sysconfig.get_preferred_scheme("prefix") # type: ignore
os_framework_global = is_osx_framework() and not running_under_virtualenv()
if os_framework_global and "osx_framework_library" in _AVAILABLE_SCHEMES:
return "osx_framework_library"
@@ -57,6 +61,8 @@
def _infer_user() -> str:
"""Try to find a user scheme for the current platform."""
+ if _HAS_PREFERRED_SCHEME_API:
+ return sysconfig.get_preferred_scheme("user") # type: ignore
if is_osx_framework() and not running_under_virtualenv():
suffixed = "osx_framework_user"
else:
@@ -70,6 +76,8 @@
def _infer_home() -> str:
"""Try to find a home for the current platform."""
+ if _HAS_PREFERRED_SCHEME_API:
+ return sysconfig.get_preferred_scheme("home") # type: ignore
suffixed = f"{os.name}_home"
if suffixed in _AVAILABLE_SCHEMES:
return suffixed
| {"golden_diff": "diff --git a/src/pip/_internal/locations/_sysconfig.py b/src/pip/_internal/locations/_sysconfig.py\n--- a/src/pip/_internal/locations/_sysconfig.py\n+++ b/src/pip/_internal/locations/_sysconfig.py\n@@ -24,6 +24,8 @@\n \n _AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())\n \n+_HAS_PREFERRED_SCHEME_API = sys.version_info >= (3, 10)\n+\n \n def _infer_prefix() -> str:\n \"\"\"Try to find a prefix scheme for the current platform.\n@@ -39,6 +41,8 @@\n \n If none of the above works, fall back to ``posix_prefix``.\n \"\"\"\n+ if _HAS_PREFERRED_SCHEME_API:\n+ return sysconfig.get_preferred_scheme(\"prefix\") # type: ignore\n os_framework_global = is_osx_framework() and not running_under_virtualenv()\n if os_framework_global and \"osx_framework_library\" in _AVAILABLE_SCHEMES:\n return \"osx_framework_library\"\n@@ -57,6 +61,8 @@\n \n def _infer_user() -> str:\n \"\"\"Try to find a user scheme for the current platform.\"\"\"\n+ if _HAS_PREFERRED_SCHEME_API:\n+ return sysconfig.get_preferred_scheme(\"user\") # type: ignore\n if is_osx_framework() and not running_under_virtualenv():\n suffixed = \"osx_framework_user\"\n else:\n@@ -70,6 +76,8 @@\n \n def _infer_home() -> str:\n \"\"\"Try to find a home for the current platform.\"\"\"\n+ if _HAS_PREFERRED_SCHEME_API:\n+ return sysconfig.get_preferred_scheme(\"home\") # type: ignore\n suffixed = f\"{os.name}_home\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n", "issue": "Use sysconfig.get_preferred_scheme etc.\nTodo to self. This has been implemented for 3.10 and we should support is in 21.2.\n", "before_files": [{"content": "import distutils.util # FIXME: For change_root.\nimport logging\nimport os\nimport sys\nimport sysconfig\nimport typing\n\nfrom pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid\nfrom pip._internal.models.scheme import SCHEME_KEYS, Scheme\nfrom pip._internal.utils.virtualenv import running_under_virtualenv\n\nfrom .base import get_major_minor_version, is_osx_framework\n\nlogger = logging.getLogger(__name__)\n\n\n# Notes on _infer_* functions.\n# Unfortunately ``_get_default_scheme()`` is private, so there's no way to\n# ask things like \"what is the '_prefix' scheme on this platform\". These\n# functions try to answer that with some heuristics while accounting for ad-hoc\n# platforms not covered by CPython's default sysconfig implementation. 
If the\n# ad-hoc implementation does not fully implement sysconfig, we'll fall back to\n# a POSIX scheme.\n\n_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())\n\n\ndef _infer_prefix() -> str:\n \"\"\"Try to find a prefix scheme for the current platform.\n\n This tries:\n\n * A special ``osx_framework_library`` for Python distributed by Apple's\n Command Line Tools, when not running in a virtual environment.\n * Implementation + OS, used by PyPy on Windows (``pypy_nt``).\n * Implementation without OS, used by PyPy on POSIX (``pypy``).\n * OS + \"prefix\", used by CPython on POSIX (``posix_prefix``).\n * Just the OS name, used by CPython on Windows (``nt``).\n\n If none of the above works, fall back to ``posix_prefix``.\n \"\"\"\n os_framework_global = is_osx_framework() and not running_under_virtualenv()\n if os_framework_global and \"osx_framework_library\" in _AVAILABLE_SCHEMES:\n return \"osx_framework_library\"\n implementation_suffixed = f\"{sys.implementation.name}_{os.name}\"\n if implementation_suffixed in _AVAILABLE_SCHEMES:\n return implementation_suffixed\n if sys.implementation.name in _AVAILABLE_SCHEMES:\n return sys.implementation.name\n suffixed = f\"{os.name}_prefix\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n if os.name in _AVAILABLE_SCHEMES: # On Windows, prefx is just called \"nt\".\n return os.name\n return \"posix_prefix\"\n\n\ndef _infer_user() -> str:\n \"\"\"Try to find a user scheme for the current platform.\"\"\"\n if is_osx_framework() and not running_under_virtualenv():\n suffixed = \"osx_framework_user\"\n else:\n suffixed = f\"{os.name}_user\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n if \"posix_user\" not in _AVAILABLE_SCHEMES: # User scheme unavailable.\n raise UserInstallationInvalid()\n return \"posix_user\"\n\n\ndef _infer_home() -> str:\n \"\"\"Try to find a home for the current platform.\"\"\"\n suffixed = f\"{os.name}_home\"\n if suffixed in _AVAILABLE_SCHEMES:\n return suffixed\n return \"posix_home\"\n\n\n# Update these keys if the user sets a custom home.\n_HOME_KEYS = [\n \"installed_base\",\n \"base\",\n \"installed_platbase\",\n \"platbase\",\n \"prefix\",\n \"exec_prefix\",\n]\nif sysconfig.get_config_var(\"userbase\") is not None:\n _HOME_KEYS.append(\"userbase\")\n\n\ndef get_scheme(\n dist_name: str,\n user: bool = False,\n home: typing.Optional[str] = None,\n root: typing.Optional[str] = None,\n isolated: bool = False,\n prefix: typing.Optional[str] = None,\n) -> Scheme:\n \"\"\"\n Get the \"scheme\" corresponding to the input parameters.\n\n :param dist_name: the name of the package to retrieve the scheme for, used\n in the headers scheme path\n :param user: indicates to use the \"user\" scheme\n :param home: indicates to use the \"home\" scheme\n :param root: root under which other directories are re-based\n :param isolated: ignored, but kept for distutils compatibility (where\n this controls whether the user-site pydistutils.cfg is honored)\n :param prefix: indicates to use the \"prefix\" scheme and provides the\n base directory for the same\n \"\"\"\n if user and prefix:\n raise InvalidSchemeCombination(\"--user\", \"--prefix\")\n if home and prefix:\n raise InvalidSchemeCombination(\"--home\", \"--prefix\")\n\n if home is not None:\n scheme_name = _infer_home()\n elif user:\n scheme_name = _infer_user()\n else:\n scheme_name = _infer_prefix()\n\n if home is not None:\n variables = {k: home for k in _HOME_KEYS}\n elif prefix is not None:\n variables = {k: prefix for k in _HOME_KEYS}\n else:\n variables = 
{}\n\n paths = sysconfig.get_paths(scheme=scheme_name, vars=variables)\n\n # Logic here is very arbitrary, we're doing it for compatibility, don't ask.\n # 1. Pip historically uses a special header path in virtual environments.\n # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We\n # only do the same when not running in a virtual environment because\n # pip's historical header path logic (see point 1) did not do this.\n if running_under_virtualenv():\n if user:\n base = variables.get(\"userbase\", sys.prefix)\n else:\n base = variables.get(\"base\", sys.prefix)\n python_xy = f\"python{get_major_minor_version()}\"\n paths[\"include\"] = os.path.join(base, \"include\", \"site\", python_xy)\n elif not dist_name:\n dist_name = \"UNKNOWN\"\n\n scheme = Scheme(\n platlib=paths[\"platlib\"],\n purelib=paths[\"purelib\"],\n headers=os.path.join(paths[\"include\"], dist_name),\n scripts=paths[\"scripts\"],\n data=paths[\"data\"],\n )\n if root is not None:\n for key in SCHEME_KEYS:\n value = distutils.util.change_root(root, getattr(scheme, key))\n setattr(scheme, key, value)\n return scheme\n\n\ndef get_bin_prefix() -> str:\n # Forcing to use /usr/local/bin for standard macOS framework installs.\n if sys.platform[:6] == \"darwin\" and sys.prefix[:16] == \"/System/Library/\":\n return \"/usr/local/bin\"\n return sysconfig.get_paths()[\"scripts\"]\n\n\ndef get_purelib() -> str:\n return sysconfig.get_paths()[\"purelib\"]\n\n\ndef get_platlib() -> str:\n return sysconfig.get_paths()[\"platlib\"]\n\n\ndef get_prefixed_libs(prefix: str) -> typing.Tuple[str, str]:\n paths = sysconfig.get_paths(vars={\"base\": prefix, \"platbase\": prefix})\n return (paths[\"purelib\"], paths[\"platlib\"])\n", "path": "src/pip/_internal/locations/_sysconfig.py"}]} | 2,554 | 408 |
gh_patches_debug_45488 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3519 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create single build updates in pending status for rawhide
Now that we have the automation in place to automatically move an update from pending to testing, we should use that for single build updates too.
For that we need to change the automatic_update consumer (https://github.com/fedora-infra/bodhi/blob/develop/bodhi/server/consumers/automatic_updates.py) to create the update in the pending status.
And make sure that the signed consumer will move the updates from pending to testing when the build is signed.
</issue>
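As a sketch of the first half of the request, the `Update(...)` constructor call in `automatic_updates.py` below would create the update in `pending` instead of `testing`; everything except the `status` argument mirrors the existing code, and the surrounding variables (`rel`, `build`, `bnvr`, `user`) come from that consumer:

```python
# Sketch only: create the automatic update in pending status.
from bodhi.server.models import Update, UpdateStatus, UpdateType

update = Update(
    release=rel,
    builds=[build],
    notes=f"Automatic update for {bnvr}.",
    type=UpdateType.unspecified,
    stable_karma=3,
    unstable_karma=-3,
    autokarma=False,
    user=user,
    status=UpdateStatus.pending,  # was UpdateStatus.testing
)
```

The second half, moving the update from pending to testing once the build is signed, would then live in the signed consumer (`bodhi/server/consumers/signed.py`, whose beginning is also shown below).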
<code>
[start of bodhi/server/consumers/automatic_updates.py]
1 # Copyright © 2019 Red Hat, Inc. and others.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The Bodhi handler that creates updates automatically from tagged builds.
20
21 This module is responsible for the process of creating updates when builds are
22 tagged with certain tags.
23 """
24
25 import logging
26
27 import fedora_messaging
28
29 from bodhi.server import buildsys
30 from bodhi.server.config import config
31 from bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus
32 from bodhi.server.models import Update, UpdateStatus, UpdateType, User
33 from bodhi.server.util import transactional_session_maker
34
35 log = logging.getLogger('bodhi')
36
37
38 class AutomaticUpdateHandler:
39 """
40 The Bodhi Automatic Update Handler.
41
42 A consumer that listens for messages about tagged builds and creates
43 updates from them.
44 """
45
46 def __init__(self, db_factory: transactional_session_maker = None):
47 """
48 Initialize the Automatic Update Handler.
49
50 Args:
51 db_factory: If given, used as the db_factory for this handler. If
52 None (the default), a new TransactionalSessionMaker is created and
53 used.
54 """
55 if not db_factory:
56 self.db_factory = transactional_session_maker()
57 else:
58 self.db_factory = db_factory
59
60 def __call__(self, message: fedora_messaging.api.Message) -> None:
61 """Create updates from appropriately tagged builds.
62
63 Args:
64 message: The message we are processing.
65 """
66 body = message.body
67
68 missing = []
69 for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):
70 if mandatory not in body:
71 missing.append(mandatory)
72 if missing:
73 log.debug(f"Received incomplete tag message. Missing: {', '.join(missing)}")
74 return
75
76 btag = body['tag']
77 bnvr = '{name}-{version}-{release}'.format(**body)
78
79 koji = buildsys.get_session()
80
81 kbuildinfo = koji.getBuild(bnvr)
82 if not kbuildinfo:
83 log.debug(f"Can't find Koji build for {bnvr}.")
84 return
85
86 if 'nvr' not in kbuildinfo:
87 log.debug(f"Koji build info for {bnvr} doesn't contain 'nvr'.")
88 return
89
90 if 'owner_name' not in kbuildinfo:
91 log.debug(f"Koji build info for {bnvr} doesn't contain 'owner_name'.")
92 return
93
94 if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):
95 log.debug(f"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in "
96 "automatic_updates_blacklist, skipping.")
97 return
98
99 # some APIs want the Koji build info, some others want the same
100 # wrapped in a larger (request?) structure
101 rbuildinfo = {
102 'info': kbuildinfo,
103 'nvr': kbuildinfo['nvr'].rsplit('-', 2),
104 }
105
106 with self.db_factory() as dbsession:
107 rel = dbsession.query(Release).filter_by(create_automatic_updates=True,
108 pending_testing_tag=btag).first()
109 if not rel:
110 log.debug(f"Ignoring build being tagged into {btag!r}, no release configured for "
111 "automatic updates for it found.")
112 return
113
114 bcls = ContentType.infer_content_class(Build, kbuildinfo)
115 build = bcls.get(bnvr)
116 if build and build.update:
117 if build.update.status == UpdateStatus.pending:
118 log.info(
119 f"Build, active update for {bnvr} exists already "
120 "in Pending, moving it along.")
121 build.update.status = UpdateStatus.testing
122 build.update.request = None
123 dbsession.add(build)
124 if config.get('test_gating.required'):
125 log.debug(
126 'Test gating is required, marking the update as waiting on test '
127 'gating and updating it from Greenwave to get the real status.')
128 build.update.test_gating_status = TestGatingStatus.waiting
129 build.update.update_test_gating_status()
130 dbsession.commit()
131 else:
132 log.info(f"Build, active update for {bnvr} exists already, skipping.")
133 return
134
135 if not build:
136 log.debug(f"Build for {bnvr} doesn't exist yet, creating.")
137
138 # Package.get_or_create() infers content type already
139 log.debug("Getting/creating related package object.")
140 pkg = Package.get_or_create(rbuildinfo)
141
142 log.debug("Creating build object, adding it to the DB.")
143 build = bcls(nvr=bnvr, package=pkg)
144 dbsession.add(build)
145
146 owner_name = kbuildinfo['owner_name']
147 user = User.get(owner_name)
148 if not user:
149 log.debug(f"Creating bodhi user for '{owner_name}'.")
150 # Leave email, groups blank, these will be filled
151 # in or updated when they log into Bodhi next time, see
152 # bodhi.server.security:remember_me().
153 user = User(name=owner_name)
154 dbsession.add(user)
155
156 log.debug(f"Creating new update for {bnvr}.")
157 update = Update(
158 release=rel,
159 builds=[build],
160 notes=f"Automatic update for {bnvr}.",
161 type=UpdateType.unspecified,
162 stable_karma=3,
163 unstable_karma=-3,
164 autokarma=False,
165 user=user,
166 status=UpdateStatus.testing,
167 )
168
169 # Comment on the update that it was automatically created.
170 update.comment(
171 dbsession,
172 str("This update was automatically created"),
173 author="bodhi",
174 )
175
176 if config.get('test_gating.required'):
177 log.debug(
178 'Test gating required is enforced, marking the update as '
179 'waiting on test gating and updating it from Greenwave to '
180 'get the real status.')
181 update.test_gating_status = TestGatingStatus.waiting
182 update.update_test_gating_status()
183
184 log.debug("Adding new update to the database.")
185 dbsession.add(update)
186
187 log.debug("Committing changes to the database.")
188 dbsession.commit()
189
[end of bodhi/server/consumers/automatic_updates.py]
[start of bodhi/server/consumers/signed.py]
1 # Copyright © 2016-2019 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The "signed handler".
20
21 This module is responsible for marking builds as "signed" when they get moved
22 from the pending-signing to pending-updates-testing tag by RoboSignatory.
23 """
24
25 import logging
26
27 import fedora_messaging
28 from sqlalchemy import func
29
30 from bodhi.server.models import Build, UpdateStatus
31 from bodhi.server.util import transactional_session_maker
32
33 log = logging.getLogger('bodhi')
34
35
36 class SignedHandler(object):
37 """
38 The Bodhi Signed Handler.
39
40 A fedora-messaging listener waiting for messages from koji about builds being tagged.
41 """
42
43 def __init__(self):
44 """Initialize the SignedHandler."""
45 self.db_factory = transactional_session_maker()
46
47 def __call__(self, message: fedora_messaging.api.Message):
48 """
49 Handle messages arriving with the configured topic.
50
51 This marks a build as signed if it is assigned to the pending testing release tag.
52
53 Example message format::
54 {
55 'body': {
56 'build_id': 442562,
57 'name': 'colord',
58 'tag_id': 214,
59 'instance': 's390',
60 'tag': 'f26-updates-testing-pending',
61 'user': 'sharkcz',
62 'version': '1.3.4',
63 'owner': 'sharkcz',
64 'release': '1.fc26'
65 },
66 }
67
68 The message can contain additional keys.
69
70 Duplicate messages: this method is idempotent.
71
72 Args:
73 message: The incoming message in the format described above.
74 """
75 message = message.body
76 build_nvr = '%(name)s-%(version)s-%(release)s' % message
77 tag = message['tag']
78
79 log.info("%s tagged into %s" % (build_nvr, tag))
80
81 with self.db_factory():
82 build = Build.get(build_nvr)
83 if not build:
84 log.info("Build was not submitted, skipping")
85 return
86
87 if not build.release:
88 log.info('Build is not assigned to release, skipping')
89 return
90
91 if build.update.from_tag:
92 koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)
93 if tag != koji_testing_tag:
94 log.info("Tag is not testing side tag, skipping")
95 return
96 else:
97 if build.release.pending_testing_tag != tag:
98 log.info("Tag is not pending_testing tag, skipping")
99 return
100
101 if build.signed:
102 log.info("Build was already marked as signed (maybe a duplicate message)")
103 return
104
105 # This build was moved into the pending_testing tag for the applicable release, which
106 # is done by RoboSignatory to indicate that the build has been correctly signed and
107 # written out. Mark it as such.
108 log.info("Build has been signed, marking")
109 build.signed = True
110 log.info("Build %s has been marked as signed" % build_nvr)
111
112 # If every build in update is signed change status to testing
113 if build.update.from_tag and build.update.signed():
114 log.info("Every build in update is signed, set status to testing")
115
116 build.update.status = UpdateStatus.testing
117 build.update.date_testing = func.current_timestamp()
118 build.update.request = None
119 log.info(f"Update {build.update.display_name} status has been set to testing")
120
[end of bodhi/server/consumers/signed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bodhi/server/consumers/automatic_updates.py b/bodhi/server/consumers/automatic_updates.py
--- a/bodhi/server/consumers/automatic_updates.py
+++ b/bodhi/server/consumers/automatic_updates.py
@@ -28,7 +28,7 @@
from bodhi.server import buildsys
from bodhi.server.config import config
-from bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus
+from bodhi.server.models import Build, ContentType, Package, Release
from bodhi.server.models import Update, UpdateStatus, UpdateType, User
from bodhi.server.util import transactional_session_maker
@@ -105,7 +105,7 @@
with self.db_factory() as dbsession:
rel = dbsession.query(Release).filter_by(create_automatic_updates=True,
- pending_testing_tag=btag).first()
+ candidate_tag=btag).first()
if not rel:
log.debug(f"Ignoring build being tagged into {btag!r}, no release configured for "
"automatic updates for it found.")
@@ -114,22 +114,7 @@
bcls = ContentType.infer_content_class(Build, kbuildinfo)
build = bcls.get(bnvr)
if build and build.update:
- if build.update.status == UpdateStatus.pending:
- log.info(
- f"Build, active update for {bnvr} exists already "
- "in Pending, moving it along.")
- build.update.status = UpdateStatus.testing
- build.update.request = None
- dbsession.add(build)
- if config.get('test_gating.required'):
- log.debug(
- 'Test gating is required, marking the update as waiting on test '
- 'gating and updating it from Greenwave to get the real status.')
- build.update.test_gating_status = TestGatingStatus.waiting
- build.update.update_test_gating_status()
- dbsession.commit()
- else:
- log.info(f"Build, active update for {bnvr} exists already, skipping.")
+ log.info(f"Build, active update for {bnvr} exists already, skipping.")
return
if not build:
@@ -163,7 +148,7 @@
unstable_karma=-3,
autokarma=False,
user=user,
- status=UpdateStatus.testing,
+ status=UpdateStatus.pending,
)
# Comment on the update that it was automatically created.
@@ -173,14 +158,6 @@
author="bodhi",
)
- if config.get('test_gating.required'):
- log.debug(
- 'Test gating required is enforced, marking the update as '
- 'waiting on test gating and updating it from Greenwave to '
- 'get the real status.')
- update.test_gating_status = TestGatingStatus.waiting
- update.update_test_gating_status()
-
log.debug("Adding new update to the database.")
dbsession.add(update)
diff --git a/bodhi/server/consumers/signed.py b/bodhi/server/consumers/signed.py
--- a/bodhi/server/consumers/signed.py
+++ b/bodhi/server/consumers/signed.py
@@ -27,7 +27,8 @@
import fedora_messaging
from sqlalchemy import func
-from bodhi.server.models import Build, UpdateStatus
+from bodhi.server.config import config
+from bodhi.server.models import Build, UpdateStatus, TestGatingStatus
from bodhi.server.util import transactional_session_maker
log = logging.getLogger('bodhi')
@@ -110,10 +111,17 @@
log.info("Build %s has been marked as signed" % build_nvr)
# If every build in update is signed change status to testing
- if build.update.from_tag and build.update.signed():
+ if not build.update.release.composed_by_bodhi and build.update.signed():
log.info("Every build in update is signed, set status to testing")
build.update.status = UpdateStatus.testing
build.update.date_testing = func.current_timestamp()
build.update.request = None
+
+ if config.get("test_gating.required"):
+ log.debug('Test gating is required, marking the update as waiting on test '
+ 'gating and updating it from Greenwave to get the real status.')
+ build.update.test_gating_status = TestGatingStatus.waiting
+ build.update.update_test_gating_status()
+
log.info(f"Update {build.update.display_name} status has been set to testing")
| {"golden_diff": "diff --git a/bodhi/server/consumers/automatic_updates.py b/bodhi/server/consumers/automatic_updates.py\n--- a/bodhi/server/consumers/automatic_updates.py\n+++ b/bodhi/server/consumers/automatic_updates.py\n@@ -28,7 +28,7 @@\n \n from bodhi.server import buildsys\n from bodhi.server.config import config\n-from bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus\n+from bodhi.server.models import Build, ContentType, Package, Release\n from bodhi.server.models import Update, UpdateStatus, UpdateType, User\n from bodhi.server.util import transactional_session_maker\n \n@@ -105,7 +105,7 @@\n \n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n- pending_testing_tag=btag).first()\n+ candidate_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n@@ -114,22 +114,7 @@\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n- if build.update.status == UpdateStatus.pending:\n- log.info(\n- f\"Build, active update for {bnvr} exists already \"\n- \"in Pending, moving it along.\")\n- build.update.status = UpdateStatus.testing\n- build.update.request = None\n- dbsession.add(build)\n- if config.get('test_gating.required'):\n- log.debug(\n- 'Test gating is required, marking the update as waiting on test '\n- 'gating and updating it from Greenwave to get the real status.')\n- build.update.test_gating_status = TestGatingStatus.waiting\n- build.update.update_test_gating_status()\n- dbsession.commit()\n- else:\n- log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n+ log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n \n if not build:\n@@ -163,7 +148,7 @@\n unstable_karma=-3,\n autokarma=False,\n user=user,\n- status=UpdateStatus.testing,\n+ status=UpdateStatus.pending,\n )\n \n # Comment on the update that it was automatically created.\n@@ -173,14 +158,6 @@\n author=\"bodhi\",\n )\n \n- if config.get('test_gating.required'):\n- log.debug(\n- 'Test gating required is enforced, marking the update as '\n- 'waiting on test gating and updating it from Greenwave to '\n- 'get the real status.')\n- update.test_gating_status = TestGatingStatus.waiting\n- update.update_test_gating_status()\n-\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n \ndiff --git a/bodhi/server/consumers/signed.py b/bodhi/server/consumers/signed.py\n--- a/bodhi/server/consumers/signed.py\n+++ b/bodhi/server/consumers/signed.py\n@@ -27,7 +27,8 @@\n import fedora_messaging\n from sqlalchemy import func\n \n-from bodhi.server.models import Build, UpdateStatus\n+from bodhi.server.config import config\n+from bodhi.server.models import Build, UpdateStatus, TestGatingStatus\n from bodhi.server.util import transactional_session_maker\n \n log = logging.getLogger('bodhi')\n@@ -110,10 +111,17 @@\n log.info(\"Build %s has been marked as signed\" % build_nvr)\n \n # If every build in update is signed change status to testing\n- if build.update.from_tag and build.update.signed():\n+ if not build.update.release.composed_by_bodhi and build.update.signed():\n log.info(\"Every build in update is signed, set status to testing\")\n \n build.update.status = UpdateStatus.testing\n build.update.date_testing = func.current_timestamp()\n build.update.request = None\n+\n+ if config.get(\"test_gating.required\"):\n+ 
log.debug('Test gating is required, marking the update as waiting on test '\n+ 'gating and updating it from Greenwave to get the real status.')\n+ build.update.test_gating_status = TestGatingStatus.waiting\n+ build.update.update_test_gating_status()\n+\n log.info(f\"Update {build.update.display_name} status has been set to testing\")\n", "issue": "Create single build updates in pending status for rawhide\nNow that we have the automation in place to automatically move an update from pending to testing we should use that for single build updates too.\r\n\r\nFor that we need to change the automatic_update consumer (https://github.com/fedora-infra/bodhi/blob/develop/bodhi/server/consumers/automatic_updates.py) to create the update in the pending status. \r\n\r\nAnd make sure that the signed consumer will move the updates from pending to testing when the build is signed.\n", "before_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe Bodhi handler that creates updates automatically from tagged builds.\n\nThis module is responsible for the process of creating updates when builds are\ntagged with certain tags.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\n\nfrom bodhi.server import buildsys\nfrom bodhi.server.config import config\nfrom bodhi.server.models import Build, ContentType, Package, Release, TestGatingStatus\nfrom bodhi.server.models import Update, UpdateStatus, UpdateType, User\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass AutomaticUpdateHandler:\n \"\"\"\n The Bodhi Automatic Update Handler.\n\n A consumer that listens for messages about tagged builds and creates\n updates from them.\n \"\"\"\n\n def __init__(self, db_factory: transactional_session_maker = None):\n \"\"\"\n Initialize the Automatic Update Handler.\n\n Args:\n db_factory: If given, used as the db_factory for this handler. If\n None (the default), a new TransactionalSessionMaker is created and\n used.\n \"\"\"\n if not db_factory:\n self.db_factory = transactional_session_maker()\n else:\n self.db_factory = db_factory\n\n def __call__(self, message: fedora_messaging.api.Message) -> None:\n \"\"\"Create updates from appropriately tagged builds.\n\n Args:\n message: The message we are processing.\n \"\"\"\n body = message.body\n\n missing = []\n for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):\n if mandatory not in body:\n missing.append(mandatory)\n if missing:\n log.debug(f\"Received incomplete tag message. 
Missing: {', '.join(missing)}\")\n return\n\n btag = body['tag']\n bnvr = '{name}-{version}-{release}'.format(**body)\n\n koji = buildsys.get_session()\n\n kbuildinfo = koji.getBuild(bnvr)\n if not kbuildinfo:\n log.debug(f\"Can't find Koji build for {bnvr}.\")\n return\n\n if 'nvr' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'nvr'.\")\n return\n\n if 'owner_name' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'owner_name'.\")\n return\n\n if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):\n log.debug(f\"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in \"\n \"automatic_updates_blacklist, skipping.\")\n return\n\n # some APIs want the Koji build info, some others want the same\n # wrapped in a larger (request?) structure\n rbuildinfo = {\n 'info': kbuildinfo,\n 'nvr': kbuildinfo['nvr'].rsplit('-', 2),\n }\n\n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n pending_testing_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n return\n\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n if build.update.status == UpdateStatus.pending:\n log.info(\n f\"Build, active update for {bnvr} exists already \"\n \"in Pending, moving it along.\")\n build.update.status = UpdateStatus.testing\n build.update.request = None\n dbsession.add(build)\n if config.get('test_gating.required'):\n log.debug(\n 'Test gating is required, marking the update as waiting on test '\n 'gating and updating it from Greenwave to get the real status.')\n build.update.test_gating_status = TestGatingStatus.waiting\n build.update.update_test_gating_status()\n dbsession.commit()\n else:\n log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n\n if not build:\n log.debug(f\"Build for {bnvr} doesn't exist yet, creating.\")\n\n # Package.get_or_create() infers content type already\n log.debug(\"Getting/creating related package object.\")\n pkg = Package.get_or_create(rbuildinfo)\n\n log.debug(\"Creating build object, adding it to the DB.\")\n build = bcls(nvr=bnvr, package=pkg)\n dbsession.add(build)\n\n owner_name = kbuildinfo['owner_name']\n user = User.get(owner_name)\n if not user:\n log.debug(f\"Creating bodhi user for '{owner_name}'.\")\n # Leave email, groups blank, these will be filled\n # in or updated when they log into Bodhi next time, see\n # bodhi.server.security:remember_me().\n user = User(name=owner_name)\n dbsession.add(user)\n\n log.debug(f\"Creating new update for {bnvr}.\")\n update = Update(\n release=rel,\n builds=[build],\n notes=f\"Automatic update for {bnvr}.\",\n type=UpdateType.unspecified,\n stable_karma=3,\n unstable_karma=-3,\n autokarma=False,\n user=user,\n status=UpdateStatus.testing,\n )\n\n # Comment on the update that it was automatically created.\n update.comment(\n dbsession,\n str(\"This update was automatically created\"),\n author=\"bodhi\",\n )\n\n if config.get('test_gating.required'):\n log.debug(\n 'Test gating required is enforced, marking the update as '\n 'waiting on test gating and updating it from Greenwave to '\n 'get the real status.')\n update.test_gating_status = TestGatingStatus.waiting\n update.update_test_gating_status()\n\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n\n log.debug(\"Committing 
changes to the database.\")\n dbsession.commit()\n", "path": "bodhi/server/consumers/automatic_updates.py"}, {"content": "# Copyright \u00a9 2016-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"signed handler\".\n\nThis module is responsible for marking builds as \"signed\" when they get moved\nfrom the pending-signing to pending-updates-testing tag by RoboSignatory.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\nfrom sqlalchemy import func\n\nfrom bodhi.server.models import Build, UpdateStatus\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass SignedHandler(object):\n \"\"\"\n The Bodhi Signed Handler.\n\n A fedora-messaging listener waiting for messages from koji about builds being tagged.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the SignedHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"\n Handle messages arriving with the configured topic.\n\n This marks a build as signed if it is assigned to the pending testing release tag.\n\n Example message format::\n {\n 'body': {\n 'build_id': 442562,\n 'name': 'colord',\n 'tag_id': 214,\n 'instance': 's390',\n 'tag': 'f26-updates-testing-pending',\n 'user': 'sharkcz',\n 'version': '1.3.4',\n 'owner': 'sharkcz',\n 'release': '1.fc26'\n },\n }\n\n The message can contain additional keys.\n\n Duplicate messages: this method is idempotent.\n\n Args:\n message: The incoming message in the format described above.\n \"\"\"\n message = message.body\n build_nvr = '%(name)s-%(version)s-%(release)s' % message\n tag = message['tag']\n\n log.info(\"%s tagged into %s\" % (build_nvr, tag))\n\n with self.db_factory():\n build = Build.get(build_nvr)\n if not build:\n log.info(\"Build was not submitted, skipping\")\n return\n\n if not build.release:\n log.info('Build is not assigned to release, skipping')\n return\n\n if build.update.from_tag:\n koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)\n if tag != koji_testing_tag:\n log.info(\"Tag is not testing side tag, skipping\")\n return\n else:\n if build.release.pending_testing_tag != tag:\n log.info(\"Tag is not pending_testing tag, skipping\")\n return\n\n if build.signed:\n log.info(\"Build was already marked as signed (maybe a duplicate message)\")\n return\n\n # This build was moved into the pending_testing tag for the applicable release, which\n # is done by RoboSignatory to indicate that the build has been correctly signed and\n # written out. 
Mark it as such.\n log.info(\"Build has been signed, marking\")\n build.signed = True\n log.info(\"Build %s has been marked as signed\" % build_nvr)\n\n # If every build in update is signed change status to testing\n if build.update.from_tag and build.update.signed():\n log.info(\"Every build in update is signed, set status to testing\")\n\n build.update.status = UpdateStatus.testing\n build.update.date_testing = func.current_timestamp()\n build.update.request = None\n log.info(f\"Update {build.update.display_name} status has been set to testing\")\n", "path": "bodhi/server/consumers/signed.py"}]} | 3,835 | 1,021 |
gh_patches_debug_3628 | rasdani/github-patches | git_diff | coala__coala-1954 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Annotations: handling exceptions
When writing a custom annotation, the user should be able to see the exceptions it raises in order to debug their function. Currently all they get is the information that something went wrong, e.g.
```
[WARNING][14:09:44] The bear ClangComplexityBear cannot be executed. Unable to convert parameter 'max_complexity' into type <function func at 0x7fa1b4d12d90>
```
This can be fixed fairly easily by catching specific exceptions [here](https://github.com/coala-analyzer/coala/blob/master/coalib/settings/FunctionMetadata.py#L105) and documenting which exceptions annotations are allowed to raise.
This is related to the discussion in https://github.com/coala-analyzer/coala/issues/1219. Maybe we need to figure out a better way to use exceptions so that there is no need to catch broad ones, because broad excepts can hide bugs or other useful information.
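A minimal sketch of that fix in `FunctionMetadata._get_param`, assuming `TypeError` and `ValueError` are the exceptions annotations are documented to raise; anything else raised by a custom annotation would then propagate with its original traceback:

```python
class FunctionMetadata:
    # Excerpt; the rest of the class stays as it is.
    @staticmethod
    def _get_param(param, section, annotation):
        if annotation is None:
            annotation = lambda x: x

        try:
            return annotation(section[param])
        except (TypeError, ValueError):
            # Only conversion failures are translated; any other exception
            # raised by a custom annotation now surfaces with its traceback.
            raise ValueError("Unable to convert parameter {} into type "
                             "{}.".format(repr(param), annotation))
```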
</issue>
<code>
[start of coalib/settings/FunctionMetadata.py]
1 from collections import OrderedDict
2 from copy import copy
3 from inspect import getfullargspec, ismethod
4
5 from coalib.settings.DocumentationComment import DocumentationComment
6
7
8 class FunctionMetadata:
9 str_nodesc = "No description given."
10 str_optional = "Optional, defaults to '{}'."
11
12 def __init__(self,
13 name,
14 desc="",
15 retval_desc="",
16 non_optional_params=None,
17 optional_params=None,
18 omit=frozenset()):
19 """
20 Creates the FunctionMetadata object.
21
22 :param name: The name of the function.
23 :param desc: The description of the function.
24 :param retval_desc: The retval description of the function.
25 :param non_optional_params: A dict containing the name of non optional
26 parameters as the key and a tuple of a
27 description and the python annotation. To
28 preserve the order, use OrderedDict.
29 :param optional_params: A dict containing the name of optional
30 parameters as the key and a tuple
31 of a description, the python annotation and
32 the default value. To preserve the order,
33 use OrderedDict.
34 :param omit: A set of parameters to omit.
35 """
36 if non_optional_params is None:
37 non_optional_params = OrderedDict()
38 if optional_params is None:
39 optional_params = OrderedDict()
40
41 self.name = name
42 self.desc = desc
43 self.retval_desc = retval_desc
44 self._non_optional_params = non_optional_params
45 self._optional_params = optional_params
46 self.omit = set(omit)
47
48 def _filter_out_omitted(self, params):
49 """
50 Filters out parameters that are to omit. This is a helper method for
51 the param related properties.
52
53 :param params: The parameter dictionary to filter.
54 :return: The filtered dictionary.
55 """
56 return OrderedDict(filter(lambda p: p[0] not in self.omit,
57 tuple(params.items())))
58
59 @property
60 def non_optional_params(self):
61 """
62 Retrieves a dict containing the name of non optional parameters as the
63 key and a tuple of a description and the python annotation. Values that
64 are present in self.omit will be omitted.
65 """
66 return self._filter_out_omitted(self._non_optional_params)
67
68 @property
69 def optional_params(self):
70 """
71 Retrieves a dict containing the name of optional parameters as the key
72 and a tuple of a description, the python annotation and the default
73 value. Values that are present in self.omit will be omitted.
74 """
75 return self._filter_out_omitted(self._optional_params)
76
77 def create_params_from_section(self, section):
78 """
79 Create a params dictionary for this function that holds all values the
80 function needs plus optional ones that are available.
81
82 :param section: The section to retrieve the values from.
83 :return: The params dictionary.
84 """
85 params = {}
86
87 for param in self.non_optional_params:
88 _, annotation = self.non_optional_params[param]
89 params[param] = self._get_param(param, section, annotation)
90
91 for param in self.optional_params:
92 if param in section:
93 _, annotation, _ = self.optional_params[param]
94 params[param] = self._get_param(param, section, annotation)
95
96 return params
97
98 @staticmethod
99 def _get_param(param, section, annotation):
100 if annotation is None:
101 annotation = lambda x: x
102
103 try:
104 return annotation(section[param])
105 except:
106 raise ValueError("Unable to convert parameter {} into type "
107 "{}.".format(repr(param), annotation))
108
109 @classmethod
110 def from_function(cls, func, omit=frozenset()):
111 """
112 Creates a FunctionMetadata object from a function. Please note that any
113 variable argument lists are not supported. If you do not want the
114 first (usual named 'self') argument to appear please pass the method of
115 an actual INSTANCE of a class; passing the method of the class isn't
116 enough. Alternatively you can add "self" to the omit set.
117
118 :param func: The function. If __metadata__ of the unbound function is
119 present it will be copied and used, otherwise it will be
120 generated.
121 :param omit: A set of parameter names that are to be ignored.
122 :return: The FunctionMetadata object corresponding to the given
123 function.
124 """
125 if hasattr(func, "__metadata__"):
126 metadata = copy(func.__metadata__)
127 metadata.omit = omit
128 return metadata
129
130 doc = func.__doc__ or ""
131 doc_comment = DocumentationComment.from_docstring(doc)
132
133 non_optional_params = OrderedDict()
134 optional_params = OrderedDict()
135
136 argspec = getfullargspec(func)
137 args = argspec.args or ()
138 defaults = argspec.defaults or ()
139 num_non_defaults = len(args) - len(defaults)
140 for i, arg in enumerate(args):
141 # Implicit self argument or omitted explicitly
142 if i < 1 and ismethod(func):
143 continue
144
145 if i < num_non_defaults:
146 non_optional_params[arg] = (
147 doc_comment.param_dict.get(arg, cls.str_nodesc),
148 argspec.annotations.get(arg, None))
149 else:
150 optional_params[arg] = (
151 doc_comment.param_dict.get(arg, cls.str_nodesc) + " (" +
152 cls.str_optional.format(str(defaults[i-num_non_defaults]))
153 + ")",
154 argspec.annotations.get(arg, None),
155 defaults[i-num_non_defaults])
156
157 return cls(name=func.__name__,
158 desc=doc_comment.desc,
159 retval_desc=doc_comment.retval_desc,
160 non_optional_params=non_optional_params,
161 optional_params=optional_params,
162 omit=omit)
163
[end of coalib/settings/FunctionMetadata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/settings/FunctionMetadata.py b/coalib/settings/FunctionMetadata.py
--- a/coalib/settings/FunctionMetadata.py
+++ b/coalib/settings/FunctionMetadata.py
@@ -102,7 +102,7 @@
try:
return annotation(section[param])
- except:
+ except (TypeError, ValueError):
raise ValueError("Unable to convert parameter {} into type "
"{}.".format(repr(param), annotation))
| {"golden_diff": "diff --git a/coalib/settings/FunctionMetadata.py b/coalib/settings/FunctionMetadata.py\n--- a/coalib/settings/FunctionMetadata.py\n+++ b/coalib/settings/FunctionMetadata.py\n@@ -102,7 +102,7 @@\n \n try:\n return annotation(section[param])\n- except:\n+ except (TypeError, ValueError):\n raise ValueError(\"Unable to convert parameter {} into type \"\n \"{}.\".format(repr(param), annotation))\n", "issue": "Annotations: handling exceptions\nWhen writing custom annotation, user should be able to access exceptions, that it's raising, to debug his function. Currently all that he gets is the information that something went wrong, e. g.\n\n```\n[WARNING][14:09:44] The bear ClangComplexityBear cannot be executed. Unable to convert parameter 'max_complexity' into type <function func at 0x7fa1b4d12d90>\n```\n\nThis can be pretty easy fixed, by specifying exception [here](https://github.com/coala-analyzer/coala/blob/master/coalib/settings/FunctionMetadata.py#L105) and specifying in the docs what exception should annotations be allowed to raise.\n\nThis is related to https://github.com/coala-analyzer/coala/issues/1219 discussion. I think, maybe we need to figure out a better way to use exceptions so that there will be no need in catching broad ones, because they can hide bugs or other useful information.\n\n", "before_files": [{"content": "from collections import OrderedDict\nfrom copy import copy\nfrom inspect import getfullargspec, ismethod\n\nfrom coalib.settings.DocumentationComment import DocumentationComment\n\n\nclass FunctionMetadata:\n str_nodesc = \"No description given.\"\n str_optional = \"Optional, defaults to '{}'.\"\n\n def __init__(self,\n name,\n desc=\"\",\n retval_desc=\"\",\n non_optional_params=None,\n optional_params=None,\n omit=frozenset()):\n \"\"\"\n Creates the FunctionMetadata object.\n\n :param name: The name of the function.\n :param desc: The description of the function.\n :param retval_desc: The retval description of the function.\n :param non_optional_params: A dict containing the name of non optional\n parameters as the key and a tuple of a\n description and the python annotation. To\n preserve the order, use OrderedDict.\n :param optional_params: A dict containing the name of optional\n parameters as the key and a tuple\n of a description, the python annotation and\n the default value. To preserve the order,\n use OrderedDict.\n :param omit: A set of parameters to omit.\n \"\"\"\n if non_optional_params is None:\n non_optional_params = OrderedDict()\n if optional_params is None:\n optional_params = OrderedDict()\n\n self.name = name\n self.desc = desc\n self.retval_desc = retval_desc\n self._non_optional_params = non_optional_params\n self._optional_params = optional_params\n self.omit = set(omit)\n\n def _filter_out_omitted(self, params):\n \"\"\"\n Filters out parameters that are to omit. This is a helper method for\n the param related properties.\n\n :param params: The parameter dictionary to filter.\n :return: The filtered dictionary.\n \"\"\"\n return OrderedDict(filter(lambda p: p[0] not in self.omit,\n tuple(params.items())))\n\n @property\n def non_optional_params(self):\n \"\"\"\n Retrieves a dict containing the name of non optional parameters as the\n key and a tuple of a description and the python annotation. 
Values that\n are present in self.omit will be omitted.\n \"\"\"\n return self._filter_out_omitted(self._non_optional_params)\n\n @property\n def optional_params(self):\n \"\"\"\n Retrieves a dict containing the name of optional parameters as the key\n and a tuple of a description, the python annotation and the default\n value. Values that are present in self.omit will be omitted.\n \"\"\"\n return self._filter_out_omitted(self._optional_params)\n\n def create_params_from_section(self, section):\n \"\"\"\n Create a params dictionary for this function that holds all values the\n function needs plus optional ones that are available.\n\n :param section: The section to retrieve the values from.\n :return: The params dictionary.\n \"\"\"\n params = {}\n\n for param in self.non_optional_params:\n _, annotation = self.non_optional_params[param]\n params[param] = self._get_param(param, section, annotation)\n\n for param in self.optional_params:\n if param in section:\n _, annotation, _ = self.optional_params[param]\n params[param] = self._get_param(param, section, annotation)\n\n return params\n\n @staticmethod\n def _get_param(param, section, annotation):\n if annotation is None:\n annotation = lambda x: x\n\n try:\n return annotation(section[param])\n except:\n raise ValueError(\"Unable to convert parameter {} into type \"\n \"{}.\".format(repr(param), annotation))\n\n @classmethod\n def from_function(cls, func, omit=frozenset()):\n \"\"\"\n Creates a FunctionMetadata object from a function. Please note that any\n variable argument lists are not supported. If you do not want the\n first (usual named 'self') argument to appear please pass the method of\n an actual INSTANCE of a class; passing the method of the class isn't\n enough. Alternatively you can add \"self\" to the omit set.\n\n :param func: The function. If __metadata__ of the unbound function is\n present it will be copied and used, otherwise it will be\n generated.\n :param omit: A set of parameter names that are to be ignored.\n :return: The FunctionMetadata object corresponding to the given\n function.\n \"\"\"\n if hasattr(func, \"__metadata__\"):\n metadata = copy(func.__metadata__)\n metadata.omit = omit\n return metadata\n\n doc = func.__doc__ or \"\"\n doc_comment = DocumentationComment.from_docstring(doc)\n\n non_optional_params = OrderedDict()\n optional_params = OrderedDict()\n\n argspec = getfullargspec(func)\n args = argspec.args or ()\n defaults = argspec.defaults or ()\n num_non_defaults = len(args) - len(defaults)\n for i, arg in enumerate(args):\n # Implicit self argument or omitted explicitly\n if i < 1 and ismethod(func):\n continue\n\n if i < num_non_defaults:\n non_optional_params[arg] = (\n doc_comment.param_dict.get(arg, cls.str_nodesc),\n argspec.annotations.get(arg, None))\n else:\n optional_params[arg] = (\n doc_comment.param_dict.get(arg, cls.str_nodesc) + \" (\" +\n cls.str_optional.format(str(defaults[i-num_non_defaults]))\n + \")\",\n argspec.annotations.get(arg, None),\n defaults[i-num_non_defaults])\n\n return cls(name=func.__name__,\n desc=doc_comment.desc,\n retval_desc=doc_comment.retval_desc,\n non_optional_params=non_optional_params,\n optional_params=optional_params,\n omit=omit)\n", "path": "coalib/settings/FunctionMetadata.py"}]} | 2,361 | 102 |
gh_patches_debug_806 | rasdani/github-patches | git_diff | feast-dev__feast-1742 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dependency PyYAML 5.3.* has vulnerability issues
## Expected Behavior
According to [CVE-2020-14343](https://nvd.nist.gov/vuln/detail/CVE-2020-14343):
> A vulnerability was discovered in the PyYAML library in versions before 5.4, where it is susceptible to arbitrary code execution when it processes untrusted YAML files through the full_load method or with the FullLoader loader. Applications that use the library to process untrusted input may be vulnerable to this flaw. This flaw allows an attacker to execute arbitrary code on the system by abusing the python/object/new constructor. This flaw is due to an incomplete fix for CVE-2020-1747. See CVE-2020-14343.
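For context, the dangerous pattern is passing untrusted documents to `full_load`/`FullLoader`; a small illustrative snippet (not Feast code) of the safe alternative, which avoids arbitrary object construction on any PyYAML version:

```python
import yaml

# Safe for untrusted input: safe_load only ever builds plain Python types.
config = yaml.safe_load("retries: 3\nhosts: [a, b]")
print(config)  # {'retries': 3, 'hosts': ['a', 'b']}

# By contrast, yaml.full_load() / yaml.load(..., Loader=yaml.FullLoader) can be
# tricked into constructing arbitrary Python objects on versions before 5.4.
```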
## Current Behavior
The Feast Python SDK requires `PyYAML==5.3.*`.
This affects not only Feast but also any app that depends on it, since dependencies are shared.
## Steps to reproduce
N/A
### Specifications
N/A
## Possible Solution
Bump PyYAML to a ">=5.4" version.
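Concretely, that would be a one-line change to the `REQUIRED` list in `sdk/python/setup.py`; the exact specifier below is only a suggestion:

```python
REQUIRED = [
    # ... other pinned dependencies stay unchanged ...
    "PyYAML>=5.4",  # was "PyYAML==5.3.*"; 5.4 fixes CVE-2020-14343
    # ...
]
```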
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import glob
15 import os
16 import re
17 import subprocess
18
19 from distutils.cmd import Command
20 from setuptools import find_packages
21
22 try:
23 from setuptools import setup
24 from setuptools.command.install import install
25 from setuptools.command.develop import develop
26 from setuptools.command.egg_info import egg_info
27 from setuptools.command.sdist import sdist
28 from setuptools.command.build_py import build_py
29 except ImportError:
30 from distutils.core import setup
31 from distutils.command.install import install
32 from distutils.command.build_py import build_py
33
34 NAME = "feast"
35 DESCRIPTION = "Python SDK for Feast"
36 URL = "https://github.com/feast-dev/feast"
37 AUTHOR = "Feast"
38 REQUIRES_PYTHON = ">=3.7.0"
39
40 REQUIRED = [
41 "Click==7.*",
42 "colorama>=0.3.9",
43 "fastavro>=1.1.0",
44 "google-api-core>=1.23.0",
45 "googleapis-common-protos==1.52.*",
46 "grpcio>=1.34.0",
47 "Jinja2>=2.0.0",
48 "jsonschema",
49 "mmh3",
50 "pandas>=1.0.0",
51 "pandavro==1.5.*",
52 "protobuf>=3.10",
53 "pyarrow>=2.0.0",
54 "pydantic>=1.0.0",
55 "PyYAML==5.3.*",
56 "tabulate==0.8.*",
57 "tenacity>=7.*",
58 "toml==0.10.*",
59 "tqdm==4.*",
60 ]
61
62 GCP_REQUIRED = [
63 "google-cloud-bigquery>=2.0.*",
64 "google-cloud-bigquery-storage >= 2.0.0",
65 "google-cloud-datastore>=2.1.*",
66 "google-cloud-storage>=1.34.*",
67 "google-cloud-core==1.4.*",
68 ]
69
70 REDIS_REQUIRED = [
71 "redis-py-cluster==2.1.2",
72 ]
73
74 AWS_REQUIRED = [
75 "boto3==1.17.*",
76 ]
77
78 CI_REQUIRED = [
79 "cryptography==3.3.2",
80 "flake8",
81 "black==19.10b0",
82 "isort>=5",
83 "grpcio-tools==1.34.0",
84 "grpcio-testing==1.34.0",
85 "mock==2.0.0",
86 "moto",
87 "mypy==0.790",
88 "mypy-protobuf==1.24",
89 "avro==1.10.0",
90 "gcsfs",
91 "urllib3>=1.25.4",
92 "pytest==6.0.0",
93 "pytest-cov",
94 "pytest-xdist",
95 "pytest-lazy-fixture==0.6.3",
96 "pytest-timeout==1.4.2",
97 "pytest-ordering==0.6.*",
98 "pytest-mock==1.10.4",
99 "Sphinx!=4.0.0",
100 "sphinx-rtd-theme",
101 "adlfs==0.5.9",
102 "firebase-admin==4.5.2",
103 "pre-commit",
104 "assertpy==1.1",
105 "google-cloud-bigquery>=2.0.*",
106 "google-cloud-bigquery-storage >= 2.0.0",
107 "google-cloud-datastore>=2.1.*",
108 "google-cloud-storage>=1.20.*",
109 "google-cloud-core==1.4.*",
110 "redis-py-cluster==2.1.2",
111 "boto3==1.17.*",
112 ]
113
114
115 # README file from Feast repo root directory
116 repo_root = (
117 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
118 .communicate()[0]
119 .rstrip()
120 .decode("utf-8")
121 )
122 README_FILE = os.path.join(repo_root, "README.md")
123 with open(README_FILE, "r") as f:
124 LONG_DESCRIPTION = f.read()
125
126 # Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
127 # Regex modified from default tag regex in:
128 # https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
129 TAG_REGEX = re.compile(
130 r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
131 )
132
133
134 class BuildProtoCommand(Command):
135 description = "Builds the proto files into python files."
136
137 def initialize_options(self):
138 self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
139 self.proto_folder = os.path.join(repo_root, "protos")
140 self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
141 self.sub_folders = ["core", "serving", "types", "storage"]
142
143 def finalize_options(self):
144 pass
145
146 def _generate_protos(self, path):
147 proto_files = glob.glob(os.path.join(self.proto_folder, path))
148
149 subprocess.check_call(self.protoc + [
150 '-I', self.proto_folder,
151 '--python_out', self.this_package,
152 '--grpc_python_out', self.this_package,
153 '--mypy_out', self.this_package] + proto_files)
154
155 def run(self):
156 for sub_folder in self.sub_folders:
157 self._generate_protos(f'feast/{sub_folder}/*.proto')
158
159 from pathlib import Path
160
161 for path in Path('feast/protos').rglob('*.py'):
162 for folder in self.sub_folders:
163 # Read in the file
164 with open(path, 'r') as file:
165 filedata = file.read()
166
167 # Replace the target string
168 filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
169
170 # Write the file out again
171 with open(path, 'w') as file:
172 file.write(filedata)
173
174
175 class BuildCommand(build_py):
176 """Custom build command."""
177
178 def run(self):
179 self.run_command('build_proto')
180 build_py.run(self)
181
182
183 class DevelopCommand(develop):
184 """Custom develop command."""
185
186 def run(self):
187 self.run_command('build_proto')
188 develop.run(self)
189
190
191 setup(
192 name=NAME,
193 author=AUTHOR,
194 description=DESCRIPTION,
195 long_description=LONG_DESCRIPTION,
196 long_description_content_type="text/markdown",
197 python_requires=REQUIRES_PYTHON,
198 url=URL,
199 packages=find_packages(exclude=("tests",)),
200 install_requires=REQUIRED,
201 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
202 # Install dev requirements with: pip install -e .[dev]
203 extras_require={
204 "dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
205 "ci": CI_REQUIRED,
206 "gcp": GCP_REQUIRED,
207 "aws": AWS_REQUIRED,
208 "redis": REDIS_REQUIRED,
209 },
210 include_package_data=True,
211 license="Apache",
212 classifiers=[
213 # Trove classifiers
214 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
215 "License :: OSI Approved :: Apache Software License",
216 "Programming Language :: Python",
217 "Programming Language :: Python :: 3",
218 "Programming Language :: Python :: 3.7",
219 ],
220 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
221 use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
222 setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.34.0", "mypy-protobuf", "sphinx!=4.0.0"],
223 package_data={
224 "": [
225 "protos/feast/**/*.proto",
226 "protos/feast/third_party/grpc/health/v1/*.proto",
227 "protos/tensorflow_metadata/proto/v0/*.proto",
228 "feast/protos/feast/**/*.py",
229 "tensorflow_metadata/proto/v0/*.py"
230 ],
231 },
232 cmdclass={
233 "build_proto": BuildProtoCommand,
234 "build_py": BuildCommand,
235 "develop": DevelopCommand,
236 },
237 )
238
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -52,7 +52,7 @@
"protobuf>=3.10",
"pyarrow>=2.0.0",
"pydantic>=1.0.0",
- "PyYAML==5.3.*",
+ "PyYAML>=5.4.*",
"tabulate==0.8.*",
"tenacity>=7.*",
"toml==0.10.*",
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -52,7 +52,7 @@\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n- \"PyYAML==5.3.*\",\n+ \"PyYAML>=5.4.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n", "issue": "Dependency PyYAML 5.3.* has vulnerability issues\n## Expected Behavior \r\n\r\nAccording to [CVE-2020-14343](https://nvd.nist.gov/vuln/detail/CVE-2020-14343):\r\n\r\n> A vulnerability was discovered in the PyYAML library in versions before 5.4, where it is susceptible to arbitrary code execution when it processes untrusted YAML files through the full_load method or with the FullLoader loader. Applications that use the library to process untrusted input may be vulnerable to this flaw. This flaw allows an attacker to execute arbitrary code on the system by abusing the python/object/new constructor. This flaw is due to an incomplete fix for CVE-2020-1747. See CVE-2020-14343.\r\n\r\n## Current Behavior\r\n\r\nFeast Python SDK requires `PyYAML==5.3.*` version.\r\n\r\nThis not only affects Feast, but also any app depending on it, since dependencies are shared.\r\n\r\n## Steps to reproduce\r\n\r\nN/A\r\n\r\n### Specifications\r\n\r\nN/A\r\n\r\n## Possible Solution\r\n\r\nBump PyYAML to a \">=5.4\" version.\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=1.1.0\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.34.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nREDIS_REQUIRED = [\n \"redis-py-cluster==2.1.2\",\n]\n\nAWS_REQUIRED = [\n \"boto3==1.17.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n 
\"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n \"boto3==1.17.*\",\n]\n\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": 
CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}]} | 3,423 | 125 |
gh_patches_debug_1265 | rasdani/github-patches | git_diff | webkom__lego-1505 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add end_time of an event when getting all events with get request
I want to be able to get the end time of an event when getting all events. I know I can get the end time when getting a specific event, but it is a bit cumbersome.
</issue>
<code>
[start of lego/apps/events/serializers/events.py]
1 from django.db import transaction
2 from rest_framework import serializers
3 from rest_framework.fields import BooleanField, CharField
4
5 from lego.apps.comments.serializers import CommentSerializer
6 from lego.apps.companies.fields import CompanyField
7 from lego.apps.companies.models import Company
8 from lego.apps.content.fields import ContentSerializerField
9 from lego.apps.events.constants import PRESENT
10 from lego.apps.events.fields import ActivationTimeField, SpotsLeftField
11 from lego.apps.events.models import Event, Pool
12 from lego.apps.events.serializers.pools import (
13 PoolAdministrateSerializer,
14 PoolCreateAndUpdateSerializer,
15 PoolReadAuthSerializer,
16 PoolReadSerializer,
17 )
18 from lego.apps.events.serializers.registrations import (
19 RegistrationReadDetailedSerializer,
20 RegistrationReadSerializer,
21 )
22 from lego.apps.files.fields import ImageField
23 from lego.apps.tags.serializers import TagSerializerMixin
24 from lego.apps.users.constants import GROUP_GRADE
25 from lego.apps.users.fields import AbakusGroupField
26 from lego.apps.users.models import AbakusGroup
27 from lego.apps.users.serializers.users import PublicUserSerializer
28 from lego.utils.serializers import BasisModelSerializer
29
30
31 class EventPublicSerializer(BasisModelSerializer):
32
33 thumbnail = ImageField(
34 source="cover",
35 required=False,
36 options={"height": 500, "width": 500, "smart": True},
37 )
38
39 class Meta:
40 model = Event
41 fields = ("id", "title", "description", "event_type", "location", "thumbnail")
42 read_only = True
43
44
45 class EventReadSerializer(TagSerializerMixin, BasisModelSerializer):
46 company = CompanyField(queryset=Company.objects.all())
47 cover = ImageField(required=False, options={"height": 500})
48 thumbnail = ImageField(
49 source="cover",
50 required=False,
51 options={"height": 500, "width": 500, "smart": True},
52 )
53 activation_time = ActivationTimeField()
54
55 class Meta:
56 model = Event
57 fields = (
58 "id",
59 "title",
60 "description",
61 "cover",
62 "event_type",
63 "location",
64 "start_time",
65 "thumbnail",
66 "total_capacity",
67 "company",
68 "registration_count",
69 "tags",
70 "activation_time",
71 )
72 read_only = True
73
74
75 class EventReadDetailedSerializer(TagSerializerMixin, BasisModelSerializer):
76 comments = CommentSerializer(read_only=True, many=True)
77 comment_target = CharField(read_only=True)
78 cover = ImageField(required=False, options={"height": 500})
79 company = CompanyField(queryset=Company.objects.all())
80 responsible_group = AbakusGroupField(
81 queryset=AbakusGroup.objects.all(), required=False, allow_null=True
82 )
83 pools = PoolReadSerializer(many=True)
84 active_capacity = serializers.ReadOnlyField()
85 text = ContentSerializerField()
86 created_by = PublicUserSerializer()
87
88 registration_close_time = serializers.DateTimeField(read_only=True)
89
90 class Meta:
91 model = Event
92 fields = (
93 "id",
94 "title",
95 "description",
96 "cover",
97 "text",
98 "event_type",
99 "location",
100 "comments",
101 "comment_target",
102 "start_time",
103 "end_time",
104 "merge_time",
105 "pools",
106 "registration_close_time",
107 "registration_deadline_hours",
108 "unregistration_deadline",
109 "company",
110 "responsible_group",
111 "active_capacity",
112 "feedback_description",
113 "feedback_required",
114 "is_priced",
115 "price_member",
116 "price_guest",
117 "use_stripe",
118 "payment_due_date",
119 "use_captcha",
120 "waiting_registration_count",
121 "tags",
122 "is_merged",
123 "heed_penalties",
124 "created_by",
125 "is_abakom_only",
126 "registration_count",
127 "survey",
128 "use_consent",
129 )
130 read_only = True
131
132
133 class EventForSurveySerializer(EventReadSerializer):
134 attended_count = serializers.SerializerMethodField()
135
136 class Meta:
137 model = Event
138 fields = EventReadSerializer.Meta.fields + (
139 "registration_count",
140 "waiting_registration_count",
141 "attended_count",
142 )
143 read_only = True
144
145 def get_attended_count(self, event):
146 return event.registrations.filter(presence=PRESENT).count()
147
148
149 class EventUserRegSerializer(EventReadSerializer):
150 user_reg = serializers.SerializerMethodField()
151
152 class Meta:
153 model = Event
154 fields = EventReadSerializer.Meta.fields + ("user_reg",)
155 read_only = True
156
157 def get_user_reg(self, event):
158 return RegistrationReadSerializer(event.user_reg[0]).data
159
160
161 class EventReadUserDetailedSerializer(EventReadDetailedSerializer):
162 """ User specfic event serializer that appends data based on request.user """
163
164 activation_time = ActivationTimeField()
165 spots_left = SpotsLeftField()
166 price = serializers.SerializerMethodField()
167
168 class Meta(EventReadDetailedSerializer.Meta):
169 fields = EventReadDetailedSerializer.Meta.fields + (
170 "price",
171 "activation_time",
172 "spots_left",
173 )
174
175 def get_price(self, obj):
176 request = self.context.get("request", None)
177 if request:
178 return obj.get_price(user=request.user)
179
180
181 class EventReadAuthUserDetailedSerializer(EventReadUserDetailedSerializer):
182 pools = PoolReadAuthSerializer(many=True)
183 waiting_registrations = RegistrationReadSerializer(many=True)
184 unanswered_surveys = serializers.SerializerMethodField()
185
186 class Meta(EventReadUserDetailedSerializer.Meta):
187 fields = EventReadUserDetailedSerializer.Meta.fields + (
188 "waiting_registrations",
189 "unanswered_surveys",
190 )
191
192 def get_unanswered_surveys(self, obj):
193 request = self.context.get("request", None)
194 return request.user.unanswered_surveys()
195
196
197 class EventAdministrateSerializer(EventReadSerializer):
198 pools = PoolAdministrateSerializer(many=True)
199 unregistered = RegistrationReadDetailedSerializer(many=True)
200 waiting_registrations = RegistrationReadDetailedSerializer(many=True)
201
202 class Meta(EventReadSerializer.Meta):
203 fields = EventReadSerializer.Meta.fields + (
204 "pools",
205 "unregistered",
206 "waiting_registrations",
207 "use_consent",
208 )
209
210
211 class EventCreateAndUpdateSerializer(TagSerializerMixin, BasisModelSerializer):
212 cover = ImageField(required=False, options={"height": 500})
213 responsible_group = AbakusGroupField(
214 queryset=AbakusGroup.objects.all(), required=False, allow_null=True
215 )
216 pools = PoolCreateAndUpdateSerializer(many=True, required=False)
217 text = ContentSerializerField()
218 is_abakom_only = BooleanField(required=False, default=False)
219
220 registration_close_time = serializers.DateTimeField(read_only=True)
221
222 class Meta:
223 model = Event
224 fields = (
225 "id",
226 "title",
227 "cover",
228 "description",
229 "text",
230 "company",
231 "responsible_group",
232 "feedback_description",
233 "feedback_required",
234 "event_type",
235 "location",
236 "is_priced",
237 "price_member",
238 "price_guest",
239 "use_stripe",
240 "payment_due_date",
241 "start_time",
242 "end_time",
243 "merge_time",
244 "use_captcha",
245 "tags",
246 "pools",
247 "unregistration_deadline",
248 "pinned",
249 "use_consent",
250 "heed_penalties",
251 "is_abakom_only",
252 "registration_deadline_hours",
253 "registration_close_time",
254 )
255
256 def validate(self, data):
257 """
258 Check that start is before finish.
259 """
260 if hasattr(data, "start_time") and hasattr(data, "end_time"):
261 if data["start_time"] > data["end_time"]:
262 raise serializers.ValidationError(
263 {
264 "end_time": "User does not have the required permissions for time travel"
265 }
266 )
267 return data
268
269 def create(self, validated_data):
270 pools = validated_data.pop("pools", [])
271 is_abakom_only = validated_data.pop("is_abakom_only", False)
272 with transaction.atomic():
273 event = super().create(validated_data)
274 for pool in pools:
275 permission_groups = pool.pop("permission_groups")
276 created_pool = Pool.objects.create(event=event, **pool)
277 created_pool.permission_groups.set(permission_groups)
278 event.set_abakom_only(is_abakom_only)
279 return event
280
281 def update(self, instance, validated_data):
282 pools = validated_data.pop("pools", None)
283 is_abakom_only = validated_data.pop("is_abakom_only", False)
284 with transaction.atomic():
285 if pools is not None:
286 existing_pools = list(instance.pools.all().values_list("id", flat=True))
287 for pool in pools:
288 pool_id = pool.get("id", None)
289 if pool_id in existing_pools:
290 existing_pools.remove(pool_id)
291 permission_groups = pool.pop("permission_groups")
292 created_pool = Pool.objects.update_or_create(
293 event=instance,
294 id=pool_id,
295 defaults={
296 "name": pool.get("name"),
297 "capacity": pool.get("capacity", 0),
298 "activation_date": pool.get("activation_date"),
299 },
300 )[0]
301 created_pool.permission_groups.set(permission_groups)
302 for pool_id in existing_pools:
303 Pool.objects.get(id=pool_id).delete()
304 instance.set_abakom_only(is_abakom_only)
305 return super().update(instance, validated_data)
306
307
308 class EventSearchSerializer(serializers.ModelSerializer):
309 cover = ImageField(required=False, options={"height": 500})
310 thumbnail = ImageField(
311 source="cover",
312 required=False,
313 options={"height": 500, "width": 500, "smart": True},
314 )
315 text = ContentSerializerField()
316 activation_time = ActivationTimeField()
317
318 class Meta:
319 model = Event
320 fields = (
321 "id",
322 "title",
323 "description",
324 "cover",
325 "text",
326 "event_type",
327 "location",
328 "start_time",
329 "thumbnail",
330 "end_time",
331 "total_capacity",
332 "company",
333 "registration_count",
334 "tags",
335 "activation_time",
336 "pinned",
337 )
338 read_only = True
339
340
341 def populate_event_registration_users_with_grade(event_dict):
342 """
343 Populates every user in registrations in a serialized event with `grade`.
344 Mainly used in the administrate endpoint
345 :param event_dict:
346 :return:
347 """
348 grades = AbakusGroup.objects.filter(type=GROUP_GRADE).values("id", "name")
349 grade_dict = {item["id"]: item for item in grades}
350 for pool in event_dict.get("pools", []):
351 for registration in pool.get("registrations", []):
352 user = registration.get("user", {})
353 abakus_groups = user.get("abakus_groups", [])
354 user["grade"] = None
355 for id in abakus_groups:
356 grade = grade_dict.get(id, None)
357 if grade:
358 user["grade"] = grade
359 return event_dict
360
[end of lego/apps/events/serializers/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lego/apps/events/serializers/events.py b/lego/apps/events/serializers/events.py
--- a/lego/apps/events/serializers/events.py
+++ b/lego/apps/events/serializers/events.py
@@ -62,6 +62,7 @@
"event_type",
"location",
"start_time",
+ "end_time",
"thumbnail",
"total_capacity",
"company",
| {"golden_diff": "diff --git a/lego/apps/events/serializers/events.py b/lego/apps/events/serializers/events.py\n--- a/lego/apps/events/serializers/events.py\n+++ b/lego/apps/events/serializers/events.py\n@@ -62,6 +62,7 @@\n \"event_type\",\n \"location\",\n \"start_time\",\n+ \"end_time\",\n \"thumbnail\",\n \"total_capacity\",\n \"company\",\n", "issue": "Add end_time of an event when getting all events with get request\nI want to be able to get the end time of an event when getting all events. I know I can get the end time when getting a specific event, but it is a bit cumbersome.\n", "before_files": [{"content": "from django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.fields import BooleanField, CharField\n\nfrom lego.apps.comments.serializers import CommentSerializer\nfrom lego.apps.companies.fields import CompanyField\nfrom lego.apps.companies.models import Company\nfrom lego.apps.content.fields import ContentSerializerField\nfrom lego.apps.events.constants import PRESENT\nfrom lego.apps.events.fields import ActivationTimeField, SpotsLeftField\nfrom lego.apps.events.models import Event, Pool\nfrom lego.apps.events.serializers.pools import (\n PoolAdministrateSerializer,\n PoolCreateAndUpdateSerializer,\n PoolReadAuthSerializer,\n PoolReadSerializer,\n)\nfrom lego.apps.events.serializers.registrations import (\n RegistrationReadDetailedSerializer,\n RegistrationReadSerializer,\n)\nfrom lego.apps.files.fields import ImageField\nfrom lego.apps.tags.serializers import TagSerializerMixin\nfrom lego.apps.users.constants import GROUP_GRADE\nfrom lego.apps.users.fields import AbakusGroupField\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass EventPublicSerializer(BasisModelSerializer):\n\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n\n class Meta:\n model = Event\n fields = (\"id\", \"title\", \"description\", \"event_type\", \"location\", \"thumbnail\")\n read_only = True\n\n\nclass EventReadSerializer(TagSerializerMixin, BasisModelSerializer):\n company = CompanyField(queryset=Company.objects.all())\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n )\n read_only = True\n\n\nclass EventReadDetailedSerializer(TagSerializerMixin, BasisModelSerializer):\n comments = CommentSerializer(read_only=True, many=True)\n comment_target = CharField(read_only=True)\n cover = ImageField(required=False, options={\"height\": 500})\n company = CompanyField(queryset=Company.objects.all())\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolReadSerializer(many=True)\n active_capacity = serializers.ReadOnlyField()\n text = ContentSerializerField()\n created_by = PublicUserSerializer()\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n 
\"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"comments\",\n \"comment_target\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"pools\",\n \"registration_close_time\",\n \"registration_deadline_hours\",\n \"unregistration_deadline\",\n \"company\",\n \"responsible_group\",\n \"active_capacity\",\n \"feedback_description\",\n \"feedback_required\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"use_captcha\",\n \"waiting_registration_count\",\n \"tags\",\n \"is_merged\",\n \"heed_penalties\",\n \"created_by\",\n \"is_abakom_only\",\n \"registration_count\",\n \"survey\",\n \"use_consent\",\n )\n read_only = True\n\n\nclass EventForSurveySerializer(EventReadSerializer):\n attended_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\n \"registration_count\",\n \"waiting_registration_count\",\n \"attended_count\",\n )\n read_only = True\n\n def get_attended_count(self, event):\n return event.registrations.filter(presence=PRESENT).count()\n\n\nclass EventUserRegSerializer(EventReadSerializer):\n user_reg = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\"user_reg\",)\n read_only = True\n\n def get_user_reg(self, event):\n return RegistrationReadSerializer(event.user_reg[0]).data\n\n\nclass EventReadUserDetailedSerializer(EventReadDetailedSerializer):\n \"\"\" User specfic event serializer that appends data based on request.user \"\"\"\n\n activation_time = ActivationTimeField()\n spots_left = SpotsLeftField()\n price = serializers.SerializerMethodField()\n\n class Meta(EventReadDetailedSerializer.Meta):\n fields = EventReadDetailedSerializer.Meta.fields + (\n \"price\",\n \"activation_time\",\n \"spots_left\",\n )\n\n def get_price(self, obj):\n request = self.context.get(\"request\", None)\n if request:\n return obj.get_price(user=request.user)\n\n\nclass EventReadAuthUserDetailedSerializer(EventReadUserDetailedSerializer):\n pools = PoolReadAuthSerializer(many=True)\n waiting_registrations = RegistrationReadSerializer(many=True)\n unanswered_surveys = serializers.SerializerMethodField()\n\n class Meta(EventReadUserDetailedSerializer.Meta):\n fields = EventReadUserDetailedSerializer.Meta.fields + (\n \"waiting_registrations\",\n \"unanswered_surveys\",\n )\n\n def get_unanswered_surveys(self, obj):\n request = self.context.get(\"request\", None)\n return request.user.unanswered_surveys()\n\n\nclass EventAdministrateSerializer(EventReadSerializer):\n pools = PoolAdministrateSerializer(many=True)\n unregistered = RegistrationReadDetailedSerializer(many=True)\n waiting_registrations = RegistrationReadDetailedSerializer(many=True)\n\n class Meta(EventReadSerializer.Meta):\n fields = EventReadSerializer.Meta.fields + (\n \"pools\",\n \"unregistered\",\n \"waiting_registrations\",\n \"use_consent\",\n )\n\n\nclass EventCreateAndUpdateSerializer(TagSerializerMixin, BasisModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolCreateAndUpdateSerializer(many=True, required=False)\n text = ContentSerializerField()\n is_abakom_only = BooleanField(required=False, default=False)\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n 
\"cover\",\n \"description\",\n \"text\",\n \"company\",\n \"responsible_group\",\n \"feedback_description\",\n \"feedback_required\",\n \"event_type\",\n \"location\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"use_captcha\",\n \"tags\",\n \"pools\",\n \"unregistration_deadline\",\n \"pinned\",\n \"use_consent\",\n \"heed_penalties\",\n \"is_abakom_only\",\n \"registration_deadline_hours\",\n \"registration_close_time\",\n )\n\n def validate(self, data):\n \"\"\"\n Check that start is before finish.\n \"\"\"\n if hasattr(data, \"start_time\") and hasattr(data, \"end_time\"):\n if data[\"start_time\"] > data[\"end_time\"]:\n raise serializers.ValidationError(\n {\n \"end_time\": \"User does not have the required permissions for time travel\"\n }\n )\n return data\n\n def create(self, validated_data):\n pools = validated_data.pop(\"pools\", [])\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n event = super().create(validated_data)\n for pool in pools:\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.create(event=event, **pool)\n created_pool.permission_groups.set(permission_groups)\n event.set_abakom_only(is_abakom_only)\n return event\n\n def update(self, instance, validated_data):\n pools = validated_data.pop(\"pools\", None)\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n if pools is not None:\n existing_pools = list(instance.pools.all().values_list(\"id\", flat=True))\n for pool in pools:\n pool_id = pool.get(\"id\", None)\n if pool_id in existing_pools:\n existing_pools.remove(pool_id)\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.update_or_create(\n event=instance,\n id=pool_id,\n defaults={\n \"name\": pool.get(\"name\"),\n \"capacity\": pool.get(\"capacity\", 0),\n \"activation_date\": pool.get(\"activation_date\"),\n },\n )[0]\n created_pool.permission_groups.set(permission_groups)\n for pool_id in existing_pools:\n Pool.objects.get(id=pool_id).delete()\n instance.set_abakom_only(is_abakom_only)\n return super().update(instance, validated_data)\n\n\nclass EventSearchSerializer(serializers.ModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n text = ContentSerializerField()\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"end_time\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n \"pinned\",\n )\n read_only = True\n\n\ndef populate_event_registration_users_with_grade(event_dict):\n \"\"\"\n Populates every user in registrations in a serialized event with `grade`.\n Mainly used in the administrate endpoint\n :param event_dict:\n :return:\n \"\"\"\n grades = AbakusGroup.objects.filter(type=GROUP_GRADE).values(\"id\", \"name\")\n grade_dict = {item[\"id\"]: item for item in grades}\n for pool in event_dict.get(\"pools\", []):\n for registration in pool.get(\"registrations\", []):\n user = registration.get(\"user\", {})\n abakus_groups = user.get(\"abakus_groups\", [])\n user[\"grade\"] = None\n for id in abakus_groups:\n grade = 
grade_dict.get(id, None)\n if grade:\n user[\"grade\"] = grade\n return event_dict\n", "path": "lego/apps/events/serializers/events.py"}]} | 3,977 | 97 |
gh_patches_debug_5099 | rasdani/github-patches | git_diff | translate__pootle-6747 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add current character count when entering translations
We're trying to use Pootle to translate metadata strings for an app in the iOS AppStore. The metadata includes individual messages for the app name, subtitle, description, privacy URL and so on and there are different limits on the number of characters allowed in each of them. For instance, an app's name can be no more than 30 characters.
When entering translations, it would be really helpful to see the current number of characters that the translation uses as you type to ensure that you're not exceeding the limit. This could maybe fit on the lower right corner of the input view. You currently have timeline / comment / raw on the left. Current character count could just be a small label that floats to the right on the same line.
# Environment (i.e. 'pootle --version', DB, OS, Browser):
Pootle 2.8.0
</issue>
<code>
[start of pootle/core/templatetags/core.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django import template
11 from django.utils.html import escapejs
12 from django.utils.safestring import mark_safe
13
14 from ..utils.json import jsonify
15
16
17 register = template.Library()
18
19
20 @register.filter
21 def to_js(value):
22 """Returns a string which leaves the value readily available for JS
23 consumption.
24 """
25 return mark_safe('JSON.parse("%s")' % escapejs(jsonify(value)))
26
27
28 @register.inclusion_tag('includes/formtable.html')
29 def formtable(formtable):
30 return dict(formtable=formtable)
31
[end of pootle/core/templatetags/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/core/templatetags/core.py b/pootle/core/templatetags/core.py
--- a/pootle/core/templatetags/core.py
+++ b/pootle/core/templatetags/core.py
@@ -25,6 +25,13 @@
return mark_safe('JSON.parse("%s")' % escapejs(jsonify(value)))
[email protected]
+def map_to_lengths(value):
+ """Maps a list value by replacing each element with its length.
+ """
+ return [len(e) for e in value]
+
+
@register.inclusion_tag('includes/formtable.html')
def formtable(formtable):
return dict(formtable=formtable)
| {"golden_diff": "diff --git a/pootle/core/templatetags/core.py b/pootle/core/templatetags/core.py\n--- a/pootle/core/templatetags/core.py\n+++ b/pootle/core/templatetags/core.py\n@@ -25,6 +25,13 @@\n return mark_safe('JSON.parse(\"%s\")' % escapejs(jsonify(value)))\n \n \[email protected]\n+def map_to_lengths(value):\n+ \"\"\"Maps a list value by replacing each element with its length.\n+ \"\"\"\n+ return [len(e) for e in value]\n+\n+\n @register.inclusion_tag('includes/formtable.html')\n def formtable(formtable):\n return dict(formtable=formtable)\n", "issue": "Add current character count when entering translations\nWe're trying to use Pootle to translate metadata strings for an app in the iOS AppStore. The metadata includes individual messages for the app name, subtitle, description, privacy URL and so on and there are different limits on the number of characters allowed in each of them. For instance, an app's name can be no more than 30 characters. \r\n\r\nWhen entering translations, it would be really helpful to see the current number of characters that the translation uses as you type to ensure that you're not exceeding the limit. This could maybe fit on the lower right corner of the input view. You currently have timeline / comment / raw on the left. Current character count could just be a small label that floats to the right on the same line.\r\n\r\n# Environment (i.e. 'pootle --version', DB, OS, Browser):\r\n\r\nPootle 2.8.0\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import template\nfrom django.utils.html import escapejs\nfrom django.utils.safestring import mark_safe\n\nfrom ..utils.json import jsonify\n\n\nregister = template.Library()\n\n\[email protected]\ndef to_js(value):\n \"\"\"Returns a string which leaves the value readily available for JS\n consumption.\n \"\"\"\n return mark_safe('JSON.parse(\"%s\")' % escapejs(jsonify(value)))\n\n\[email protected]_tag('includes/formtable.html')\ndef formtable(formtable):\n return dict(formtable=formtable)\n", "path": "pootle/core/templatetags/core.py"}]} | 979 | 157 |
gh_patches_debug_3964 | rasdani/github-patches | git_diff | learningequality__kolibri-11933 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSV Report generation API tests fail when the current date in the active timezone is different to the UTC date
## Observed behavior
When running the tests in `kolibri/plugins/facility/test/test_api.py` 2 tests can sometimes fail - the ones to do with generating a CSV Summary Log or Session Log report.
This occurs when the date as reported on the local machine in the currently set timezone is different to the current date according to UTC. In my specific case, the tests would fail when I ran them after 4pm PST (UTC - 8). I have not tested, but would assume that a similar issue would occur running the tests at say 7am in (UTC + 8) timezone.
## Errors and logs
```
kolibri/plugins/facility/test/test_api.py F..F..... [100%]
==================================================================================================== FAILURES =====================================================================================================
_____________________________________________________________________ ContentSummaryLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________
self = <kolibri.plugins.facility.test.test_api.ContentSummaryLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268344491728'>
@mock.patch.object(log_exports_cleanup, "enqueue", return_value=None)
def test_csv_download_admin_permissions(self, mock_enqueue):
call_command(
"exportlogs",
log_type="summary",
output_file=output_filename(
"summary",
self.facility,
start_date=self.start_date,
end_date=self.end_date,
),
overwrite=True,
start_date=self.start_date,
end_date=self.end_date,
)
self.client.login(
username=self.admin.username,
password=DUMMY_PASSWORD,
facility=self.facility,
)
response = self.client.get(
reverse(
"kolibri:kolibri.plugins.facility:download_csv_file",
kwargs={"csv_type": "summary", "facility_id": self.facility.id},
)
)
> self.assertEqual(response.status_code, 200)
E AssertionError: 404 != 200
kolibri/plugins/facility/test/test_api.py:149: AssertionError
---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------
Installed 2 object(s) from 1 fixture(s)
---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------
INFO 2024-03-01 16:58:51,322 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv
INFO 2024-03-01 16:58:51,327 Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
INFO 2024-03-01 16:58:51,504 127.0.0.1 - - "GET /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/" 404 0 "" "unknown"
WARNING 2024-03-01 16:58:51,504 Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/
------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------
csv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv
exportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
log.py 224 WARNING Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/
_____________________________________________________________________ ContentSessionLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________
self = <kolibri.plugins.facility.test.test_api.ContentSessionLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268351318224'>
@mock.patch.object(log_exports_cleanup, "enqueue", return_value=None)
def test_csv_download_admin_permissions(self, mock_enqueue):
call_command(
"exportlogs",
log_type="session",
output_file=output_filename(
"session",
self.facility,
start_date=self.start_date,
end_date=self.end_date,
),
overwrite=True,
start_date=self.start_date,
end_date=self.end_date,
)
self.client.login(
username=self.admin.username,
password=DUMMY_PASSWORD,
facility=self.facility,
)
response = self.client.get(
reverse(
"kolibri:kolibri.plugins.facility:download_csv_file",
kwargs={"csv_type": "session", "facility_id": self.facility.id},
)
)
> self.assertEqual(response.status_code, 200)
E AssertionError: 404 != 200
kolibri/plugins/facility/test/test_api.py:252: AssertionError
---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------
Installed 2 object(s) from 1 fixture(s)
---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------
INFO 2024-03-01 16:58:52,079 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv
INFO 2024-03-01 16:58:52,084 Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
INFO 2024-03-01 16:58:52,177 127.0.0.1 - - "GET /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/" 404 0 "" "unknown"
WARNING 2024-03-01 16:58:52,177 Not Found: /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/
------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------
csv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv
exportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines
log.py 224 WARNING Not Found: /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/
======================================================================================= 2 failed, 7 passed in 13.86 seconds =======================================================================================
```
Note - I did a test of the actual functionality that relies on this in the Facility plugin, and observed no issues, so this seems at the moment to be purely a testing artifact.
## Expected behavior
Tests should pass regardless of time of day or timezone!
## User-facing consequences
I think none - but would be good to work out why this is happening to be sure.
## Steps to reproduce
Set your system timezone to PST.
Set your system time to after 4pm.
Run the test suite above with pytest.
## Context
Noticed in the develop branch, but also extant on release-v0.16.x
</issue>
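To make the failure mode concrete, here is a minimal editorial sketch (standard library only, not taken from Kolibri or the original report) of how the local calendar date and the UTC calendar date diverge after 16:00 in a UTC-8 timezone, which is exactly the mismatch described above:
```
# Illustration only: local date vs. UTC date after 16:00 in a UTC-8 timezone.
from datetime import datetime, timedelta, timezone

utc_now = datetime(2024, 3, 2, 0, 58, tzinfo=timezone.utc)   # 2024-03-02 in UTC
pst = timezone(timedelta(hours=-8))                          # fixed UTC-8 offset
local_now = utc_now.astimezone(pst)                          # 2024-03-01 16:58 local time

print(utc_now.date())    # 2024-03-02
print(local_now.date())  # 2024-03-01
assert utc_now.date() != local_now.date()  # any date-keyed filename/lookup pair can disagree
```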
<code>
[start of kolibri/utils/time_utils.py]
1 from django.utils import timezone
2
3
4 def local_now():
5 return timezone.localtime(timezone.now())
6
7
8 def naive_utc_datetime(dt):
9 return timezone.make_naive(dt, timezone=timezone.utc)
10
[end of kolibri/utils/time_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/utils/time_utils.py b/kolibri/utils/time_utils.py
--- a/kolibri/utils/time_utils.py
+++ b/kolibri/utils/time_utils.py
@@ -2,8 +2,18 @@
def local_now():
+ """
+ Returns the current time in the local timezone.
+ """
return timezone.localtime(timezone.now())
+def utc_now():
+ """
+ Returns the current time in the UTC timezone.
+ """
+ return timezone.now()
+
+
def naive_utc_datetime(dt):
return timezone.make_naive(dt, timezone=timezone.utc)
| {"golden_diff": "diff --git a/kolibri/utils/time_utils.py b/kolibri/utils/time_utils.py\n--- a/kolibri/utils/time_utils.py\n+++ b/kolibri/utils/time_utils.py\n@@ -2,8 +2,18 @@\n \n \n def local_now():\n+ \"\"\"\n+ Returns the current time in the local timezone.\n+ \"\"\"\n return timezone.localtime(timezone.now())\n \n \n+def utc_now():\n+ \"\"\"\n+ Returns the current time in the UTC timezone.\n+ \"\"\"\n+ return timezone.now()\n+\n+\n def naive_utc_datetime(dt):\n return timezone.make_naive(dt, timezone=timezone.utc)\n", "issue": "CSV Report generation API tests fail when the current date in the active timezone is different to the UTC date\n## Observed behavior\r\nWhen running the tests in `kolibri/plugins/facility/test/test_api.py` 2 tests can sometimes fail - the ones to do with generating a CSV Summary Log or Session Log report.\r\n\r\nThis occurs when the date as reported on the local machine in the currently set timezone is different to the current date according to UTC. In my specific case, the tests would fail when I ran them after 4pm PST (UTC - 8). I have not tested, but would assume that a similar issue would occur running the tests at say 7am in (UTC + 8) timezone.\r\n\r\n## Errors and logs\r\n```\r\nkolibri/plugins/facility/test/test_api.py F..F..... [100%]\r\n\r\n==================================================================================================== FAILURES =====================================================================================================\r\n_____________________________________________________________________ ContentSummaryLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________\r\n\r\nself = <kolibri.plugins.facility.test.test_api.ContentSummaryLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268344491728'>\r\n\r\n @mock.patch.object(log_exports_cleanup, \"enqueue\", return_value=None)\r\n def test_csv_download_admin_permissions(self, mock_enqueue):\r\n call_command(\r\n \"exportlogs\",\r\n log_type=\"summary\",\r\n output_file=output_filename(\r\n \"summary\",\r\n self.facility,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n ),\r\n overwrite=True,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n )\r\n self.client.login(\r\n username=self.admin.username,\r\n password=DUMMY_PASSWORD,\r\n facility=self.facility,\r\n )\r\n response = self.client.get(\r\n reverse(\r\n \"kolibri:kolibri.plugins.facility:download_csv_file\",\r\n kwargs={\"csv_type\": \"summary\", \"facility_id\": self.facility.id},\r\n )\r\n )\r\n> self.assertEqual(response.status_code, 200)\r\nE AssertionError: 404 != 200\r\n\r\nkolibri/plugins/facility/test/test_api.py:149: AssertionError\r\n---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------\r\nInstalled 2 object(s) from 1 fixture(s)\r\n---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------\r\nINFO 2024-03-01 16:58:51,322 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv\r\nINFO 2024-03-01 16:58:51,327 Created csv file 
/home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nINFO 2024-03-01 16:58:51,504 127.0.0.1 - - \"GET /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/\" 404 0 \"\" \"unknown\"\r\nWARNING 2024-03-01 16:58:51,504 Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/\r\n------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------\r\ncsv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv\r\nexportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #0_0f6e_content_summary_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nlog.py 224 WARNING Not Found: /facility/api/downloadcsvfile/summary/0f6ee14289d0447dbb105688560eee29/\r\n_____________________________________________________________________ ContentSessionLogCSVExportTestCase.test_csv_download_admin_permissions ______________________________________________________________________\r\n\r\nself = <kolibri.plugins.facility.test.test_api.ContentSessionLogCSVExportTestCase testMethod=test_csv_download_admin_permissions>, mock_enqueue = <MagicMock name='enqueue' id='140268351318224'>\r\n\r\n @mock.patch.object(log_exports_cleanup, \"enqueue\", return_value=None)\r\n def test_csv_download_admin_permissions(self, mock_enqueue):\r\n call_command(\r\n \"exportlogs\",\r\n log_type=\"session\",\r\n output_file=output_filename(\r\n \"session\",\r\n self.facility,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n ),\r\n overwrite=True,\r\n start_date=self.start_date,\r\n end_date=self.end_date,\r\n )\r\n self.client.login(\r\n username=self.admin.username,\r\n password=DUMMY_PASSWORD,\r\n facility=self.facility,\r\n )\r\n response = self.client.get(\r\n reverse(\r\n \"kolibri:kolibri.plugins.facility:download_csv_file\",\r\n kwargs={\"csv_type\": \"session\", \"facility_id\": self.facility.id},\r\n )\r\n )\r\n> self.assertEqual(response.status_code, 200)\r\nE AssertionError: 404 != 200\r\n\r\nkolibri/plugins/facility/test/test_api.py:252: AssertionError\r\n---------------------------------------------------------------------------------------------- Captured stdout setup ----------------------------------------------------------------------------------------------\r\nInstalled 2 object(s) from 1 fixture(s)\r\n---------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------\r\nINFO 2024-03-01 16:58:52,079 Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv\r\nINFO 2024-03-01 16:58:52,084 Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nINFO 2024-03-01 16:58:52,177 127.0.0.1 - - \"GET /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/\" 404 0 \"\" \"unknown\"\r\nWARNING 2024-03-01 16:58:52,177 Not Found: 
/facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/\r\n------------------------------------------------------------------------------------------------ Captured log call ------------------------------------------------------------------------------------------------\r\ncsv_export.py 186 INFO Creating csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv\r\nexportlogs.py 159 INFO Created csv file /home/richard/github/kolibri/.pytest_kolibri_home/log_export/Rock N' Roll High School #1_207d_content_session_logs_from_2020-10-21_to_2024-03-01.csv with 3 lines\r\nlog.py 224 WARNING Not Found: /facility/api/downloadcsvfile/session/207d21f8516da5fad0659fc6488359d3/\r\n======================================================================================= 2 failed, 7 passed in 13.86 seconds =======================================================================================\r\n```\r\n\r\nNote - I did a test of the actual functionality that relies on this in the Facility plugin, and observed no issues, so this seems at the moment to be purely a testing artifact.\r\n\r\n## Expected behavior\r\nTests should pass regardless of time of day or timezone!\r\n\r\n## User-facing consequences\r\nI think none - but would be good to work out why this is happening to be sure.\r\n\r\n## Steps to reproduce\r\nSet your system timezone to PST.\r\nSet your system time to after 4pm.\r\nRun the test suite above with pytest.\r\n\r\n## Context\r\n\r\nNoticed in the develop branch, but also extant on release-v0.16.x\r\n\n", "before_files": [{"content": "from django.utils import timezone\n\n\ndef local_now():\n return timezone.localtime(timezone.now())\n\n\ndef naive_utc_datetime(dt):\n return timezone.make_naive(dt, timezone=timezone.utc)\n", "path": "kolibri/utils/time_utils.py"}]} | 2,633 | 131 |
gh_patches_debug_27424 | rasdani/github-patches | git_diff | learningequality__kolibri-2117 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
can no longer sign in using a pre-existing superuser account after upgrade
It appears that my superuser account is no longer available since upgrading to the latest develop.
I would have expected it to get migrated to an admin account with superuser flags enabled.
It actually looks like the user might still be there:

</issue>
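Whether the account actually survived the upgrade can be checked from a Django shell on the upgraded install; the following is an editorial diagnostic sketch (not part of the report), with "admin" standing in for the old device owner's username:
```
# Editorial sketch: check whether the old device owner was recreated as a facility user.
# The username "admin" is a placeholder; import paths follow the migration below.
from kolibri.auth.models import Facility, FacilityUser

default_facility = Facility.get_default_facility()
print("default facility:", default_facility)  # None would explain a skipped migration

users = list(FacilityUser.objects.filter(username="admin"))
print("matching facility users:", users)
```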
<code>
[start of kolibri/auth/migrations/0004_auto_20170816_1607.py]
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.9.7 on 2017-08-16 23:07
3 from __future__ import unicode_literals
4
5 import django.core.validators
6 from django.db import migrations, models
7 from kolibri.auth.constants.role_kinds import ADMIN
8
9
10 def device_owner_to_super_user(apps, schema_editor):
11 DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')
12 FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')
13 Facility = apps.get_model('kolibriauth', 'Facility')
14 default_facility = Facility.objects.all().first()
15 DevicePermissions = apps.get_model('device', 'DevicePermissions')
16 DeviceSettings = apps.get_model('device', 'DeviceSettings')
17 Role = apps.get_model('kolibriauth', 'Role')
18 from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole
19 real_default_facility = RealFacility.get_default_facility()
20 # Can't do much if no facilities exist, as no facility to FK the users onto
21 if default_facility:
22 for device_owner in DeviceOwner.objects.all():
23 dataset_id = real_default_facility.dataset_id
24 real_superuser = RealFacilityUser(
25 username=device_owner.username,
26 facility=real_default_facility,
27 dataset_id=dataset_id
28 )
29 uuid = real_superuser.calculate_uuid()
30 superuser = FacilityUser.objects.create(
31 username=device_owner.username,
32 password=device_owner.password,
33 facility=default_facility,
34 full_name=device_owner.full_name,
35 date_joined=device_owner.date_joined,
36 id=uuid,
37 dataset_id=dataset_id,
38 _morango_source_id=real_superuser._morango_source_id,
39 _morango_partition=real_superuser._morango_partition,
40 )
41 real_role = RealRole(
42 user=real_superuser,
43 collection=real_default_facility,
44 kind=ADMIN,
45 dataset_id=dataset_id,
46 )
47 role_uuid = real_role.calculate_uuid()
48 role = Role.objects.create(
49 user=superuser,
50 collection=default_facility,
51 kind=ADMIN,
52 id=role_uuid,
53 dataset_id=dataset_id,
54 _morango_source_id=real_role._morango_source_id,
55 _morango_partition=real_role._morango_partition,
56 )
57 DevicePermissions.objects.create(user=superuser, is_superuser=True)
58 # Finally, set the is_provisioned flag
59 settings, created = DeviceSettings.objects.get_or_create(is_provisioned=True)
60
61
62 class Migration(migrations.Migration):
63
64 dependencies = [
65 ('kolibriauth', '0003_auto_20170621_0958'),
66 ('device', '0001_initial')
67 ]
68
69 operations = [
70 migrations.RunPython(device_owner_to_super_user, migrations.RunPython.noop),
71 migrations.DeleteModel(
72 name='DeviceOwner',
73 ),
74 ]
75
[end of kolibri/auth/migrations/0004_auto_20170816_1607.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/auth/migrations/0004_auto_20170816_1607.py b/kolibri/auth/migrations/0004_auto_20170816_1607.py
--- a/kolibri/auth/migrations/0004_auto_20170816_1607.py
+++ b/kolibri/auth/migrations/0004_auto_20170816_1607.py
@@ -8,17 +8,17 @@
def device_owner_to_super_user(apps, schema_editor):
- DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')
- FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')
- Facility = apps.get_model('kolibriauth', 'Facility')
- default_facility = Facility.objects.all().first()
- DevicePermissions = apps.get_model('device', 'DevicePermissions')
- DeviceSettings = apps.get_model('device', 'DeviceSettings')
- Role = apps.get_model('kolibriauth', 'Role')
from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole
real_default_facility = RealFacility.get_default_facility()
# Can't do much if no facilities exist, as no facility to FK the users onto
- if default_facility:
+ if real_default_facility:
+ DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')
+ FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')
+ Facility = apps.get_model('kolibriauth', 'Facility')
+ default_facility = Facility.objects.get(pk=real_default_facility.id)
+ DevicePermissions = apps.get_model('device', 'DevicePermissions')
+ DeviceSettings = apps.get_model('device', 'DeviceSettings')
+ Role = apps.get_model('kolibriauth', 'Role')
for device_owner in DeviceOwner.objects.all():
dataset_id = real_default_facility.dataset_id
real_superuser = RealFacilityUser(
| {"golden_diff": "diff --git a/kolibri/auth/migrations/0004_auto_20170816_1607.py b/kolibri/auth/migrations/0004_auto_20170816_1607.py\n--- a/kolibri/auth/migrations/0004_auto_20170816_1607.py\n+++ b/kolibri/auth/migrations/0004_auto_20170816_1607.py\n@@ -8,17 +8,17 @@\n \n \n def device_owner_to_super_user(apps, schema_editor):\n- DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n- FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n- Facility = apps.get_model('kolibriauth', 'Facility')\n- default_facility = Facility.objects.all().first()\n- DevicePermissions = apps.get_model('device', 'DevicePermissions')\n- DeviceSettings = apps.get_model('device', 'DeviceSettings')\n- Role = apps.get_model('kolibriauth', 'Role')\n from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole\n real_default_facility = RealFacility.get_default_facility()\n # Can't do much if no facilities exist, as no facility to FK the users onto\n- if default_facility:\n+ if real_default_facility:\n+ DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n+ FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n+ Facility = apps.get_model('kolibriauth', 'Facility')\n+ default_facility = Facility.objects.get(pk=real_default_facility.id)\n+ DevicePermissions = apps.get_model('device', 'DevicePermissions')\n+ DeviceSettings = apps.get_model('device', 'DeviceSettings')\n+ Role = apps.get_model('kolibriauth', 'Role')\n for device_owner in DeviceOwner.objects.all():\n dataset_id = real_default_facility.dataset_id\n real_superuser = RealFacilityUser(\n", "issue": "can no longer sign in using a pre-existing superuser account after upgrade\nIt appears that my superuser account is no longer available since upgrading to the latest develop. 
\r\n\r\nI would have expected it to get migrated to an admin account with superuser flags enabled.\r\n\r\nIt actually looks like the user might still be there:\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2017-08-16 23:07\nfrom __future__ import unicode_literals\n\nimport django.core.validators\nfrom django.db import migrations, models\nfrom kolibri.auth.constants.role_kinds import ADMIN\n\n\ndef device_owner_to_super_user(apps, schema_editor):\n DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n Facility = apps.get_model('kolibriauth', 'Facility')\n default_facility = Facility.objects.all().first()\n DevicePermissions = apps.get_model('device', 'DevicePermissions')\n DeviceSettings = apps.get_model('device', 'DeviceSettings')\n Role = apps.get_model('kolibriauth', 'Role')\n from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole\n real_default_facility = RealFacility.get_default_facility()\n # Can't do much if no facilities exist, as no facility to FK the users onto\n if default_facility:\n for device_owner in DeviceOwner.objects.all():\n dataset_id = real_default_facility.dataset_id\n real_superuser = RealFacilityUser(\n username=device_owner.username,\n facility=real_default_facility,\n dataset_id=dataset_id\n )\n uuid = real_superuser.calculate_uuid()\n superuser = FacilityUser.objects.create(\n username=device_owner.username,\n password=device_owner.password,\n facility=default_facility,\n full_name=device_owner.full_name,\n date_joined=device_owner.date_joined,\n id=uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_superuser._morango_source_id,\n _morango_partition=real_superuser._morango_partition,\n )\n real_role = RealRole(\n user=real_superuser,\n collection=real_default_facility,\n kind=ADMIN,\n dataset_id=dataset_id,\n )\n role_uuid = real_role.calculate_uuid()\n role = Role.objects.create(\n user=superuser,\n collection=default_facility,\n kind=ADMIN,\n id=role_uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_role._morango_source_id,\n _morango_partition=real_role._morango_partition,\n )\n DevicePermissions.objects.create(user=superuser, is_superuser=True)\n # Finally, set the is_provisioned flag\n settings, created = DeviceSettings.objects.get_or_create(is_provisioned=True)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('kolibriauth', '0003_auto_20170621_0958'),\n ('device', '0001_initial')\n ]\n\n operations = [\n migrations.RunPython(device_owner_to_super_user, migrations.RunPython.noop),\n migrations.DeleteModel(\n name='DeviceOwner',\n ),\n ]\n", "path": "kolibri/auth/migrations/0004_auto_20170816_1607.py"}]} | 1,486 | 471 |
gh_patches_debug_18591 | rasdani/github-patches | git_diff | StackStorm__st2-4007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pinned eventlet version has outstanding bugs
Pinned version 0.17 of eventlet has outstanding bugs in its monkey patching of the ssl module.
e.g.
https://github.com/eventlet/eventlet/issues/371
```
# Note: 0.20.0 removed select.poll() on which some of our code and libraries we
# depend on rely
```
@Kami committed this reversion in https://github.com/StackStorm/st2/commit/1ec43d294e6770e56ec8f9990c805cb9dffe98c5
What was the specific issue?
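For readers unfamiliar with the mechanism in question: `eventlet.patcher.original()` returns the unpatched standard-library module, which is the usual escape hatch when a green module (here `select`, whose `poll()` disappeared in eventlet 0.20.0) no longer exposes something a dependency needs. A minimal sketch of that pattern — illustrative only, not StackStorm's code:

```python
import eventlet

eventlet.monkey_patch(select=True)

# The unpatched module still provides select.poll() (on platforms that have it),
# even though the green replacement installed in sys.modules no longer does.
original_select = eventlet.patcher.original("select")
poller = original_select.poll()
```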
</issue>
<code>
[start of st2common/st2common/util/monkey_patch.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 #
15
16 """
17 Module for performing eventlet and other monkey patching.
18 """
19
20 from __future__ import absolute_import
21
22 import sys
23
24 __all__ = [
25 'monkey_patch',
26 'is_use_debugger_flag_provided'
27 ]
28
29 USE_DEBUGGER_FLAG = '--use-debugger'
30 PARENT_ARGS_FLAG = '--parent-args='
31
32
33 def monkey_patch():
34 """
35 Function which performs eventlet monkey patching and also takes into account "--use-debugger"
36 argument in the command line arguments.
37
38 If this argument is found, no monkey patching is performed for the thread module. This allows
39 user to use remote debuggers.
40 """
41 import eventlet
42
43 patch_thread = not is_use_debugger_flag_provided()
44 eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)
45
46
47 def is_use_debugger_flag_provided():
48 # 1. Check sys.argv directly
49 if USE_DEBUGGER_FLAG in sys.argv:
50 return True
51
52 # 2. Check "parent-args" arguments. This is used for spawned processes such as sensors and
53 # Python runner actions
54
55 for arg in sys.argv:
56 if arg.startswith(PARENT_ARGS_FLAG) and USE_DEBUGGER_FLAG in arg:
57 return True
58
59 return False
60
[end of st2common/st2common/util/monkey_patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/st2common/st2common/util/monkey_patch.py b/st2common/st2common/util/monkey_patch.py
--- a/st2common/st2common/util/monkey_patch.py
+++ b/st2common/st2common/util/monkey_patch.py
@@ -23,6 +23,7 @@
__all__ = [
'monkey_patch',
+ 'use_select_poll_workaround',
'is_use_debugger_flag_provided'
]
@@ -44,6 +45,21 @@
eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)
+def use_select_poll_workaround():
+ """
+ Work around for some tests which injects original select module with select.poll()
+ available to sys.modules.
+ """
+ import sys
+ import subprocess
+ import eventlet
+
+ # Work around to get tests to pass with eventlet >= 0.20.0
+ if 'nose' in sys.modules.keys():
+ sys.modules['select'] = eventlet.patcher.original('select')
+ subprocess.select = eventlet.patcher.original('select')
+
+
def is_use_debugger_flag_provided():
# 1. Check sys.argv directly
if USE_DEBUGGER_FLAG in sys.argv:
| {"golden_diff": "diff --git a/st2common/st2common/util/monkey_patch.py b/st2common/st2common/util/monkey_patch.py\n--- a/st2common/st2common/util/monkey_patch.py\n+++ b/st2common/st2common/util/monkey_patch.py\n@@ -23,6 +23,7 @@\n \n __all__ = [\n 'monkey_patch',\n+ 'use_select_poll_workaround',\n 'is_use_debugger_flag_provided'\n ]\n \n@@ -44,6 +45,21 @@\n eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)\n \n \n+def use_select_poll_workaround():\n+ \"\"\"\n+ Work around for some tests which injects original select module with select.poll()\n+ available to sys.modules.\n+ \"\"\"\n+ import sys\n+ import subprocess\n+ import eventlet\n+\n+ # Work around to get tests to pass with eventlet >= 0.20.0\n+ if 'nose' in sys.modules.keys():\n+ sys.modules['select'] = eventlet.patcher.original('select')\n+ subprocess.select = eventlet.patcher.original('select')\n+\n+\n def is_use_debugger_flag_provided():\n # 1. Check sys.argv directly\n if USE_DEBUGGER_FLAG in sys.argv:\n", "issue": "Pinned eventlet version has outstanding bugs\npinned version 0.17 of eventlet has outstanding bugs on it's monkey patching of the ssl module.\r\n\r\ne.g.\r\nhttps://github.com/eventlet/eventlet/issues/371\r\n\r\n```\r\n# Note: 0.20.0 removed select.poll() on which some of our code and libraries we\r\n# depend on rely\r\n```\r\n\r\n@Kami committed this reversion in https://github.com/StackStorm/st2/commit/1ec43d294e6770e56ec8f9990c805cb9dffe98c5\r\n\r\nWhat was the specific issue?\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n#\n\n\"\"\"\nModule for performing eventlet and other monkey patching.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport sys\n\n__all__ = [\n 'monkey_patch',\n 'is_use_debugger_flag_provided'\n]\n\nUSE_DEBUGGER_FLAG = '--use-debugger'\nPARENT_ARGS_FLAG = '--parent-args='\n\n\ndef monkey_patch():\n \"\"\"\n Function which performs eventlet monkey patching and also takes into account \"--use-debugger\"\n argument in the command line arguments.\n\n If this argument is found, no monkey patching is performed for the thread module. This allows\n user to use remote debuggers.\n \"\"\"\n import eventlet\n\n patch_thread = not is_use_debugger_flag_provided()\n eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)\n\n\ndef is_use_debugger_flag_provided():\n # 1. Check sys.argv directly\n if USE_DEBUGGER_FLAG in sys.argv:\n return True\n\n # 2. Check \"parent-args\" arguments. This is used for spawned processes such as sensors and\n # Python runner actions\n\n for arg in sys.argv:\n if arg.startswith(PARENT_ARGS_FLAG) and USE_DEBUGGER_FLAG in arg:\n return True\n\n return False\n", "path": "st2common/st2common/util/monkey_patch.py"}]} | 1,248 | 286 |
gh_patches_debug_8508 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1304 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
capture_exception raises exception in UUID __repr__
**Describe the bug**: When capturing exceptions, if the original exception was raised inside UUID `__init__`, the APM agent itself raises an exception:
```
self = <[AttributeError('int') raised in repr()] UUID object at 0x12508cd00>
def __str__(self):
> hex = '%032x' % self.int
E AttributeError: int
```
**To Reproduce**
1. Add this test to `exception_tests.py`:
```python
def test_fail_on_uuid_raise(elasticapm_client):
def generate_uuid():
from uuid import UUID
return UUID("INVALID")
try:
generate_uuid()
except Exception:
elasticapm_client.capture_exception()
```
**Environment (please complete the following information)**
- OS: MacOS / Linux
- Python version: 3.8.11
- Framework and version [e.g. Django 2.1]: Custom / Starlette
- APM Server version: N/A
- Agent version: 6.3.3
**Additional context**
Add any other context about the problem here.
`stacktrace`:
```
../../elasticapm/base.py:272: in capture_exception
return self.capture("Exception", exc_info=exc_info, handled=handled, **kwargs)
../../elasticapm/base.py:241: in capture
data = self._build_msg_for_logging(
../../elasticapm/base.py:469: in _build_msg_for_logging
result = handler.capture(self, **kwargs)
../../elasticapm/events.py:96: in capture
frames = get_stack_info(
../../elasticapm/utils/stacks.py:325: in get_stack_info
result = get_frame_info(
../../elasticapm/utils/stacks.py:294: in get_frame_info
f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}
../../elasticapm/utils/stacks.py:294: in <dictcomp>
f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}
../../elasticapm/events.py:103: in <lambda>
locals_processor_func=lambda local_var: varmap(
../../elasticapm/utils/__init__.py:71: in varmap
ret = func(name, var, **kwargs)
../../elasticapm/events.py:104: in <lambda>
lambda k, val: shorten(
../../elasticapm/utils/encoding.py:205: in shorten
var = transform(var)
../../elasticapm/utils/encoding.py:132: in transform
ret = repr(value)
/usr/local/Cellar/[email protected]/3.8.11/Frameworks/Python.framework/Versions/3.8/lib/python3.8/uuid.py:268: in __repr__
return '%s(%r)' % (self.__class__.__name__, str(self))
```
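The failure mode is general: `transform()` calls `repr()` on `uuid.UUID` instances, and `repr()` itself raises when the object was never fully initialised (here `self.int` was never set because `UUID.__init__` raised before assigning it, yet the half-built instance still appears in the captured frame locals). A small standalone sketch of the defensive pattern — illustrative, not the agent's actual code:

```python
import uuid


def safe_repr(value):
    # Guard against objects whose own __repr__ blows up, e.g. a UUID whose
    # __init__ raised before self.int was assigned.
    try:
        return repr(value)
    except AttributeError:
        return None


broken = uuid.UUID.__new__(uuid.UUID)  # bypass __init__ to simulate the broken local
print(safe_repr(broken))               # prints None instead of raising the AttributeError
```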
</issue>
<code>
[start of elasticapm/utils/encoding.py]
1 # -*- coding: utf-8 -*-
2 #
3 # BSD 3-Clause License
4 #
5 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
6 # Copyright (c) 2019, Elasticsearch BV
7 # All rights reserved.
8 #
9 # Redistribution and use in source and binary forms, with or without
10 # modification, are permitted provided that the following conditions are met:
11 #
12 # * Redistributions of source code must retain the above copyright notice, this
13 # list of conditions and the following disclaimer.
14 #
15 # * Redistributions in binary form must reproduce the above copyright notice,
16 # this list of conditions and the following disclaimer in the documentation
17 # and/or other materials provided with the distribution.
18 #
19 # * Neither the name of the copyright holder nor the names of its
20 # contributors may be used to endorse or promote products derived from
21 # this software without specific prior written permission.
22 #
23 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
27 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32
33
34 import datetime
35 import itertools
36 import uuid
37 from decimal import Decimal
38
39 from elasticapm.conf.constants import KEYWORD_MAX_LENGTH, LABEL_RE, LABEL_TYPES
40 from elasticapm.utils import compat
41
42 PROTECTED_TYPES = compat.integer_types + (type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time)
43
44
45 def is_protected_type(obj):
46 """Determine if the object instance is of a protected type.
47
48 Objects of protected types are preserved as-is when passed to
49 force_text(strings_only=True).
50 """
51 return isinstance(obj, PROTECTED_TYPES)
52
53
54 def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
55 """
56 Similar to smart_text, except that lazy instances are resolved to
57 strings, rather than kept as lazy objects.
58
59 If strings_only is True, don't convert (some) non-string-like objects.
60 """
61 # Handle the common case first, saves 30-40% when s is an instance of
62 # compat.text_type. This function gets called often in that setting.
63 #
64 # Adapted from Django
65 if isinstance(s, compat.text_type):
66 return s
67 if strings_only and is_protected_type(s):
68 return s
69 try:
70 if not isinstance(s, compat.string_types):
71 if hasattr(s, "__unicode__"):
72 s = s.__unicode__()
73 else:
74 if compat.PY3:
75 if isinstance(s, bytes):
76 s = compat.text_type(s, encoding, errors)
77 else:
78 s = compat.text_type(s)
79 else:
80 s = compat.text_type(bytes(s), encoding, errors)
81 else:
82 # Note: We use .decode() here, instead of compat.text_type(s, encoding,
83 # errors), so that if s is a SafeBytes, it ends up being a
84 # SafeText at the end.
85 s = s.decode(encoding, errors)
86 except UnicodeDecodeError as e:
87 if not isinstance(s, Exception):
88 raise UnicodeDecodeError(*e.args)
89 else:
90 # If we get to here, the caller has passed in an Exception
91 # subclass populated with non-ASCII bytestring data without a
92 # working unicode method. Try to handle this without raising a
93 # further exception by individually forcing the exception args
94 # to unicode.
95 s = " ".join([force_text(arg, encoding, strings_only, errors) for arg in s])
96 return s
97
98
99 def _has_elasticapm_metadata(value):
100 try:
101 return callable(value.__getattribute__("__elasticapm__"))
102 except Exception:
103 return False
104
105
106 def transform(value, stack=None, context=None):
107 # TODO: make this extendable
108 if context is None:
109 context = {}
110 if stack is None:
111 stack = []
112
113 objid = id(value)
114 if objid in context:
115 return "<...>"
116
117 context[objid] = 1
118 transform_rec = lambda o: transform(o, stack + [value], context)
119
120 if any(value is s for s in stack):
121 ret = "cycle"
122 elif isinstance(value, (tuple, list, set, frozenset)):
123 try:
124 ret = type(value)(transform_rec(o) for o in value)
125 except Exception:
126 # We may be dealing with a namedtuple
127 class value_type(list):
128 __name__ = type(value).__name__
129
130 ret = value_type(transform_rec(o) for o in value)
131 elif isinstance(value, uuid.UUID):
132 ret = repr(value)
133 elif isinstance(value, dict):
134 ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))
135 elif isinstance(value, compat.text_type):
136 ret = to_unicode(value)
137 elif isinstance(value, compat.binary_type):
138 ret = to_string(value)
139 elif not isinstance(value, compat.class_types) and _has_elasticapm_metadata(value):
140 ret = transform_rec(value.__elasticapm__())
141 elif isinstance(value, bool):
142 ret = bool(value)
143 elif isinstance(value, float):
144 ret = float(value)
145 elif isinstance(value, int):
146 ret = int(value)
147 elif compat.PY2 and isinstance(value, long): # noqa F821
148 ret = long(value) # noqa F821
149 elif value is not None:
150 try:
151 ret = transform(repr(value))
152 except Exception:
153 # It's common case that a model's __unicode__ definition may try to query the database
154 # which if it was not cleaned up correctly, would hit a transaction aborted exception
155 ret = u"<BadRepr: %s>" % type(value)
156 else:
157 ret = None
158 del context[objid]
159 return ret
160
161
162 def to_unicode(value):
163 try:
164 value = compat.text_type(force_text(value))
165 except (UnicodeEncodeError, UnicodeDecodeError):
166 value = "(Error decoding value)"
167 except Exception: # in some cases we get a different exception
168 try:
169 value = compat.binary_type(repr(type(value)))
170 except Exception:
171 value = "(Error decoding value)"
172 return value
173
174
175 def to_string(value):
176 try:
177 return compat.binary_type(value.decode("utf-8").encode("utf-8"))
178 except Exception:
179 return to_unicode(value).encode("utf-8")
180
181
182 def shorten(var, list_length=50, string_length=200, dict_length=50, **kwargs):
183 """
184 Shorten a given variable based on configurable maximum lengths, leaving
185 breadcrumbs in the object to show that it was shortened.
186
187 For strings, truncate the string to the max length, and append "..." so
188 the user knows data was lost.
189
190 For lists, truncate the list to the max length, and append two new strings
191 to the list: "..." and "(<x> more elements)" where <x> is the number of
192 elements removed.
193
194 For dicts, truncate the dict to the max length (based on number of key/value
195 pairs) and add a new (key, value) pair to the dict:
196 ("...", "(<x> more elements)") where <x> is the number of key/value pairs
197 removed.
198
199 :param var: Variable to be shortened
200 :param list_length: Max length (in items) of lists
201 :param string_length: Max length (in characters) of strings
202 :param dict_length: Max length (in key/value pairs) of dicts
203 :return: Shortened variable
204 """
205 var = transform(var)
206 if isinstance(var, compat.string_types) and len(var) > string_length:
207 var = var[: string_length - 3] + "..."
208 elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length:
209 # TODO: we should write a real API for storing some metadata with vars when
210 # we get around to doing ref storage
211 var = list(var)[:list_length] + ["...", "(%d more elements)" % (len(var) - list_length,)]
212 elif isinstance(var, dict) and len(var) > dict_length:
213 trimmed_tuples = [(k, v) for (k, v) in itertools.islice(compat.iteritems(var), dict_length)]
214 if "<truncated>" not in var:
215 trimmed_tuples += [("<truncated>", "(%d more elements)" % (len(var) - dict_length))]
216 var = dict(trimmed_tuples)
217 return var
218
219
220 def keyword_field(string):
221 """
222 If the given string is longer than KEYWORD_MAX_LENGTH, truncate it to
223 KEYWORD_MAX_LENGTH-1, adding the "…" character at the end.
224 """
225 if not isinstance(string, compat.string_types) or len(string) <= KEYWORD_MAX_LENGTH:
226 return string
227 return string[: KEYWORD_MAX_LENGTH - 1] + u"…"
228
229
230 def enforce_label_format(labels):
231 """
232 Enforces label format:
233 * dots, double quotes or stars in keys are replaced by underscores
234 * string values are limited to a length of 1024 characters
235 * values can only be of a limited set of types
236
237 :param labels: a dictionary of labels
238 :return: a new dictionary with sanitized keys/values
239 """
240 new = {}
241 for key, value in compat.iteritems(labels):
242 if not isinstance(value, LABEL_TYPES):
243 value = keyword_field(compat.text_type(value))
244 new[LABEL_RE.sub("_", compat.text_type(key))] = value
245 return new
246
[end of elasticapm/utils/encoding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/utils/encoding.py b/elasticapm/utils/encoding.py
--- a/elasticapm/utils/encoding.py
+++ b/elasticapm/utils/encoding.py
@@ -129,7 +129,10 @@
ret = value_type(transform_rec(o) for o in value)
elif isinstance(value, uuid.UUID):
- ret = repr(value)
+ try:
+ ret = repr(value)
+ except AttributeError:
+ ret = None
elif isinstance(value, dict):
ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))
elif isinstance(value, compat.text_type):
| {"golden_diff": "diff --git a/elasticapm/utils/encoding.py b/elasticapm/utils/encoding.py\n--- a/elasticapm/utils/encoding.py\n+++ b/elasticapm/utils/encoding.py\n@@ -129,7 +129,10 @@\n \n ret = value_type(transform_rec(o) for o in value)\n elif isinstance(value, uuid.UUID):\n- ret = repr(value)\n+ try:\n+ ret = repr(value)\n+ except AttributeError:\n+ ret = None\n elif isinstance(value, dict):\n ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))\n elif isinstance(value, compat.text_type):\n", "issue": "capture_exception raises exception in UUID __repr__\n**Describe the bug**: When capturing exceptions, if the exception is because of a raise in UUID `__init__`, apm agent itself raises with exception:\r\n\r\n```\r\nself = <[AttributeError('int') raised in repr()] UUID object at 0x12508cd00>\r\n\r\n def __str__(self):\r\n> hex = '%032x' % self.int\r\nE AttributeError: int\r\n```\r\n\r\n**To Reproduce**\r\n\r\n1. Add this test to `exception_tests.py`:\r\n```python\r\ndef test_fail_on_uuid_raise(elasticapm_client):\r\n def generate_uuid():\r\n from uuid import UUID\r\n return UUID(\"INVALID\")\r\n\r\n try:\r\n generate_uuid()\r\n except Exception:\r\n elasticapm_client.capture_exception()\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: MacOS / Linux\r\n- Python version: 3.8.11\r\n- Framework and version [e.g. Django 2.1]: Custom / Starlette\r\n- APM Server version: N/A\r\n- Agent version: 6.3.3\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n`stacktrace`:\r\n```\r\n../../elasticapm/base.py:272: in capture_exception\r\n return self.capture(\"Exception\", exc_info=exc_info, handled=handled, **kwargs)\r\n../../elasticapm/base.py:241: in capture\r\n data = self._build_msg_for_logging(\r\n../../elasticapm/base.py:469: in _build_msg_for_logging\r\n result = handler.capture(self, **kwargs)\r\n../../elasticapm/events.py:96: in capture\r\n frames = get_stack_info(\r\n../../elasticapm/utils/stacks.py:325: in get_stack_info\r\n result = get_frame_info(\r\n../../elasticapm/utils/stacks.py:294: in get_frame_info\r\n f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}\r\n../../elasticapm/utils/stacks.py:294: in <dictcomp>\r\n f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}\r\n../../elasticapm/events.py:103: in <lambda>\r\n locals_processor_func=lambda local_var: varmap(\r\n../../elasticapm/utils/__init__.py:71: in varmap\r\n ret = func(name, var, **kwargs)\r\n../../elasticapm/events.py:104: in <lambda>\r\n lambda k, val: shorten(\r\n../../elasticapm/utils/encoding.py:205: in shorten\r\n var = transform(var)\r\n../../elasticapm/utils/encoding.py:132: in transform\r\n ret = repr(value)\r\n/usr/local/Cellar/[email protected]/3.8.11/Frameworks/Python.framework/Versions/3.8/lib/python3.8/uuid.py:268: in __repr__\r\n return '%s(%r)' % (self.__class__.__name__, str(self))\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in 
binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport datetime\nimport itertools\nimport uuid\nfrom decimal import Decimal\n\nfrom elasticapm.conf.constants import KEYWORD_MAX_LENGTH, LABEL_RE, LABEL_TYPES\nfrom elasticapm.utils import compat\n\nPROTECTED_TYPES = compat.integer_types + (type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time)\n\n\ndef is_protected_type(obj):\n \"\"\"Determine if the object instance is of a protected type.\n\n Objects of protected types are preserved as-is when passed to\n force_text(strings_only=True).\n \"\"\"\n return isinstance(obj, PROTECTED_TYPES)\n\n\ndef force_text(s, encoding=\"utf-8\", strings_only=False, errors=\"strict\"):\n \"\"\"\n Similar to smart_text, except that lazy instances are resolved to\n strings, rather than kept as lazy objects.\n\n If strings_only is True, don't convert (some) non-string-like objects.\n \"\"\"\n # Handle the common case first, saves 30-40% when s is an instance of\n # compat.text_type. This function gets called often in that setting.\n #\n # Adapted from Django\n if isinstance(s, compat.text_type):\n return s\n if strings_only and is_protected_type(s):\n return s\n try:\n if not isinstance(s, compat.string_types):\n if hasattr(s, \"__unicode__\"):\n s = s.__unicode__()\n else:\n if compat.PY3:\n if isinstance(s, bytes):\n s = compat.text_type(s, encoding, errors)\n else:\n s = compat.text_type(s)\n else:\n s = compat.text_type(bytes(s), encoding, errors)\n else:\n # Note: We use .decode() here, instead of compat.text_type(s, encoding,\n # errors), so that if s is a SafeBytes, it ends up being a\n # SafeText at the end.\n s = s.decode(encoding, errors)\n except UnicodeDecodeError as e:\n if not isinstance(s, Exception):\n raise UnicodeDecodeError(*e.args)\n else:\n # If we get to here, the caller has passed in an Exception\n # subclass populated with non-ASCII bytestring data without a\n # working unicode method. 
Try to handle this without raising a\n # further exception by individually forcing the exception args\n # to unicode.\n s = \" \".join([force_text(arg, encoding, strings_only, errors) for arg in s])\n return s\n\n\ndef _has_elasticapm_metadata(value):\n try:\n return callable(value.__getattribute__(\"__elasticapm__\"))\n except Exception:\n return False\n\n\ndef transform(value, stack=None, context=None):\n # TODO: make this extendable\n if context is None:\n context = {}\n if stack is None:\n stack = []\n\n objid = id(value)\n if objid in context:\n return \"<...>\"\n\n context[objid] = 1\n transform_rec = lambda o: transform(o, stack + [value], context)\n\n if any(value is s for s in stack):\n ret = \"cycle\"\n elif isinstance(value, (tuple, list, set, frozenset)):\n try:\n ret = type(value)(transform_rec(o) for o in value)\n except Exception:\n # We may be dealing with a namedtuple\n class value_type(list):\n __name__ = type(value).__name__\n\n ret = value_type(transform_rec(o) for o in value)\n elif isinstance(value, uuid.UUID):\n ret = repr(value)\n elif isinstance(value, dict):\n ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))\n elif isinstance(value, compat.text_type):\n ret = to_unicode(value)\n elif isinstance(value, compat.binary_type):\n ret = to_string(value)\n elif not isinstance(value, compat.class_types) and _has_elasticapm_metadata(value):\n ret = transform_rec(value.__elasticapm__())\n elif isinstance(value, bool):\n ret = bool(value)\n elif isinstance(value, float):\n ret = float(value)\n elif isinstance(value, int):\n ret = int(value)\n elif compat.PY2 and isinstance(value, long): # noqa F821\n ret = long(value) # noqa F821\n elif value is not None:\n try:\n ret = transform(repr(value))\n except Exception:\n # It's common case that a model's __unicode__ definition may try to query the database\n # which if it was not cleaned up correctly, would hit a transaction aborted exception\n ret = u\"<BadRepr: %s>\" % type(value)\n else:\n ret = None\n del context[objid]\n return ret\n\n\ndef to_unicode(value):\n try:\n value = compat.text_type(force_text(value))\n except (UnicodeEncodeError, UnicodeDecodeError):\n value = \"(Error decoding value)\"\n except Exception: # in some cases we get a different exception\n try:\n value = compat.binary_type(repr(type(value)))\n except Exception:\n value = \"(Error decoding value)\"\n return value\n\n\ndef to_string(value):\n try:\n return compat.binary_type(value.decode(\"utf-8\").encode(\"utf-8\"))\n except Exception:\n return to_unicode(value).encode(\"utf-8\")\n\n\ndef shorten(var, list_length=50, string_length=200, dict_length=50, **kwargs):\n \"\"\"\n Shorten a given variable based on configurable maximum lengths, leaving\n breadcrumbs in the object to show that it was shortened.\n\n For strings, truncate the string to the max length, and append \"...\" so\n the user knows data was lost.\n\n For lists, truncate the list to the max length, and append two new strings\n to the list: \"...\" and \"(<x> more elements)\" where <x> is the number of\n elements removed.\n\n For dicts, truncate the dict to the max length (based on number of key/value\n pairs) and add a new (key, value) pair to the dict:\n (\"...\", \"(<x> more elements)\") where <x> is the number of key/value pairs\n removed.\n\n :param var: Variable to be shortened\n :param list_length: Max length (in items) of lists\n :param string_length: Max length (in characters) of strings\n :param dict_length: Max length (in key/value pairs) of dicts\n 
:return: Shortened variable\n \"\"\"\n var = transform(var)\n if isinstance(var, compat.string_types) and len(var) > string_length:\n var = var[: string_length - 3] + \"...\"\n elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length:\n # TODO: we should write a real API for storing some metadata with vars when\n # we get around to doing ref storage\n var = list(var)[:list_length] + [\"...\", \"(%d more elements)\" % (len(var) - list_length,)]\n elif isinstance(var, dict) and len(var) > dict_length:\n trimmed_tuples = [(k, v) for (k, v) in itertools.islice(compat.iteritems(var), dict_length)]\n if \"<truncated>\" not in var:\n trimmed_tuples += [(\"<truncated>\", \"(%d more elements)\" % (len(var) - dict_length))]\n var = dict(trimmed_tuples)\n return var\n\n\ndef keyword_field(string):\n \"\"\"\n If the given string is longer than KEYWORD_MAX_LENGTH, truncate it to\n KEYWORD_MAX_LENGTH-1, adding the \"\u2026\" character at the end.\n \"\"\"\n if not isinstance(string, compat.string_types) or len(string) <= KEYWORD_MAX_LENGTH:\n return string\n return string[: KEYWORD_MAX_LENGTH - 1] + u\"\u2026\"\n\n\ndef enforce_label_format(labels):\n \"\"\"\n Enforces label format:\n * dots, double quotes or stars in keys are replaced by underscores\n * string values are limited to a length of 1024 characters\n * values can only be of a limited set of types\n\n :param labels: a dictionary of labels\n :return: a new dictionary with sanitized keys/values\n \"\"\"\n new = {}\n for key, value in compat.iteritems(labels):\n if not isinstance(value, LABEL_TYPES):\n value = keyword_field(compat.text_type(value))\n new[LABEL_RE.sub(\"_\", compat.text_type(key))] = value\n return new\n", "path": "elasticapm/utils/encoding.py"}]} | 4,031 | 149 |
gh_patches_debug_7094 | rasdani/github-patches | git_diff | sktime__sktime-735 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SettingWithCopyWarning in Prophet with exogenous data
**Describe the bug**
Running `Prophet` with `X` other than `None` throws the following warning:
```python
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
X["ds"] = X.index
/Users/muhlbach/opt/anaconda3/envs/experimental/lib/python3.8/site-packages/sktime/forecasting/base/adapters/_fbprophet.py:190: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
```
Will you please change line 190 in _fbprophet.py to
```python
X.loc[:,"ds"]=X.index
```
Thanks!
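For context, a self-contained illustration of the warning with hypothetical data (not sktime's): the original assignment trips pandas' chained-assignment detection whenever `X` is a slice of another frame, and either an explicit `.copy()` or the `.loc` form requested above is the usual remedy.

```python
import pandas as pd

X_full = pd.DataFrame({"x0": range(5)}, index=pd.date_range("2021-01-01", periods=5))
X = X_full.iloc[1:]        # a slice pandas may flag as "a copy of a slice"

X["ds"] = X.index          # typically emits SettingWithCopyWarning (the line-190 pattern)

X = X.copy()               # explicit copy removes the ambiguity
X.loc[:, "ds"] = X.index   # no warning
```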
</issue>
<code>
[start of sktime/forecasting/base/adapters/_fbprophet.py]
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4
5 __author__ = ["Markus Löning", "Martin Walter"]
6 __all__ = ["_ProphetAdapter"]
7
8 import os
9
10 import pandas as pd
11
12 from sktime.forecasting.base._base import DEFAULT_ALPHA
13 from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
14 from sktime.forecasting.base._sktime import _SktimeForecaster
15 from contextlib import contextmanager
16
17
18 class _ProphetAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
19 """Base class for interfacing fbprophet and neuralprophet"""
20
21 def fit(self, y, X=None, fh=None, **fit_params):
22 """Fit to training data.
23 Parameters
24 ----------
25 y : pd.Series
26 Target time series to which to fit the forecaster.
27 X : pd.DataFrame, optional (default=None)
28 Exogenous variables.
29 fh : int, list or np.array, optional (default=None)
30 The forecasters horizon with the steps ahead to to predict.
31 Returns
32 -------
33 self : returns an instance of self.
34 """
35 self._instantiate_model()
36 self._check_changepoints()
37 self._set_y_X(y, X, enforce_index_type=pd.DatetimeIndex)
38 self._set_fh(fh)
39
40 # We have to bring the data into the required format for fbprophet:
41 df = pd.DataFrame({"y": y, "ds": y.index})
42
43 # Add seasonality
44 if self.add_seasonality:
45 self._forecaster.add_seasonality(**self.add_seasonality)
46
47 # Add country holidays
48 if self.add_country_holidays:
49 self._forecaster.add_country_holidays(**self.add_country_holidays)
50
51 # Add regressor (multivariate)
52 if X is not None:
53 df, X = _merge_X(df, X)
54 for col in X.columns:
55 self._forecaster.add_regressor(col)
56
57 if self.verbose:
58 self._forecaster.fit(df=df, **fit_params)
59 else:
60 with _suppress_stdout_stderr():
61 self._forecaster.fit(df=df, **fit_params)
62
63 self._is_fitted = True
64 return self
65
66 def predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
67 """Predict
68
69 Parameters
70 ----------
71 fh : array-like
72 The forecasters horizon with the steps ahead to to predict.
73 Default is
74 one-step ahead forecast, i.e. np.array([1]).
75 X : pd.DataFrame, optional
76 Exogenous data, by default None
77 return_pred_int : bool, optional
78 Returns a pd.DataFrame with confidence intervalls, by default False
79 alpha : float, optional
80 Alpha level for confidence intervalls, by default DEFAULT_ALPHA
81
82 Returns
83 -------
84 y_pred : pandas.Series
85 Returns series of predicted values.
86
87 Raises
88 ------
89 Exception
90 Error when merging data
91 """
92 self.check_is_fitted()
93 self._set_fh(fh)
94 self._update_X(X, enforce_index_type=pd.DatetimeIndex)
95
96 fh = self.fh.to_absolute(cutoff=self.cutoff).to_pandas()
97 if not isinstance(fh, pd.DatetimeIndex):
98 raise ValueError("absolute `fh` must be represented as a pd.DatetimeIndex")
99 df = pd.DataFrame({"ds": fh}, index=fh)
100
101 # Merge X with df (of created future DatetimeIndex values)
102 if X is not None:
103 df, X = _merge_X(df, X)
104
105 # don't compute confidence intervals if not asked for
106 with self._return_pred_int(return_pred_int):
107 out = self._forecaster.predict(df)
108
109 out.set_index("ds", inplace=True)
110 y_pred = out.loc[:, "yhat"]
111
112 if return_pred_int:
113 pred_int = out.loc[:, ["yhat_lower", "yhat_upper"]]
114 pred_int.columns = pred_int.columns.str.strip("yhat_")
115 return y_pred, pred_int
116 else:
117 return y_pred
118
119 def get_fitted_params(self):
120 """Get fitted parameters
121
122 Returns
123 -------
124 fitted_params : dict
125
126 References
127 ----------
128 https://facebook.github.io/prophet/docs/additional_topics.html
129 """
130 self.check_is_fitted()
131 fitted_params = {}
132 for name in ["k", "m", "sigma_obs"]:
133 fitted_params[name] = self._forecaster.params[name][0][0]
134 for name in ["delta", "beta"]:
135 fitted_params[name] = self._forecaster.params[name][0]
136 return fitted_params
137
138 def _check_changepoints(self):
139 """Checking arguments for changepoints and assign related arguments
140
141 Returns
142 -------
143 self
144 """
145 if self.changepoints is not None:
146 self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name="ds")
147 self.n_changepoints = len(self.changepoints)
148 self.specified_changepoints = True
149 else:
150 self.specified_changepoints = False
151 return self
152
153 @contextmanager
154 def _return_pred_int(self, return_pred_int):
155 if not return_pred_int:
156 # setting uncertainty samples to zero avoids computing pred ints
157 self._forecaster.uncertainty_samples = 0
158 try:
159 yield
160 finally:
161 if not return_pred_int:
162 self._forecaster.uncertainty_samples = self.uncertainty_samples
163
164
165 def _merge_X(df, X):
166 """Merge X and df on the DatetimeIndex
167
168 Parameters
169 ----------
170 fh : sktime.ForecastingHorizon
171 X : pd.DataFrame
172 Exog data
173 df : pd.DataFrame
174 Contains a DatetimeIndex column "ds"
175
176 Returns
177 -------
178 pd.DataFrame
179 DataFrame with containing X and df (with a DatetimeIndex column "ds")
180
181 Raises
182 ------
183 TypeError
184 Error if merging was not possible
185 """
186 # Merging on the index is unreliable, possibly due to loss of freq information in fh
187 X.columns = X.columns.astype(str)
188 if "ds" in X.columns:
189 raise ValueError("Column name 'ds' is reserved in fbprophet")
190 X["ds"] = X.index
191 # df = df.merge(X, how="inner", on="ds", copy=False)
192 df = df.merge(X, how="inner", on="ds")
193 return df, X.drop(columns="ds")
194
195
196 class _suppress_stdout_stderr(object):
197 """
198 A context manager for doing a "deep suppression" of stdout and stderr in
199 Python, i.e. will suppress all print, even if the print originates in a
200 compiled C/Fortran sub-function.
201 This will not suppress raised exceptions, since exceptions are printed
202 to stderr just before a script exits, and after the context manager has
203 exited (at least, I think that is why it lets exceptions through).
204
205 References
206 ----------
207 https://github.com/facebook/prophet/issues/223
208 """
209
210 def __init__(self):
211 # Open a pair of null files
212 self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
213 # Save the actual stdout (1) and stderr (2) file descriptors.
214 self.save_fds = [os.dup(1), os.dup(2)]
215
216 def __enter__(self):
217 # Assign the null pointers to stdout and stderr.
218 os.dup2(self.null_fds[0], 1)
219 os.dup2(self.null_fds[1], 2)
220
221 def __exit__(self, *_):
222 # Re-assign the real stdout/stderr back to (1) and (2)
223 os.dup2(self.save_fds[0], 1)
224 os.dup2(self.save_fds[1], 2)
225 # Close the null files
226 for fd in self.null_fds + self.save_fds:
227 os.close(fd)
228
[end of sktime/forecasting/base/adapters/_fbprophet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/forecasting/base/adapters/_fbprophet.py b/sktime/forecasting/base/adapters/_fbprophet.py
--- a/sktime/forecasting/base/adapters/_fbprophet.py
+++ b/sktime/forecasting/base/adapters/_fbprophet.py
@@ -187,7 +187,7 @@
X.columns = X.columns.astype(str)
if "ds" in X.columns:
raise ValueError("Column name 'ds' is reserved in fbprophet")
- X["ds"] = X.index
+ X.loc[:, "ds"] = X.index
# df = df.merge(X, how="inner", on="ds", copy=False)
df = df.merge(X, how="inner", on="ds")
return df, X.drop(columns="ds")
| {"golden_diff": "diff --git a/sktime/forecasting/base/adapters/_fbprophet.py b/sktime/forecasting/base/adapters/_fbprophet.py\n--- a/sktime/forecasting/base/adapters/_fbprophet.py\n+++ b/sktime/forecasting/base/adapters/_fbprophet.py\n@@ -187,7 +187,7 @@\n X.columns = X.columns.astype(str)\n if \"ds\" in X.columns:\n raise ValueError(\"Column name 'ds' is reserved in fbprophet\")\n- X[\"ds\"] = X.index\n+ X.loc[:, \"ds\"] = X.index\n # df = df.merge(X, how=\"inner\", on=\"ds\", copy=False)\n df = df.merge(X, how=\"inner\", on=\"ds\")\n return df, X.drop(columns=\"ds\")\n", "issue": "SettingWithCopyWarning in Prophet with exogenous data\n**Describe the bug**\r\nRunning `Prophet` with `X` different from `None` throws the following warning\r\n\r\n```python\r\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\r\n X[\"ds\"] = X.index\r\n/Users/muhlbach/opt/anaconda3/envs/experimental/lib/python3.8/site-packages/sktime/forecasting/base/adapters/_fbprophet.py:190: SettingWithCopyWarning: \r\nA value is trying to be set on a copy of a slice from a DataFrame.\r\nTry using .loc[row_indexer,col_indexer] = value instead\r\n```\r\n\r\nWill you please change line 190 in _fbprophet.py to\r\n```python\r\nX.loc[:,\"ds\"]=X.index\r\n```\r\n\r\nThanks!\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__author__ = [\"Markus L\u00f6ning\", \"Martin Walter\"]\n__all__ = [\"_ProphetAdapter\"]\n\nimport os\n\nimport pandas as pd\n\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin\nfrom sktime.forecasting.base._sktime import _SktimeForecaster\nfrom contextlib import contextmanager\n\n\nclass _ProphetAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):\n \"\"\"Base class for interfacing fbprophet and neuralprophet\"\"\"\n\n def fit(self, y, X=None, fh=None, **fit_params):\n \"\"\"Fit to training data.\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._instantiate_model()\n self._check_changepoints()\n self._set_y_X(y, X, enforce_index_type=pd.DatetimeIndex)\n self._set_fh(fh)\n\n # We have to bring the data into the required format for fbprophet:\n df = pd.DataFrame({\"y\": y, \"ds\": y.index})\n\n # Add seasonality\n if self.add_seasonality:\n self._forecaster.add_seasonality(**self.add_seasonality)\n\n # Add country holidays\n if self.add_country_holidays:\n self._forecaster.add_country_holidays(**self.add_country_holidays)\n\n # Add regressor (multivariate)\n if X is not None:\n df, X = _merge_X(df, X)\n for col in X.columns:\n self._forecaster.add_regressor(col)\n\n if self.verbose:\n self._forecaster.fit(df=df, **fit_params)\n else:\n with _suppress_stdout_stderr():\n self._forecaster.fit(df=df, **fit_params)\n\n self._is_fitted = True\n return self\n\n def predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):\n \"\"\"Predict\n\n Parameters\n ----------\n fh : array-like\n The forecasters horizon with the steps ahead to to predict.\n Default is\n one-step ahead forecast, i.e. 
np.array([1]).\n X : pd.DataFrame, optional\n Exogenous data, by default None\n return_pred_int : bool, optional\n Returns a pd.DataFrame with confidence intervalls, by default False\n alpha : float, optional\n Alpha level for confidence intervalls, by default DEFAULT_ALPHA\n\n Returns\n -------\n y_pred : pandas.Series\n Returns series of predicted values.\n\n Raises\n ------\n Exception\n Error when merging data\n \"\"\"\n self.check_is_fitted()\n self._set_fh(fh)\n self._update_X(X, enforce_index_type=pd.DatetimeIndex)\n\n fh = self.fh.to_absolute(cutoff=self.cutoff).to_pandas()\n if not isinstance(fh, pd.DatetimeIndex):\n raise ValueError(\"absolute `fh` must be represented as a pd.DatetimeIndex\")\n df = pd.DataFrame({\"ds\": fh}, index=fh)\n\n # Merge X with df (of created future DatetimeIndex values)\n if X is not None:\n df, X = _merge_X(df, X)\n\n # don't compute confidence intervals if not asked for\n with self._return_pred_int(return_pred_int):\n out = self._forecaster.predict(df)\n\n out.set_index(\"ds\", inplace=True)\n y_pred = out.loc[:, \"yhat\"]\n\n if return_pred_int:\n pred_int = out.loc[:, [\"yhat_lower\", \"yhat_upper\"]]\n pred_int.columns = pred_int.columns.str.strip(\"yhat_\")\n return y_pred, pred_int\n else:\n return y_pred\n\n def get_fitted_params(self):\n \"\"\"Get fitted parameters\n\n Returns\n -------\n fitted_params : dict\n\n References\n ----------\n https://facebook.github.io/prophet/docs/additional_topics.html\n \"\"\"\n self.check_is_fitted()\n fitted_params = {}\n for name in [\"k\", \"m\", \"sigma_obs\"]:\n fitted_params[name] = self._forecaster.params[name][0][0]\n for name in [\"delta\", \"beta\"]:\n fitted_params[name] = self._forecaster.params[name][0]\n return fitted_params\n\n def _check_changepoints(self):\n \"\"\"Checking arguments for changepoints and assign related arguments\n\n Returns\n -------\n self\n \"\"\"\n if self.changepoints is not None:\n self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name=\"ds\")\n self.n_changepoints = len(self.changepoints)\n self.specified_changepoints = True\n else:\n self.specified_changepoints = False\n return self\n\n @contextmanager\n def _return_pred_int(self, return_pred_int):\n if not return_pred_int:\n # setting uncertainty samples to zero avoids computing pred ints\n self._forecaster.uncertainty_samples = 0\n try:\n yield\n finally:\n if not return_pred_int:\n self._forecaster.uncertainty_samples = self.uncertainty_samples\n\n\ndef _merge_X(df, X):\n \"\"\"Merge X and df on the DatetimeIndex\n\n Parameters\n ----------\n fh : sktime.ForecastingHorizon\n X : pd.DataFrame\n Exog data\n df : pd.DataFrame\n Contains a DatetimeIndex column \"ds\"\n\n Returns\n -------\n pd.DataFrame\n DataFrame with containing X and df (with a DatetimeIndex column \"ds\")\n\n Raises\n ------\n TypeError\n Error if merging was not possible\n \"\"\"\n # Merging on the index is unreliable, possibly due to loss of freq information in fh\n X.columns = X.columns.astype(str)\n if \"ds\" in X.columns:\n raise ValueError(\"Column name 'ds' is reserved in fbprophet\")\n X[\"ds\"] = X.index\n # df = df.merge(X, how=\"inner\", on=\"ds\", copy=False)\n df = df.merge(X, how=\"inner\", on=\"ds\")\n return df, X.drop(columns=\"ds\")\n\n\nclass _suppress_stdout_stderr(object):\n \"\"\"\n A context manager for doing a \"deep suppression\" of stdout and stderr in\n Python, i.e. 
will suppress all print, even if the print originates in a\n compiled C/Fortran sub-function.\n This will not suppress raised exceptions, since exceptions are printed\n to stderr just before a script exits, and after the context manager has\n exited (at least, I think that is why it lets exceptions through).\n\n References\n ----------\n https://github.com/facebook/prophet/issues/223\n \"\"\"\n\n def __init__(self):\n # Open a pair of null files\n self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]\n # Save the actual stdout (1) and stderr (2) file descriptors.\n self.save_fds = [os.dup(1), os.dup(2)]\n\n def __enter__(self):\n # Assign the null pointers to stdout and stderr.\n os.dup2(self.null_fds[0], 1)\n os.dup2(self.null_fds[1], 2)\n\n def __exit__(self, *_):\n # Re-assign the real stdout/stderr back to (1) and (2)\n os.dup2(self.save_fds[0], 1)\n os.dup2(self.save_fds[1], 2)\n # Close the null files\n for fd in self.null_fds + self.save_fds:\n os.close(fd)\n", "path": "sktime/forecasting/base/adapters/_fbprophet.py"}]} | 3,077 | 182 |
gh_patches_debug_9640 | rasdani/github-patches | git_diff | chainer__chainer-7760 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor utility link in optimizer_hooks unit tests
`chainer_tests/optimizer_hooks_tests` uses similar dummy links, which can be refactored to reduce repetition.
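To make the intent concrete, here is a rough sketch of the kind of shared helper the tests could consolidate on — a tiny link holding a single parameter with a preset gradient (names, shapes and values are illustrative, not the current test code):

```python
import numpy as np

import chainer


class SimpleLink(chainer.Link):
    """Shared dummy link for hook tests: one parameter with a fixed gradient."""

    def __init__(self, w, g):
        super(SimpleLink, self).__init__()
        with self.init_scope():
            self.param = chainer.Parameter(w)
        self.param.grad = g


w = np.arange(6, dtype=np.float32).reshape(2, 3)
g = np.full_like(w, 10.0)
link = SimpleLink(w, g)  # each optimizer_hooks test could reuse this fixture
```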
</issue>
<code>
[start of chainer/optimizer_hooks/gradient_hard_clipping.py]
1 import chainer
2 from chainer import backend
3
4
5 class GradientHardClipping(object):
6
7 """Optimizer/UpdateRule hook function for gradient clipping.
8
9 This hook function clips all gradient arrays to be within a lower and upper
10 bound.
11
12 Args:
13 lower_bound (float): The lower bound of the gradient value.
14 upper_bound (float): The upper bound of the gradient value.
15
16 Attributes:
17 ~optimizer_hooks.GradientHardClipping.lower_bound (float): The
18 lower bound of the gradient value.
19 ~optimizer_hooks.GradientHardClipping.upper_bound (float): The
20 upper bound of the gradient value.
21 ~optimizer_hooks.GradientHardClipping.timing (string): Specifies
22 when this hook should be called by the
23 Optimizer/UpdateRule. Valid values are 'pre'
24 (before any updates) and 'post'
25 (after any updates).
26 ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \
27 Specifies if this hook is called for each parameter
28 (``True``) or only once (``False``) by an optimizer to
29 which this hook is registered. This function does
30 not expect users to switch the value from default one,
31 which is `True`.
32
33 .. versionadded:: 4.0.0
34 The *timing* parameter.
35
36 """
37 name = 'GradientHardClipping'
38 call_for_each_param = True
39 timing = 'pre'
40
41 def __init__(self, lower_bound, upper_bound):
42 self.lower_bound = lower_bound
43 self.upper_bound = upper_bound
44
45 def __call__(self, rule, param):
46 grad = param.grad
47 if grad is None:
48 return
49 with chainer.using_device(param.device):
50 xp = param.device.xp
51 # TODO(kshitij12345): Fix when chainerx.clip
52 # supports kwarg `out`.
53 if xp == backend.chainerx \
54 or isinstance(param.grad, backend.intel64.mdarray):
55 grad[:] = grad.clip(self.lower_bound, self.upper_bound)
56 else:
57 # Save on new object allocation when using numpy and cupy
58 # using kwarg `out`
59 xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)
60
[end of chainer/optimizer_hooks/gradient_hard_clipping.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/optimizer_hooks/gradient_hard_clipping.py b/chainer/optimizer_hooks/gradient_hard_clipping.py
--- a/chainer/optimizer_hooks/gradient_hard_clipping.py
+++ b/chainer/optimizer_hooks/gradient_hard_clipping.py
@@ -52,7 +52,7 @@
# supports kwarg `out`.
if xp == backend.chainerx \
or isinstance(param.grad, backend.intel64.mdarray):
- grad[:] = grad.clip(self.lower_bound, self.upper_bound)
+ grad[...] = grad.clip(self.lower_bound, self.upper_bound)
else:
# Save on new object allocation when using numpy and cupy
# using kwarg `out`
| {"golden_diff": "diff --git a/chainer/optimizer_hooks/gradient_hard_clipping.py b/chainer/optimizer_hooks/gradient_hard_clipping.py\n--- a/chainer/optimizer_hooks/gradient_hard_clipping.py\n+++ b/chainer/optimizer_hooks/gradient_hard_clipping.py\n@@ -52,7 +52,7 @@\n # supports kwarg `out`.\n if xp == backend.chainerx \\\n or isinstance(param.grad, backend.intel64.mdarray):\n- grad[:] = grad.clip(self.lower_bound, self.upper_bound)\n+ grad[...] = grad.clip(self.lower_bound, self.upper_bound)\n else:\n # Save on new object allocation when using numpy and cupy\n # using kwarg `out`\n", "issue": "Refactor utility link in optimizer_hooks unit tests\n`chainer_tests/optimizer_hooks_tests` uses similar dummy links, which can be refactored to reduce repetition.\n", "before_files": [{"content": "import chainer\nfrom chainer import backend\n\n\nclass GradientHardClipping(object):\n\n \"\"\"Optimizer/UpdateRule hook function for gradient clipping.\n\n This hook function clips all gradient arrays to be within a lower and upper\n bound.\n\n Args:\n lower_bound (float): The lower bound of the gradient value.\n upper_bound (float): The upper bound of the gradient value.\n\n Attributes:\n ~optimizer_hooks.GradientHardClipping.lower_bound (float): The\n lower bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.upper_bound (float): The\n upper bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.timing (string): Specifies\n when this hook should be called by the\n Optimizer/UpdateRule. Valid values are 'pre'\n (before any updates) and 'post'\n (after any updates).\n ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \\\n Specifies if this hook is called for each parameter\n (``True``) or only once (``False``) by an optimizer to\n which this hook is registered. This function does\n not expect users to switch the value from default one,\n which is `True`.\n\n .. versionadded:: 4.0.0\n The *timing* parameter.\n\n \"\"\"\n name = 'GradientHardClipping'\n call_for_each_param = True\n timing = 'pre'\n\n def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def __call__(self, rule, param):\n grad = param.grad\n if grad is None:\n return\n with chainer.using_device(param.device):\n xp = param.device.xp\n # TODO(kshitij12345): Fix when chainerx.clip\n # supports kwarg `out`.\n if xp == backend.chainerx \\\n or isinstance(param.grad, backend.intel64.mdarray):\n grad[:] = grad.clip(self.lower_bound, self.upper_bound)\n else:\n # Save on new object allocation when using numpy and cupy\n # using kwarg `out`\n xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)\n", "path": "chainer/optimizer_hooks/gradient_hard_clipping.py"}]} | 1,181 | 155 |
gh_patches_debug_7042 | rasdani/github-patches | git_diff | pytorch__vision-7449 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dataset MovingMNIST: `split=None` returns test dataset
### 🐛 Describe the bug
I've found a bug in the code for torchvision's MovingMNIST dataset, which causes only the test dataset to be returned when split=None. According to the documentation, when split is set to None, the entire dataset should be returned. However, this is not currently happening.
https://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L13-L19
I've tested this with the following code:
```python
from torchvision import datasets
import torch
dataset = datasets.MovingMNIST(root="data", download=True)
dataset[0].size() # returns torch.Size([10, 1, 64, 64]), but I expected torch.Size([20, 1, 64, 64])
```
I believe the bug is caused by lines 58-62 in the code, which handle None and test splits together:
https://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L42-L62
To fix this, I propose the following two changes:
- Separate the handling of None and test splits in the code.
- Only process lines 46-50 when split is not None.
Reference issue: #6981
I'm happy to help on this issue, please assign to me on this one.
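
A minimal sketch of what the loading branch could look like after the change (illustrative only -- the exact fix may differ; `data` here is the tensor loaded from `mnist_test_seq.npy`):

```python
# Sketch: keep all 20 frames when split is None.
data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
if self.split == "train":
    data = data[: self.split_ratio]
elif self.split == "test":
    data = data[self.split_ratio :]
# split=None falls through both branches, so the full sequence is kept.
self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
```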
### Versions
PyTorch version: 2.0.0
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 13.2.1 (arm64)
GCC version: Could not collect
Clang version: 14.0.0 (clang-1400.0.29.202)
CMake version: Could not collect
Libc version: N/A
Python version: 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:26:08) [Clang 14.0.6 ] (64-bit runtime)
Python platform: macOS-13.2.1-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Apple M1 Pro
Versions of relevant libraries:
[pip3] mypy-extensions==1.0.0
[pip3] numpy==1.24.2
[pip3] torch==2.0.0
[pip3] torch-tb-profiler==0.4.1
[pip3] torchvision==0.15.1
[conda] numpy 1.24.2 py310h3d2048e_0 conda-forge
[conda] pytorch 2.0.0 py3.10_0 pytorch
[conda] torch 2.0.0 pypi_0 pypi
[conda] torch-tb-profiler 0.4.1 pypi_0 pypi
[conda] torchvision 0.15.1 pypi_0 pypi
cc @pmeier
</issue>
<code>
[start of torchvision/datasets/moving_mnist.py]
1 import os.path
2 from typing import Callable, Optional
3
4 import numpy as np
5 import torch
6 from torchvision.datasets.utils import download_url, verify_str_arg
7 from torchvision.datasets.vision import VisionDataset
8
9
10 class MovingMNIST(VisionDataset):
11 """`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.
12
13 Args:
14 root (string): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.
15 split (string, optional): The dataset split, supports ``None`` (default), ``"train"`` and ``"test"``.
16 If ``split=None``, the full data is returned.
17 split_ratio (int, optional): The split ratio of number of frames. If ``split="train"``, the first split
18 frames ``data[:, :split_ratio]`` is returned. If ``split="test"``, the last split frames ``data[:, split_ratio:]``
19 is returned. If ``split=None``, this parameter is ignored and the all frames data is returned.
20 transform (callable, optional): A function/transform that takes in an torch Tensor
21 and returns a transformed version. E.g, ``transforms.RandomCrop``
22 download (bool, optional): If true, downloads the dataset from the internet and
23 puts it in root directory. If dataset is already downloaded, it is not
24 downloaded again.
25 """
26
27 _URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy"
28
29 def __init__(
30 self,
31 root: str,
32 split: Optional[str] = None,
33 split_ratio: int = 10,
34 download: bool = False,
35 transform: Optional[Callable] = None,
36 ) -> None:
37 super().__init__(root, transform=transform)
38
39 self._base_folder = os.path.join(self.root, self.__class__.__name__)
40 self._filename = self._URL.split("/")[-1]
41
42 if split is not None:
43 verify_str_arg(split, "split", ("train", "test"))
44 self.split = split
45
46 if not isinstance(split_ratio, int):
47 raise TypeError(f"`split_ratio` should be an integer, but got {type(split_ratio)}")
48 elif not (1 <= split_ratio <= 19):
49 raise ValueError(f"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.")
50 self.split_ratio = split_ratio
51
52 if download:
53 self.download()
54
55 if not self._check_exists():
56 raise RuntimeError("Dataset not found. You can use download=True to download it.")
57
58 data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
59 if self.split == "train":
60 data = data[: self.split_ratio]
61 else:
62 data = data[self.split_ratio :]
63 self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
64
65 def __getitem__(self, idx: int) -> torch.Tensor:
66 """
67 Args:
68 index (int): Index
69 Returns:
70 torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.
71 """
72 data = self.data[idx]
73 if self.transform is not None:
74 data = self.transform(data)
75
76 return data
77
78 def __len__(self) -> int:
79 return len(self.data)
80
81 def _check_exists(self) -> bool:
82 return os.path.exists(os.path.join(self._base_folder, self._filename))
83
84 def download(self) -> None:
85 if self._check_exists():
86 return
87
88 download_url(
89 url=self._URL,
90 root=self._base_folder,
91 filename=self._filename,
92 md5="be083ec986bfe91a449d63653c411eb2",
93 )
94
[end of torchvision/datasets/moving_mnist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/datasets/moving_mnist.py b/torchvision/datasets/moving_mnist.py
--- a/torchvision/datasets/moving_mnist.py
+++ b/torchvision/datasets/moving_mnist.py
@@ -58,7 +58,7 @@
data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
if self.split == "train":
data = data[: self.split_ratio]
- else:
+ elif self.split == "test":
data = data[self.split_ratio :]
self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
| {"golden_diff": "diff --git a/torchvision/datasets/moving_mnist.py b/torchvision/datasets/moving_mnist.py\n--- a/torchvision/datasets/moving_mnist.py\n+++ b/torchvision/datasets/moving_mnist.py\n@@ -58,7 +58,7 @@\n data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))\n if self.split == \"train\":\n data = data[: self.split_ratio]\n- else:\n+ elif self.split == \"test\":\n data = data[self.split_ratio :]\n self.data = data.transpose(0, 1).unsqueeze(2).contiguous()\n", "issue": "Dataset MovingMNIST: `split=None` returns test dataset\n### \ud83d\udc1b Describe the bug\n\nI've found a bug in the code for torchvision's MovingMNIST dataset, which causes only the test dataset to be returned when split=None. According to the documentation, when split is set to None, the entire dataset should be returned. However, this is not currently happening.\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L13-L19\r\n\r\nI've tested this with the following code:\r\n```python\r\nfrom torchvision import datasets\r\nimport torch\r\n\r\ndataset = datasets.MovingMNIST(root=\"data\", download=True)\r\ndataset[0].size() # returns torch.Size([10, 1, 64, 64]), but I expected torch.Size([20, 1, 64, 64])\r\n```\r\n\r\nI believe the bug is caused by lines 58-62 in the code, which handle None and test splits together:\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L42-L62\r\n\r\nTo fix this, I propose the following two changes:\r\n- Separate the handling of None and test splits in the code.\r\n- Only process lines 46-50 when split is not None.\r\n\r\nReference issue: #6981 \r\n\r\nI'm happy to help on this issue, please assign to me on this one.\n\n### Versions\n\nPyTorch version: 2.0.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: None\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: macOS 13.2.1 (arm64)\r\nGCC version: Could not collect\r\nClang version: 14.0.0 (clang-1400.0.29.202)\r\nCMake version: Could not collect\r\nLibc version: N/A\r\n\r\nPython version: 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:26:08) [Clang 14.0.6 ] (64-bit runtime)\r\nPython platform: macOS-13.2.1-arm64-arm-64bit\r\nIs CUDA available: False\r\nCUDA runtime version: No CUDA\r\nCUDA_MODULE_LOADING set to: N/A\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nApple M1 Pro\r\n\r\nVersions of relevant libraries:\r\n[pip3] mypy-extensions==1.0.0\r\n[pip3] numpy==1.24.2\r\n[pip3] torch==2.0.0\r\n[pip3] torch-tb-profiler==0.4.1\r\n[pip3] torchvision==0.15.1\r\n[conda] numpy 1.24.2 py310h3d2048e_0 conda-forge\r\n[conda] pytorch 2.0.0 py3.10_0 pytorch\r\n[conda] torch 2.0.0 pypi_0 pypi\r\n[conda] torch-tb-profiler 0.4.1 pypi_0 pypi\r\n[conda] torchvision 0.15.1 pypi_0 pypi\n\ncc @pmeier\nDataset MovingMNIST: `split=None` returns test dataset\n### \ud83d\udc1b Describe the bug\n\nI've found a bug in the code for torchvision's MovingMNIST dataset, which causes only the test dataset to be returned when split=None. According to the documentation, when split is set to None, the entire dataset should be returned. 
However, this is not currently happening.\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L13-L19\r\n\r\nI've tested this with the following code:\r\n```python\r\nfrom torchvision import datasets\r\nimport torch\r\n\r\ndataset = datasets.MovingMNIST(root=\"data\", download=True)\r\ndataset[0].size() # returns torch.Size([10, 1, 64, 64]), but I expected torch.Size([20, 1, 64, 64])\r\n```\r\n\r\nI believe the bug is caused by lines 58-62 in the code, which handle None and test splits together:\r\nhttps://github.com/pytorch/vision/blob/b403bfc771e0caf31efd06d43860b09004f4ac61/torchvision/datasets/moving_mnist.py#L42-L62\r\n\r\nTo fix this, I propose the following two changes:\r\n- Separate the handling of None and test splits in the code.\r\n- Only process lines 46-50 when split is not None.\r\n\r\nReference issue: #6981 \r\n\r\nI'm happy to help on this issue, please assign to me on this one.\n\n### Versions\n\nPyTorch version: 2.0.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: None\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: macOS 13.2.1 (arm64)\r\nGCC version: Could not collect\r\nClang version: 14.0.0 (clang-1400.0.29.202)\r\nCMake version: Could not collect\r\nLibc version: N/A\r\n\r\nPython version: 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:26:08) [Clang 14.0.6 ] (64-bit runtime)\r\nPython platform: macOS-13.2.1-arm64-arm-64bit\r\nIs CUDA available: False\r\nCUDA runtime version: No CUDA\r\nCUDA_MODULE_LOADING set to: N/A\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nApple M1 Pro\r\n\r\nVersions of relevant libraries:\r\n[pip3] mypy-extensions==1.0.0\r\n[pip3] numpy==1.24.2\r\n[pip3] torch==2.0.0\r\n[pip3] torch-tb-profiler==0.4.1\r\n[pip3] torchvision==0.15.1\r\n[conda] numpy 1.24.2 py310h3d2048e_0 conda-forge\r\n[conda] pytorch 2.0.0 py3.10_0 pytorch\r\n[conda] torch 2.0.0 pypi_0 pypi\r\n[conda] torch-tb-profiler 0.4.1 pypi_0 pypi\r\n[conda] torchvision 0.15.1 pypi_0 pypi\n\ncc @pmeier\n", "before_files": [{"content": "import os.path\nfrom typing import Callable, Optional\n\nimport numpy as np\nimport torch\nfrom torchvision.datasets.utils import download_url, verify_str_arg\nfrom torchvision.datasets.vision import VisionDataset\n\n\nclass MovingMNIST(VisionDataset):\n \"\"\"`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.\n split (string, optional): The dataset split, supports ``None`` (default), ``\"train\"`` and ``\"test\"``.\n If ``split=None``, the full data is returned.\n split_ratio (int, optional): The split ratio of number of frames. If ``split=\"train\"``, the first split\n frames ``data[:, :split_ratio]`` is returned. If ``split=\"test\"``, the last split frames ``data[:, split_ratio:]``\n is returned. If ``split=None``, this parameter is ignored and the all frames data is returned.\n transform (callable, optional): A function/transform that takes in an torch Tensor\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. 
If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n _URL = \"http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy\"\n\n def __init__(\n self,\n root: str,\n split: Optional[str] = None,\n split_ratio: int = 10,\n download: bool = False,\n transform: Optional[Callable] = None,\n ) -> None:\n super().__init__(root, transform=transform)\n\n self._base_folder = os.path.join(self.root, self.__class__.__name__)\n self._filename = self._URL.split(\"/\")[-1]\n\n if split is not None:\n verify_str_arg(split, \"split\", (\"train\", \"test\"))\n self.split = split\n\n if not isinstance(split_ratio, int):\n raise TypeError(f\"`split_ratio` should be an integer, but got {type(split_ratio)}\")\n elif not (1 <= split_ratio <= 19):\n raise ValueError(f\"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.\")\n self.split_ratio = split_ratio\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError(\"Dataset not found. You can use download=True to download it.\")\n\n data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))\n if self.split == \"train\":\n data = data[: self.split_ratio]\n else:\n data = data[self.split_ratio :]\n self.data = data.transpose(0, 1).unsqueeze(2).contiguous()\n\n def __getitem__(self, idx: int) -> torch.Tensor:\n \"\"\"\n Args:\n index (int): Index\n Returns:\n torch.Tensor: Video frames (torch Tensor[T, C, H, W]). The `T` is the number of frames.\n \"\"\"\n data = self.data[idx]\n if self.transform is not None:\n data = self.transform(data)\n\n return data\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _check_exists(self) -> bool:\n return os.path.exists(os.path.join(self._base_folder, self._filename))\n\n def download(self) -> None:\n if self._check_exists():\n return\n\n download_url(\n url=self._URL,\n root=self._base_folder,\n filename=self._filename,\n md5=\"be083ec986bfe91a449d63653c411eb2\",\n )\n", "path": "torchvision/datasets/moving_mnist.py"}]} | 3,242 | 141 |
gh_patches_debug_41042 | rasdani/github-patches | git_diff | conda__conda-2915 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Regression] Conda create environment fails on lock if root environment is not under user control
This issue is introduced in Conda 4.1.0 (Conda 4.0.8 works fine).
```
$ conda create -n root2 python=2 [123/1811]
Fetching package metadata .......
Solving package specifications .............
Package plan for installation in environment /home/frol/.conda/envs/root2:
The following NEW packages will be INSTALLED:
openssl: 1.0.2h-1 (soft-link)
pip: 8.1.2-py27_0 (soft-link)
python: 2.7.11-0 (soft-link)
readline: 6.2-2 (soft-link)
setuptools: 23.0.0-py27_0 (soft-link)
sqlite: 3.13.0-0 (soft-link)
tk: 8.5.18-0 (soft-link)
wheel: 0.29.0-py27_0 (soft-link)
zlib: 1.2.8-3 (soft-link)
Proceed ([y]/n)?
Linking packages ...
An unexpected error has occurred, please consider sending the
following traceback to the conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Include the output of the command 'conda info' in your report.
Traceback (most recent call last):
File "/usr/local/miniconda/bin/conda", line 6, in <module>
sys.exit(main())
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py", line 120, in main
args_func(args, p)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py", line 127, in args_func
args.func(args, p)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main_create.py", line 57, in execute
install(args, parser, 'create')
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/install.py", line 407, in install
execute_actions(actions, index, verbose=not args.quiet)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/plan.py", line 566, in execute_actions
inst.execute_instructions(plan, index, verbose)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py", line 137, in execute_instructions
cmd(state, arg)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py", line 80, in LINK_CMD
link(state['prefix'], dist, lt, index=state['index'], shortcuts=shortcuts)
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/install.py", line 1035, in link
with Locked(prefix), Locked(pkgs_dir):
File "/usr/local/miniconda/lib/python2.7/site-packages/conda/lock.py", line 60, in __enter__
os.makedirs(self.lock_path)
File "/usr/local/miniconda/lib/python2.7/os.py", line 157, in makedirs
mkdir(name, mode)
OSError: [Errno 13] Permission denied: '/usr/local/miniconda/pkgs/.conda_lock-949'
```
`/usr/local/miniconda/` is a system-wide installation of miniconda, so obviously, users cannot create lock files there.
P.S. I have a dream that updating conda software won't break things on every release...
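
The failing call is `os.makedirs(self.lock_path)` inside `Locked.__enter__`. One way to make locking tolerant of a read-only, system-wide package cache is to treat a failed lock-directory creation as non-fatal -- the sketch below is illustrative only and may not match the fix that actually ships:

```python
# Sketch of a more tolerant Locked.__enter__ (illustrative, not the shipped fix):
try:
    os.makedirs(self.lock_path)
except OSError:
    # pkgs dir is not writable for this user (e.g. a system-wide install);
    # skip holding the lock rather than aborting the whole operation.
    pass
```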
</issue>
<code>
[start of conda/lock.py]
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 """
8 Tools for working with locks
9
10 A lock is just an empty directory. We use directories because this lets us use
11 the race condition-proof os.makedirs.
12
13 For now, there is one global lock for all of conda, because some things happen
14 globally (such as downloading packages).
15
16 We don't raise an error if the lock is named with the current PID
17 """
18 from __future__ import absolute_import, division, print_function
19
20 import logging
21 import os
22 import time
23
24 from .exceptions import LockError
25
26 LOCKFN = '.conda_lock'
27
28
29 stdoutlog = logging.getLogger('stdoutlog')
30
31
32 class Locked(object):
33 """
34 Context manager to handle locks.
35 """
36 def __init__(self, path, retries=10):
37 self.path = path
38 self.end = "-" + str(os.getpid())
39 self.lock_path = os.path.join(self.path, LOCKFN + self.end)
40 self.retries = retries
41
42 def __enter__(self):
43 # Keep the string "LOCKERROR" in this string so that external
44 # programs can look for it.
45 lockstr = ("""\
46 LOCKERROR: It looks like conda is already doing something.
47 The lock %s was found. Wait for it to finish before continuing.
48 If you are sure that conda is not running, remove it and try again.
49 You can also use: $ conda clean --lock\n""")
50 sleeptime = 1
51
52 for _ in range(self.retries):
53 if os.path.isdir(self.lock_path):
54 stdoutlog.info(lockstr % self.lock_path)
55 stdoutlog.info("Sleeping for %s seconds\n" % sleeptime)
56
57 time.sleep(sleeptime)
58 sleeptime *= 2
59 else:
60 os.makedirs(self.lock_path)
61 return self
62
63 stdoutlog.error("Exceeded max retries, giving up")
64 raise LockError(lockstr % self.lock_path)
65
66 def __exit__(self, exc_type, exc_value, traceback):
67 try:
68 os.rmdir(self.lock_path)
69 os.rmdir(self.path)
70 except OSError:
71 pass
72
[end of conda/lock.py]
[start of conda/exceptions.py]
1 from __future__ import absolute_import, division, print_function
2
3 class CondaException(Exception):
4 pass
5
6
7 class InvalidInstruction(CondaException):
8 def __init__(self, instruction, *args, **kwargs):
9 msg = "No handler for instruction: %r" % instruction
10 super(InvalidInstruction, self).__init__(msg, *args, **kwargs)
11
12 class LockError(RuntimeError, CondaException):
13 pass
14
[end of conda/exceptions.py]
[start of conda/egg_info.py]
1 """
2 Functions related to core conda functionality that relates to manually
3 installed Python packages, e.g. using "python setup.py install", or "pip".
4 """
5 from __future__ import absolute_import, division, print_function
6
7 from io import open
8 import os
9 import re
10 import sys
11 from os.path import isdir, isfile, join
12
13 from .compat import itervalues
14 from .install import linked_data
15 from .misc import rel_path
16
17
18 def get_site_packages_dir(installed_pkgs):
19 for info in itervalues(installed_pkgs):
20 if info['name'] == 'python':
21 if sys.platform == 'win32':
22 stdlib_dir = 'Lib'
23 else:
24 py_ver = info['version'][:3]
25 stdlib_dir = 'lib/python%s' % py_ver
26 return join(stdlib_dir, 'site-packages')
27 return None
28
29
30 def get_egg_info_files(sp_dir):
31 for fn in os.listdir(sp_dir):
32 if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
33 continue
34 path = join(sp_dir, fn)
35 if isfile(path):
36 yield path
37 elif isdir(path):
38 for path2 in [join(path, 'PKG-INFO'),
39 join(path, 'EGG-INFO', 'PKG-INFO'),
40 join(path, 'METADATA')]:
41 if isfile(path2):
42 yield path2
43
44
45 pat = re.compile(r'(\w+):\s*(\S+)', re.I)
46 def parse_egg_info(path):
47 """
48 Parse an .egg-info file and return its canonical distribution name
49 """
50 info = {}
51 for line in open(path, encoding='utf-8'):
52 line = line.strip()
53 m = pat.match(line)
54 if m:
55 key = m.group(1).lower()
56 info[key] = m.group(2)
57 try:
58 return '%(name)s-%(version)s-<pip>' % info
59 except KeyError:
60 pass
61 return None
62
63
64 def get_egg_info(prefix, all_pkgs=False):
65 """
66 Return a set of canonical names of all Python packages (in `prefix`),
67 by inspecting the .egg-info files inside site-packages.
68 By default, only untracked (not conda installed) .egg-info files are
69 considered. Setting `all_pkgs` to True changes this.
70 """
71 installed_pkgs = linked_data(prefix)
72 sp_dir = get_site_packages_dir(installed_pkgs)
73 if sp_dir is None:
74 return set()
75
76 conda_files = set()
77 for info in itervalues(installed_pkgs):
78 conda_files.update(info.get('files', []))
79
80 res = set()
81 for path in get_egg_info_files(join(prefix, sp_dir)):
82 f = rel_path(prefix, path)
83 if all_pkgs or f not in conda_files:
84 try:
85 dist = parse_egg_info(path)
86 except UnicodeDecodeError:
87 dist = None
88 if dist:
89 res.add(dist)
90 return res
91
92
93 if __name__ == '__main__':
94 from pprint import pprint
95 pprint(get_egg_info(sys.prefix))
96
[end of conda/egg_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/egg_info.py b/conda/egg_info.py
--- a/conda/egg_info.py
+++ b/conda/egg_info.py
@@ -15,6 +15,7 @@
from .misc import rel_path
+
def get_site_packages_dir(installed_pkgs):
for info in itervalues(installed_pkgs):
if info['name'] == 'python':
diff --git a/conda/exceptions.py b/conda/exceptions.py
--- a/conda/exceptions.py
+++ b/conda/exceptions.py
@@ -8,6 +8,3 @@
def __init__(self, instruction, *args, **kwargs):
msg = "No handler for instruction: %r" % instruction
super(InvalidInstruction, self).__init__(msg, *args, **kwargs)
-
-class LockError(RuntimeError, CondaException):
- pass
diff --git a/conda/lock.py b/conda/lock.py
--- a/conda/lock.py
+++ b/conda/lock.py
@@ -17,11 +17,11 @@
"""
from __future__ import absolute_import, division, print_function
-import logging
import os
-import time
-
-from .exceptions import LockError
+import logging
+from os.path import join
+import glob
+from time import sleep
LOCKFN = '.conda_lock'
@@ -33,13 +33,15 @@
"""
Context manager to handle locks.
"""
- def __init__(self, path, retries=10):
+ def __init__(self, path):
self.path = path
self.end = "-" + str(os.getpid())
- self.lock_path = os.path.join(self.path, LOCKFN + self.end)
- self.retries = retries
+ self.lock_path = join(self.path, LOCKFN + self.end)
+ self.pattern = join(self.path, LOCKFN + '-*')
+ self.remove = True
def __enter__(self):
+ retries = 10
# Keep the string "LOCKERROR" in this string so that external
# programs can look for it.
lockstr = ("""\
@@ -48,24 +50,33 @@
If you are sure that conda is not running, remove it and try again.
You can also use: $ conda clean --lock\n""")
sleeptime = 1
-
- for _ in range(self.retries):
- if os.path.isdir(self.lock_path):
- stdoutlog.info(lockstr % self.lock_path)
+ files = None
+ while retries:
+ files = glob.glob(self.pattern)
+ if files and not files[0].endswith(self.end):
+ stdoutlog.info(lockstr % str(files))
stdoutlog.info("Sleeping for %s seconds\n" % sleeptime)
-
- time.sleep(sleeptime)
+ sleep(sleeptime)
sleeptime *= 2
+ retries -= 1
else:
- os.makedirs(self.lock_path)
- return self
+ break
+ else:
+ stdoutlog.error("Exceeded max retries, giving up")
+ raise RuntimeError(lockstr % str(files))
- stdoutlog.error("Exceeded max retries, giving up")
- raise LockError(lockstr % self.lock_path)
+ if not files:
+ try:
+ os.makedirs(self.lock_path)
+ except OSError:
+ pass
+ else: # PID lock already here --- someone else will remove it.
+ self.remove = False
def __exit__(self, exc_type, exc_value, traceback):
- try:
- os.rmdir(self.lock_path)
- os.rmdir(self.path)
- except OSError:
- pass
+ if self.remove:
+ for path in self.lock_path, self.path:
+ try:
+ os.rmdir(path)
+ except OSError:
+ pass
| {"golden_diff": "diff --git a/conda/egg_info.py b/conda/egg_info.py\n--- a/conda/egg_info.py\n+++ b/conda/egg_info.py\n@@ -15,6 +15,7 @@\n from .misc import rel_path\n \n \n+\n def get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\ndiff --git a/conda/exceptions.py b/conda/exceptions.py\n--- a/conda/exceptions.py\n+++ b/conda/exceptions.py\n@@ -8,6 +8,3 @@\n def __init__(self, instruction, *args, **kwargs):\n msg = \"No handler for instruction: %r\" % instruction\n super(InvalidInstruction, self).__init__(msg, *args, **kwargs)\n-\n-class LockError(RuntimeError, CondaException):\n- pass\ndiff --git a/conda/lock.py b/conda/lock.py\n--- a/conda/lock.py\n+++ b/conda/lock.py\n@@ -17,11 +17,11 @@\n \"\"\"\n from __future__ import absolute_import, division, print_function\n \n-import logging\n import os\n-import time\n-\n-from .exceptions import LockError\n+import logging\n+from os.path import join\n+import glob\n+from time import sleep\n \n LOCKFN = '.conda_lock'\n \n@@ -33,13 +33,15 @@\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n- def __init__(self, path, retries=10):\n+ def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n- self.lock_path = os.path.join(self.path, LOCKFN + self.end)\n- self.retries = retries\n+ self.lock_path = join(self.path, LOCKFN + self.end)\n+ self.pattern = join(self.path, LOCKFN + '-*')\n+ self.remove = True\n \n def __enter__(self):\n+ retries = 10\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n lockstr = (\"\"\"\\\n@@ -48,24 +50,33 @@\n If you are sure that conda is not running, remove it and try again.\n You can also use: $ conda clean --lock\\n\"\"\")\n sleeptime = 1\n-\n- for _ in range(self.retries):\n- if os.path.isdir(self.lock_path):\n- stdoutlog.info(lockstr % self.lock_path)\n+ files = None\n+ while retries:\n+ files = glob.glob(self.pattern)\n+ if files and not files[0].endswith(self.end):\n+ stdoutlog.info(lockstr % str(files))\n stdoutlog.info(\"Sleeping for %s seconds\\n\" % sleeptime)\n-\n- time.sleep(sleeptime)\n+ sleep(sleeptime)\n sleeptime *= 2\n+ retries -= 1\n else:\n- os.makedirs(self.lock_path)\n- return self\n+ break\n+ else:\n+ stdoutlog.error(\"Exceeded max retries, giving up\")\n+ raise RuntimeError(lockstr % str(files))\n \n- stdoutlog.error(\"Exceeded max retries, giving up\")\n- raise LockError(lockstr % self.lock_path)\n+ if not files:\n+ try:\n+ os.makedirs(self.lock_path)\n+ except OSError:\n+ pass\n+ else: # PID lock already here --- someone else will remove it.\n+ self.remove = False\n \n def __exit__(self, exc_type, exc_value, traceback):\n- try:\n- os.rmdir(self.lock_path)\n- os.rmdir(self.path)\n- except OSError:\n- pass\n+ if self.remove:\n+ for path in self.lock_path, self.path:\n+ try:\n+ os.rmdir(path)\n+ except OSError:\n+ pass\n", "issue": "[Regression] Conda create environment fails on lock if root environment is not under user control\nThis issue is introduced in Conda 4.1.0 (Conda 4.0.8 works fine).\n\n```\n$ conda create -n root2 python=2 [123/1811]\nFetching package metadata .......\nSolving package specifications .............\n\nPackage plan for installation in environment /home/frol/.conda/envs/root2:\n\nThe following NEW packages will be INSTALLED:\n\n openssl: 1.0.2h-1 (soft-link)\n pip: 8.1.2-py27_0 (soft-link)\n python: 2.7.11-0 (soft-link)\n readline: 6.2-2 (soft-link)\n setuptools: 23.0.0-py27_0 (soft-link)\n sqlite: 3.13.0-0 (soft-link)\n tk: 8.5.18-0 
(soft-link)\n wheel: 0.29.0-py27_0 (soft-link)\n zlib: 1.2.8-3 (soft-link)\n\nProceed ([y]/n)?\n\nLinking packages ...\nAn unexpected error has occurred, please consider sending the\nfollowing traceback to the conda GitHub issue tracker at:\n\n https://github.com/conda/conda/issues\n\nInclude the output of the command 'conda info' in your report.\n\n\nTraceback (most recent call last):\n File \"/usr/local/miniconda/bin/conda\", line 6, in <module>\n sys.exit(main())\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py\", line 120, in main\n args_func(args, p)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main.py\", line 127, in args_func\n args.func(args, p)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/main_create.py\", line 57, in execute\n install(args, parser, 'create')\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/cli/install.py\", line 407, in install\n execute_actions(actions, index, verbose=not args.quiet)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/plan.py\", line 566, in execute_actions\n inst.execute_instructions(plan, index, verbose)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py\", line 137, in execute_instructions\n cmd(state, arg)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/instructions.py\", line 80, in LINK_CMD\n link(state['prefix'], dist, lt, index=state['index'], shortcuts=shortcuts)\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/install.py\", line 1035, in link\n with Locked(prefix), Locked(pkgs_dir):\n File \"/usr/local/miniconda/lib/python2.7/site-packages/conda/lock.py\", line 60, in __enter__\n os.makedirs(self.lock_path)\n File \"/usr/local/miniconda/lib/python2.7/os.py\", line 157, in makedirs\n mkdir(name, mode)\nOSError: [Errno 13] Permission denied: '/usr/local/miniconda/pkgs/.conda_lock-949'\n```\n\n`/usr/local/miniconda/` is a system-wide installation of miniconda, so obviously, users cannot create lock files there.\n\nP.S. I have a dream that updating conda software won't break things on every release...\n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\nimport time\n\nfrom .exceptions import LockError\n\nLOCKFN = '.conda_lock'\n\n\nstdoutlog = logging.getLogger('stdoutlog')\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path, retries=10):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = os.path.join(self.path, LOCKFN + self.end)\n self.retries = retries\n\n def __enter__(self):\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n lockstr = (\"\"\"\\\n LOCKERROR: It looks like conda is already doing something.\n The lock %s was found. 
Wait for it to finish before continuing.\n If you are sure that conda is not running, remove it and try again.\n You can also use: $ conda clean --lock\\n\"\"\")\n sleeptime = 1\n\n for _ in range(self.retries):\n if os.path.isdir(self.lock_path):\n stdoutlog.info(lockstr % self.lock_path)\n stdoutlog.info(\"Sleeping for %s seconds\\n\" % sleeptime)\n\n time.sleep(sleeptime)\n sleeptime *= 2\n else:\n os.makedirs(self.lock_path)\n return self\n\n stdoutlog.error(\"Exceeded max retries, giving up\")\n raise LockError(lockstr % self.lock_path)\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n os.rmdir(self.lock_path)\n os.rmdir(self.path)\n except OSError:\n pass\n", "path": "conda/lock.py"}, {"content": "from __future__ import absolute_import, division, print_function\n\nclass CondaException(Exception):\n pass\n\n\nclass InvalidInstruction(CondaException):\n def __init__(self, instruction, *args, **kwargs):\n msg = \"No handler for instruction: %r\" % instruction\n super(InvalidInstruction, self).__init__(msg, *args, **kwargs)\n\nclass LockError(RuntimeError, CondaException):\n pass\n", "path": "conda/exceptions.py"}, {"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom io import open\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom .compat import itervalues\nfrom .install import linked_data\nfrom .misc import rel_path\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if sys.platform == 'win32':\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. 
Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(dist)\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}]} | 3,044 | 865 |
gh_patches_debug_1954 | rasdani/github-patches | git_diff | ivy-llc__ivy-19363 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
T
</issue>
<code>
[start of ivy/functional/frontends/jax/devicearray.py]
1 # global
2
3 # local
4 import ivy
5 import ivy.functional.frontends.jax as jax_frontend
6
7
8 class DeviceArray:
9 def __init__(self, array, weak_type=False):
10 self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)
11 self.weak_type = weak_type
12
13 def __repr__(self):
14 main = (
15 str(self.ivy_array.__repr__())
16 .replace("ivy.array", "ivy.frontends.jax.DeviceArray")
17 .replace(")", "")
18 + ", dtype="
19 + str(self.ivy_array.dtype)
20 )
21 if self.weak_type:
22 return main + ", weak_type=True)"
23 return main + ")"
24
25 # Properties #
26 # ---------- #
27
28 @property
29 def ivy_array(self):
30 return self._ivy_array
31
32 @property
33 def dtype(self):
34 return self.ivy_array.dtype
35
36 @property
37 def shape(self):
38 return self.ivy_array.shape
39
40 @property
41 def at(self):
42 return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)
43
44 # Instance Methods #
45 # ---------------- #
46
47 def all(self, *, axis=None, out=None, keepdims=False):
48 return jax_frontend.numpy.all(
49 self._ivy_array, axis=axis, keepdims=keepdims, out=out
50 )
51
52 def argmax(
53 self,
54 /,
55 *,
56 axis=None,
57 out=None,
58 keepdims=False,
59 ):
60 return jax_frontend.numpy.argmax(
61 self,
62 axis=axis,
63 out=out,
64 keepdims=keepdims,
65 )
66
67 def conj(self, /):
68 return jax_frontend.numpy.conj(self._ivy_array)
69
70 def conjugate(self, /):
71 return jax_frontend.numpy.conjugate(self._ivy_array)
72
73 def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=None):
74 return jax_frontend.numpy.mean(
75 self._ivy_array,
76 axis=axis,
77 dtype=dtype,
78 out=out,
79 keepdims=keepdims,
80 where=where,
81 )
82
83 def cumprod(self, axis=None, dtype=None, out=None):
84 return jax_frontend.numpy.cumprod(
85 self,
86 axis=axis,
87 dtype=dtype,
88 out=out,
89 )
90
91 def cumsum(self, axis=None, dtype=None, out=None):
92 return jax_frontend.numpy.cumsum(
93 self,
94 axis=axis,
95 dtype=dtype,
96 out=out,
97 )
98
99 def nonzero(self, *, size=None, fill_value=None):
100 return jax_frontend.numpy.nonzero(
101 self,
102 size=size,
103 fill_value=fill_value,
104 )
105
106 def ravel(self, order="C"):
107 return jax_frontend.numpy.ravel(
108 self,
109 order=order,
110 )
111
112 def sort(self, axis=-1, order=None):
113 return jax_frontend.numpy.sort(
114 self,
115 axis=axis,
116 order=order,
117 )
118
119 def __add__(self, other):
120 return jax_frontend.numpy.add(self, other)
121
122 def __radd__(self, other):
123 return jax_frontend.numpy.add(other, self)
124
125 def __sub__(self, other):
126 return jax_frontend.lax.sub(self, other)
127
128 def __rsub__(self, other):
129 return jax_frontend.lax.sub(other, self)
130
131 def __mul__(self, other):
132 return jax_frontend.lax.mul(self, other)
133
134 def __rmul__(self, other):
135 return jax_frontend.lax.mul(other, self)
136
137 def __div__(self, other):
138 return jax_frontend.numpy.divide(self, other)
139
140 def __rdiv__(self, other):
141 return jax_frontend.numpy.divide(other, self)
142
143 def __mod__(self, other):
144 return jax_frontend.numpy.mod(self, other)
145
146 def __rmod__(self, other):
147 return jax_frontend.numpy.mod(other, self)
148
149 def __truediv__(self, other):
150 return jax_frontend.numpy.divide(self, other)
151
152 def __rtruediv__(self, other):
153 return jax_frontend.numpy.divide(other, self)
154
155 def __matmul__(self, other):
156 return jax_frontend.numpy.dot(self, other)
157
158 def __rmatmul__(self, other):
159 return jax_frontend.numpy.dot(other, self)
160
161 def __pos__(self):
162 return self
163
164 def __neg__(self):
165 return jax_frontend.lax.neg(self)
166
167 def __eq__(self, other):
168 return jax_frontend.lax.eq(self, other)
169
170 def __ne__(self, other):
171 return jax_frontend.lax.ne(self, other)
172
173 def __lt__(self, other):
174 return jax_frontend.lax.lt(self, other)
175
176 def __le__(self, other):
177 return jax_frontend.lax.le(self, other)
178
179 def __gt__(self, other):
180 return jax_frontend.lax.gt(self, other)
181
182 def __ge__(self, other):
183 return jax_frontend.lax.ge(self, other)
184
185 def __abs__(self):
186 return jax_frontend.numpy.abs(self)
187
188 def __pow__(self, other):
189 return jax_frontend.lax.pow(self, other)
190
191 def __rpow__(self, other):
192 other = ivy.asarray(other)
193 return jax_frontend.lax.pow(other, self)
194
195 def __and__(self, other):
196 return jax_frontend.numpy.bitwise_and(self, other)
197
198 def __rand__(self, other):
199 return jax_frontend.numpy.bitwise_and(other, self)
200
201 def __or__(self, other):
202 return jax_frontend.numpy.bitwise_or(self, other)
203
204 def __ror__(self, other):
205 return jax_frontend.numpy.bitwise_or(other, self)
206
207 def __xor__(self, other):
208 return jax_frontend.lax.bitwise_xor(self, other)
209
210 def __rxor__(self, other):
211 return jax_frontend.lax.bitwise_xor(other, self)
212
213 def __invert__(self):
214 return jax_frontend.lax.bitwise_not(self)
215
216 def __lshift__(self, other):
217 return jax_frontend.lax.shift_left(self, other)
218
219 def __rlshift__(self, other):
220 return jax_frontend.lax.shift_left(other, self)
221
222 def __rshift__(self, other):
223 return jax_frontend.lax.shift_right_logical(self, other)
224
225 def __rrshift__(self, other):
226 return jax_frontend.lax.shift_right_logical(other, self)
227
228 def __getitem__(self, idx):
229 return self.at[idx].get()
230
231 def __setitem__(self, idx, val):
232 raise ivy.utils.exceptions.IvyException(
233 "ivy.functional.frontends.jax.DeviceArray object doesn't support assignment"
234 )
235
236 def __iter__(self):
237 ndim = len(self.shape)
238 if ndim == 0:
239 raise TypeError("iteration over a 0-d devicearray not supported")
240 for i in range(self.shape[0]):
241 yield self[i]
242
243 def round(self, decimals=0):
244 return jax_frontend.numpy.round(self, decimals)
245
[end of ivy/functional/frontends/jax/devicearray.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/jax/devicearray.py b/ivy/functional/frontends/jax/devicearray.py
--- a/ivy/functional/frontends/jax/devicearray.py
+++ b/ivy/functional/frontends/jax/devicearray.py
@@ -41,6 +41,10 @@
def at(self):
return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)
+ @property
+ def T(self):
+ return self.ivy_array.T
+
# Instance Methods #
# ---------------- #
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/devicearray.py b/ivy/functional/frontends/jax/devicearray.py\n--- a/ivy/functional/frontends/jax/devicearray.py\n+++ b/ivy/functional/frontends/jax/devicearray.py\n@@ -41,6 +41,10 @@\n def at(self):\n return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)\n \n+ @property\n+ def T(self):\n+ return self.ivy_array.T\n+\n # Instance Methods #\n # ---------------- #\n", "issue": "T\n\n", "before_files": [{"content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.jax as jax_frontend\n\n\nclass DeviceArray:\n def __init__(self, array, weak_type=False):\n self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)\n self.weak_type = weak_type\n\n def __repr__(self):\n main = (\n str(self.ivy_array.__repr__())\n .replace(\"ivy.array\", \"ivy.frontends.jax.DeviceArray\")\n .replace(\")\", \"\")\n + \", dtype=\"\n + str(self.ivy_array.dtype)\n )\n if self.weak_type:\n return main + \", weak_type=True)\"\n return main + \")\"\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def dtype(self):\n return self.ivy_array.dtype\n\n @property\n def shape(self):\n return self.ivy_array.shape\n\n @property\n def at(self):\n return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)\n\n # Instance Methods #\n # ---------------- #\n\n def all(self, *, axis=None, out=None, keepdims=False):\n return jax_frontend.numpy.all(\n self._ivy_array, axis=axis, keepdims=keepdims, out=out\n )\n\n def argmax(\n self,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n ):\n return jax_frontend.numpy.argmax(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n )\n\n def conj(self, /):\n return jax_frontend.numpy.conj(self._ivy_array)\n\n def conjugate(self, /):\n return jax_frontend.numpy.conjugate(self._ivy_array)\n\n def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=None):\n return jax_frontend.numpy.mean(\n self._ivy_array,\n axis=axis,\n dtype=dtype,\n out=out,\n keepdims=keepdims,\n where=where,\n )\n\n def cumprod(self, axis=None, dtype=None, out=None):\n return jax_frontend.numpy.cumprod(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def cumsum(self, axis=None, dtype=None, out=None):\n return jax_frontend.numpy.cumsum(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def nonzero(self, *, size=None, fill_value=None):\n return jax_frontend.numpy.nonzero(\n self,\n size=size,\n fill_value=fill_value,\n )\n\n def ravel(self, order=\"C\"):\n return jax_frontend.numpy.ravel(\n self,\n order=order,\n )\n\n def sort(self, axis=-1, order=None):\n return jax_frontend.numpy.sort(\n self,\n axis=axis,\n order=order,\n )\n\n def __add__(self, other):\n return jax_frontend.numpy.add(self, other)\n\n def __radd__(self, other):\n return jax_frontend.numpy.add(other, self)\n\n def __sub__(self, other):\n return jax_frontend.lax.sub(self, other)\n\n def __rsub__(self, other):\n return jax_frontend.lax.sub(other, self)\n\n def __mul__(self, other):\n return jax_frontend.lax.mul(self, other)\n\n def __rmul__(self, other):\n return jax_frontend.lax.mul(other, self)\n\n def __div__(self, other):\n return jax_frontend.numpy.divide(self, other)\n\n def __rdiv__(self, other):\n return jax_frontend.numpy.divide(other, self)\n\n def __mod__(self, other):\n return jax_frontend.numpy.mod(self, other)\n\n def __rmod__(self, other):\n return jax_frontend.numpy.mod(other, self)\n\n def __truediv__(self, other):\n 
return jax_frontend.numpy.divide(self, other)\n\n def __rtruediv__(self, other):\n return jax_frontend.numpy.divide(other, self)\n\n def __matmul__(self, other):\n return jax_frontend.numpy.dot(self, other)\n\n def __rmatmul__(self, other):\n return jax_frontend.numpy.dot(other, self)\n\n def __pos__(self):\n return self\n\n def __neg__(self):\n return jax_frontend.lax.neg(self)\n\n def __eq__(self, other):\n return jax_frontend.lax.eq(self, other)\n\n def __ne__(self, other):\n return jax_frontend.lax.ne(self, other)\n\n def __lt__(self, other):\n return jax_frontend.lax.lt(self, other)\n\n def __le__(self, other):\n return jax_frontend.lax.le(self, other)\n\n def __gt__(self, other):\n return jax_frontend.lax.gt(self, other)\n\n def __ge__(self, other):\n return jax_frontend.lax.ge(self, other)\n\n def __abs__(self):\n return jax_frontend.numpy.abs(self)\n\n def __pow__(self, other):\n return jax_frontend.lax.pow(self, other)\n\n def __rpow__(self, other):\n other = ivy.asarray(other)\n return jax_frontend.lax.pow(other, self)\n\n def __and__(self, other):\n return jax_frontend.numpy.bitwise_and(self, other)\n\n def __rand__(self, other):\n return jax_frontend.numpy.bitwise_and(other, self)\n\n def __or__(self, other):\n return jax_frontend.numpy.bitwise_or(self, other)\n\n def __ror__(self, other):\n return jax_frontend.numpy.bitwise_or(other, self)\n\n def __xor__(self, other):\n return jax_frontend.lax.bitwise_xor(self, other)\n\n def __rxor__(self, other):\n return jax_frontend.lax.bitwise_xor(other, self)\n\n def __invert__(self):\n return jax_frontend.lax.bitwise_not(self)\n\n def __lshift__(self, other):\n return jax_frontend.lax.shift_left(self, other)\n\n def __rlshift__(self, other):\n return jax_frontend.lax.shift_left(other, self)\n\n def __rshift__(self, other):\n return jax_frontend.lax.shift_right_logical(self, other)\n\n def __rrshift__(self, other):\n return jax_frontend.lax.shift_right_logical(other, self)\n\n def __getitem__(self, idx):\n return self.at[idx].get()\n\n def __setitem__(self, idx, val):\n raise ivy.utils.exceptions.IvyException(\n \"ivy.functional.frontends.jax.DeviceArray object doesn't support assignment\"\n )\n\n def __iter__(self):\n ndim = len(self.shape)\n if ndim == 0:\n raise TypeError(\"iteration over a 0-d devicearray not supported\")\n for i in range(self.shape[0]):\n yield self[i]\n\n def round(self, decimals=0):\n return jax_frontend.numpy.round(self, decimals)\n", "path": "ivy/functional/frontends/jax/devicearray.py"}]} | 2,775 | 125 |
gh_patches_debug_22503 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10012 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because it can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
Traceback (most recent last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
</issue>
<code>
[start of dynamic_programming/minimum_partition.py]
1 """
2 Partition a set into two subsets such that the difference of subset sums is minimum
3 """
4
5
6 def find_min(arr):
7 n = len(arr)
8 s = sum(arr)
9
10 dp = [[False for x in range(s + 1)] for y in range(n + 1)]
11
12 for i in range(1, n + 1):
13 dp[i][0] = True
14
15 for i in range(1, s + 1):
16 dp[0][i] = False
17
18 for i in range(1, n + 1):
19 for j in range(1, s + 1):
20 dp[i][j] = dp[i][j - 1]
21
22 if arr[i - 1] <= j:
23 dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
24
25 for j in range(int(s / 2), -1, -1):
26 if dp[n][j] is True:
27 diff = s - 2 * j
28 break
29
30 return diff
31
[end of dynamic_programming/minimum_partition.py]
</code>
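One way to follow the issue's advice about independently verifying expected doctest outputs is to check the dynamic-programming result against a brute-force reference on small inputs. A minimal sketch (the helper below is illustrative and not part of the repository):

```python
# Illustrative only: brute-force reference for the minimum partition
# difference, handy for sanity-checking hand-written doctest values.
from itertools import combinations


def min_partition_difference_bruteforce(arr):
    """
    >>> min_partition_difference_bruteforce([1, 2, 3, 4, 5])
    1
    >>> min_partition_difference_bruteforce([5, 5, 5, 5])
    0
    >>> min_partition_difference_bruteforce([])
    0
    """
    total = sum(arr)
    best = total
    for r in range(len(arr) + 1):
        for subset in combinations(arr, r):
            best = min(best, abs(total - 2 * sum(subset)))
    return best


if __name__ == "__main__":
    import doctest

    doctest.testmod()  # doctests can also be run with: python -m doctest -v <file>
```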
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py
--- a/dynamic_programming/minimum_partition.py
+++ b/dynamic_programming/minimum_partition.py
@@ -3,13 +3,25 @@
"""
-def find_min(arr):
+def find_min(arr: list[int]) -> int:
+ """
+ >>> find_min([1, 2, 3, 4, 5])
+ 1
+ >>> find_min([5, 5, 5, 5, 5])
+ 5
+ >>> find_min([5, 5, 5, 5])
+ 0
+ >>> find_min([3])
+ 3
+ >>> find_min([])
+ 0
+ """
n = len(arr)
s = sum(arr)
dp = [[False for x in range(s + 1)] for y in range(n + 1)]
- for i in range(1, n + 1):
+ for i in range(n + 1):
dp[i][0] = True
for i in range(1, s + 1):
@@ -17,7 +29,7 @@
for i in range(1, n + 1):
for j in range(1, s + 1):
- dp[i][j] = dp[i][j - 1]
+ dp[i][j] = dp[i - 1][j]
if arr[i - 1] <= j:
dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
@@ -28,3 +40,9 @@
break
return diff
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
| {"golden_diff": "diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py\n--- a/dynamic_programming/minimum_partition.py\n+++ b/dynamic_programming/minimum_partition.py\n@@ -3,13 +3,25 @@\n \"\"\"\n \n \n-def find_min(arr):\n+def find_min(arr: list[int]) -> int:\n+ \"\"\"\n+ >>> find_min([1, 2, 3, 4, 5])\n+ 1\n+ >>> find_min([5, 5, 5, 5, 5])\n+ 5\n+ >>> find_min([5, 5, 5, 5])\n+ 0\n+ >>> find_min([3])\n+ 3\n+ >>> find_min([])\n+ 0\n+ \"\"\"\n n = len(arr)\n s = sum(arr)\n \n dp = [[False for x in range(s + 1)] for y in range(n + 1)]\n \n- for i in range(1, n + 1):\n+ for i in range(n + 1):\n dp[i][0] = True\n \n for i in range(1, s + 1):\n@@ -17,7 +29,7 @@\n \n for i in range(1, n + 1):\n for j in range(1, s + 1):\n- dp[i][j] = dp[i][j - 1]\n+ dp[i][j] = dp[i - 1][j]\n \n if arr[i - 1] <= j:\n dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]\n@@ -28,3 +40,9 @@\n break\n \n return diff\n+\n+\n+if __name__ == \"__main__\":\n+ from doctest import testmod\n+\n+ testmod()\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. 
Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\nPartition a set into two subsets such that the difference of subset sums is minimum\n\"\"\"\n\n\ndef find_min(arr):\n n = len(arr)\n s = sum(arr)\n\n dp = [[False for x in range(s + 1)] for y in range(n + 1)]\n\n for i in range(1, n + 1):\n dp[i][0] = True\n\n for i in range(1, s + 1):\n dp[0][i] = False\n\n for i in range(1, n + 1):\n for j in range(1, s + 1):\n dp[i][j] = dp[i][j - 1]\n\n if arr[i - 1] <= j:\n dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]\n\n for j in range(int(s / 2), -1, -1):\n if dp[n][j] is True:\n diff = s - 2 * j\n break\n\n return diff\n", "path": "dynamic_programming/minimum_partition.py"}]} | 1,668 | 416 |
gh_patches_debug_17712 | rasdani/github-patches | git_diff | pypa__virtualenv-1509 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Integration with virtualenvwrapper
Congratulations on the rewrite.
I notice that `virtualenvwrapper` doesn't integrate with the new beta:
```
$ mkvirtualenv foo
ERROR: Environment '/Users/brettmz/.virtualenvs/foo' does not contain an activate script.
```
Sure enough - I believe it is looking for a script `activate` - but the script file now depends on the shell - `activate.sh` etc.
It would be good if this could work somehow - would it be reasonable to create / link a default `activate` script for this case? Or should virtualenvwrapper be updated?
</issue>
<code>
[start of src/virtualenv/activation/bash/__init__.py]
1 from __future__ import absolute_import, unicode_literals
2
3 from virtualenv.util.path import Path
4
5 from ..via_template import ViaTemplateActivator
6
7
8 class BashActivator(ViaTemplateActivator):
9 @classmethod
10 def supports(cls, interpreter):
11 return interpreter.os != "nt"
12
13 def templates(self):
14 yield Path("activate.sh")
15
[end of src/virtualenv/activation/bash/__init__.py]
[start of src/virtualenv/activation/via_template.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 import sys
5 from abc import ABCMeta, abstractmethod
6
7 import six
8
9 from .activator import Activator
10
11 if sys.version_info >= (3, 7):
12 from importlib.resources import read_text
13 else:
14 from importlib_resources import read_text
15
16
17 @six.add_metaclass(ABCMeta)
18 class ViaTemplateActivator(Activator):
19 @abstractmethod
20 def templates(self):
21 raise NotImplementedError
22
23 def generate(self, creator):
24 dest_folder = creator.bin_dir
25 replacements = self.replacements(creator, dest_folder)
26 self._generate(replacements, self.templates(), dest_folder, creator)
27 if self.flag_prompt is not None:
28 creator.pyenv_cfg["prompt"] = self.flag_prompt
29
30 def replacements(self, creator, dest_folder):
31 return {
32 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
33 "__VIRTUAL_ENV__": six.ensure_text(str(creator.dest)),
34 "__VIRTUAL_NAME__": creator.env_name,
35 "__BIN_NAME__": six.ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
36 "__PATH_SEP__": six.ensure_text(os.pathsep),
37 }
38
39 def _generate(self, replacements, templates, to_folder, creator):
40 for template in templates:
41 text = self.instantiate_template(replacements, template, creator)
42 (to_folder / template).write_text(text, encoding="utf-8")
43
44 def instantiate_template(self, replacements, template, creator):
45 # read text and do replacements
46 text = read_text(self.__module__, str(template), encoding="utf-8", errors="strict")
47 for key, value in replacements.items():
48 value = self._repr_unicode(creator, value)
49 text = text.replace(key, value)
50 return text
51
52 @staticmethod
53 def _repr_unicode(creator, value):
54 # by default we just let it be unicode
55 return value
56
[end of src/virtualenv/activation/via_template.py]
</code>
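As a rough illustration of the workaround floated in the issue above (exposing the shell-specific script under the plain `activate` name that virtualenvwrapper expects), a user-side helper could link or copy the generated file after the environment is created. This is a hypothetical sketch, not part of virtualenv or virtualenvwrapper:

```python
# Hypothetical user-side workaround, not a project fix: make `activate`
# available next to the generated `activate.sh`.
from pathlib import Path


def expose_plain_activate(env_dir):
    bin_dir = Path(env_dir) / "bin"      # scripts live in "Scripts" on Windows
    source = bin_dir / "activate.sh"
    target = bin_dir / "activate"
    if source.exists() and not target.exists():
        try:
            target.symlink_to(source.name)           # relative link inside bin/
        except OSError:
            target.write_text(source.read_text())    # fall back to a plain copy


# e.g. expose_plain_activate("/Users/brettmz/.virtualenvs/foo")
```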
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/activation/bash/__init__.py b/src/virtualenv/activation/bash/__init__.py
--- a/src/virtualenv/activation/bash/__init__.py
+++ b/src/virtualenv/activation/bash/__init__.py
@@ -12,3 +12,6 @@
def templates(self):
yield Path("activate.sh")
+
+ def as_name(self, template):
+ return template.stem
diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py
--- a/src/virtualenv/activation/via_template.py
+++ b/src/virtualenv/activation/via_template.py
@@ -39,7 +39,11 @@
def _generate(self, replacements, templates, to_folder, creator):
for template in templates:
text = self.instantiate_template(replacements, template, creator)
- (to_folder / template).write_text(text, encoding="utf-8")
+ dest = to_folder / self.as_name(template)
+ dest.write_text(text, encoding="utf-8")
+
+ def as_name(self, template):
+ return template.name
def instantiate_template(self, replacements, template, creator):
# read text and do replacements
| {"golden_diff": "diff --git a/src/virtualenv/activation/bash/__init__.py b/src/virtualenv/activation/bash/__init__.py\n--- a/src/virtualenv/activation/bash/__init__.py\n+++ b/src/virtualenv/activation/bash/__init__.py\n@@ -12,3 +12,6 @@\n \n def templates(self):\n yield Path(\"activate.sh\")\n+\n+ def as_name(self, template):\n+ return template.stem\ndiff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -39,7 +39,11 @@\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n- (to_folder / template).write_text(text, encoding=\"utf-8\")\n+ dest = to_folder / self.as_name(template)\n+ dest.write_text(text, encoding=\"utf-8\")\n+\n+ def as_name(self, template):\n+ return template.name\n \n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n", "issue": "Integration with virtualenvwrapper\nCongratulations on the rewrite.\r\n\r\nI notice that `virtualenvwrapper` doesn't integrate with the new beta:\r\n\r\n```\r\n$ mkvirtualenv foo\r\nERROR: Environment '/Users/brettmz/.virtualenvs/foo' does not contain an activate script.\r\n```\r\n\r\nSure enough - I believe it is looking for a script `activate` - but the script file now depends on the shell - `activate.sh` etc. \r\n\r\nIt would be good if this could work somehow - would it be reasonable to create / link a default `activate` script for this case? Or should virtualenvwrapper be updated?\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom virtualenv.util.path import Path\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass BashActivator(ViaTemplateActivator):\n @classmethod\n def supports(cls, interpreter):\n return interpreter.os != \"nt\"\n\n def templates(self):\n yield Path(\"activate.sh\")\n", "path": "src/virtualenv/activation/bash/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nimport six\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\[email protected]_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": six.ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": six.ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": six.ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n (to_folder / template).write_text(text, encoding=\"utf-8\")\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = 
read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]} | 1,324 | 276 |
gh_patches_debug_30204 | rasdani/github-patches | git_diff | ipython__ipython-9854 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot close last figure window
After updating ipython to 5.0 (from macports), I cannot close the last figure window (neither with the button in the window bar, nor with 'close()'). The mouse cursor becomes a spinning wheel inside the window, but I can still type commands in the terminal window.
However, if I type clf(), the figure window closes and a new one appears.
This happens only when there is only one figure window remaining.
Here's my system (OS X El Capitan, with python/ipython etc. installed via macports) :
```
[~] $ python -c "import IPython; print(IPython.sys_info())"
{'commit_hash': u'<not found>',
'commit_source': '(none found)',
'default_encoding': 'UTF-8',
'ipython_path': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython',
'ipython_version': '5.0.0',
'os_name': 'posix',
'platform': 'Darwin-15.5.0-x86_64-i386-64bit',
'sys_executable': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python',
'sys_platform': 'darwin',
'sys_version': '2.7.12 (default, Jun 29 2016, 12:52:38) \n[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)]'}
```
</issue>
<code>
[start of IPython/terminal/pt_inputhooks/osx.py]
1 """Inputhook for OS X
2
3 Calls NSApp / CoreFoundation APIs via ctypes.
4 """
5
6 # obj-c boilerplate from appnope, used under BSD 2-clause
7
8 import ctypes
9 import ctypes.util
10
11 objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))
12
13 void_p = ctypes.c_void_p
14
15 objc.objc_getClass.restype = void_p
16 objc.sel_registerName.restype = void_p
17 objc.objc_msgSend.restype = void_p
18 objc.objc_msgSend.argtypes = [void_p, void_p]
19
20 msg = objc.objc_msgSend
21
22 def _utf8(s):
23 """ensure utf8 bytes"""
24 if not isinstance(s, bytes):
25 s = s.encode('utf8')
26 return s
27
28 def n(name):
29 """create a selector name (for ObjC methods)"""
30 return objc.sel_registerName(_utf8(name))
31
32 def C(classname):
33 """get an ObjC Class by name"""
34 return objc.objc_getClass(_utf8(classname))
35
36 # end obj-c boilerplate from appnope
37
38 # CoreFoundation C-API calls we will use:
39 CoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))
40
41 CFFileDescriptorCreate = CoreFoundation.CFFileDescriptorCreate
42 CFFileDescriptorCreate.restype = void_p
43 CFFileDescriptorCreate.argtypes = [void_p, ctypes.c_int, ctypes.c_bool, void_p]
44
45 CFFileDescriptorGetNativeDescriptor = CoreFoundation.CFFileDescriptorGetNativeDescriptor
46 CFFileDescriptorGetNativeDescriptor.restype = ctypes.c_int
47 CFFileDescriptorGetNativeDescriptor.argtypes = [void_p]
48
49 CFFileDescriptorEnableCallBacks = CoreFoundation.CFFileDescriptorEnableCallBacks
50 CFFileDescriptorEnableCallBacks.restype = None
51 CFFileDescriptorEnableCallBacks.argtypes = [void_p, ctypes.c_ulong]
52
53 CFFileDescriptorCreateRunLoopSource = CoreFoundation.CFFileDescriptorCreateRunLoopSource
54 CFFileDescriptorCreateRunLoopSource.restype = void_p
55 CFFileDescriptorCreateRunLoopSource.argtypes = [void_p, void_p, void_p]
56
57 CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent
58 CFRunLoopGetCurrent.restype = void_p
59
60 CFRunLoopAddSource = CoreFoundation.CFRunLoopAddSource
61 CFRunLoopAddSource.restype = None
62 CFRunLoopAddSource.argtypes = [void_p, void_p, void_p]
63
64 CFRelease = CoreFoundation.CFRelease
65 CFRelease.restype = None
66 CFRelease.argtypes = [void_p]
67
68 CFFileDescriptorInvalidate = CoreFoundation.CFFileDescriptorInvalidate
69 CFFileDescriptorInvalidate.restype = None
70 CFFileDescriptorInvalidate.argtypes = [void_p]
71
72 # From CFFileDescriptor.h
73 kCFFileDescriptorReadCallBack = 1
74 kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')
75
76
77 def _NSApp():
78 """Return the global NSApplication instance (NSApp)"""
79 return msg(C('NSApplication'), n('sharedApplication'))
80
81
82 def _wake(NSApp):
83 """Wake the Application"""
84 event = msg(C('NSEvent'),
85 n('otherEventWithType:location:modifierFlags:'
86 'timestamp:windowNumber:context:subtype:data1:data2:'),
87 15, # Type
88 0, # location
89 0, # flags
90 0, # timestamp
91 0, # window
92 None, # context
93 0, # subtype
94 0, # data1
95 0, # data2
96 )
97 msg(NSApp, n('postEvent:atStart:'), void_p(event), True)
98
99
100 def _input_callback(fdref, flags, info):
101 """Callback to fire when there's input to be read"""
102 CFFileDescriptorInvalidate(fdref)
103 CFRelease(fdref)
104 NSApp = _NSApp()
105 msg(NSApp, n('stop:'), NSApp)
106 _wake(NSApp)
107
108 _c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p, void_p)
109 _c_input_callback = _c_callback_func_type(_input_callback)
110
111
112 def _stop_on_read(fd):
113 """Register callback to stop eventloop when there's data on fd"""
114 fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)
115 CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)
116 source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)
117 loop = CFRunLoopGetCurrent()
118 CFRunLoopAddSource(loop, source, kCFRunLoopCommonModes)
119 CFRelease(source)
120
121
122 def inputhook(context):
123 """Inputhook for Cocoa (NSApp)"""
124 NSApp = _NSApp()
125 window_count = msg(
126 msg(NSApp, n('windows')),
127 n('count')
128 )
129 if not window_count:
130 return
131 _stop_on_read(context.fileno())
132 msg(NSApp, n('run'))
133
134
[end of IPython/terminal/pt_inputhooks/osx.py]
</code>
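For reference, the portable core of what the Cocoa-specific code above does (run until the given file descriptor becomes readable, then hand control back) can be sketched with the standard `selectors` module. This is a simplified illustration only and leaves out all of the NSApplication details:

```python
# Simplified, cross-platform illustration of "wait until fd is readable".
import selectors


def wait_until_readable(fileno, timeout=None):
    """Block until `fileno` has pending input; return True if it became ready."""
    sel = selectors.DefaultSelector()
    try:
        sel.register(fileno, selectors.EVENT_READ)
        return bool(sel.select(timeout))
    finally:
        sel.close()
```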
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/terminal/pt_inputhooks/osx.py b/IPython/terminal/pt_inputhooks/osx.py
--- a/IPython/terminal/pt_inputhooks/osx.py
+++ b/IPython/terminal/pt_inputhooks/osx.py
@@ -7,6 +7,7 @@
import ctypes
import ctypes.util
+from threading import Event
objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))
@@ -97,8 +98,11 @@
msg(NSApp, n('postEvent:atStart:'), void_p(event), True)
+_triggered = Event()
+
def _input_callback(fdref, flags, info):
"""Callback to fire when there's input to be read"""
+ _triggered.set()
CFFileDescriptorInvalidate(fdref)
CFRelease(fdref)
NSApp = _NSApp()
@@ -111,6 +115,7 @@
def _stop_on_read(fd):
"""Register callback to stop eventloop when there's data on fd"""
+ _triggered.clear()
fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)
CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)
source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)
@@ -130,4 +135,9 @@
return
_stop_on_read(context.fileno())
msg(NSApp, n('run'))
-
+ if not _triggered.is_set():
+ # app closed without firing callback,
+ # probably due to last window being closed.
+ # Run the loop manually in this case,
+ # since there may be events still to process (#9734)
+ CoreFoundation.CFRunLoopRun()
| {"golden_diff": "diff --git a/IPython/terminal/pt_inputhooks/osx.py b/IPython/terminal/pt_inputhooks/osx.py\n--- a/IPython/terminal/pt_inputhooks/osx.py\n+++ b/IPython/terminal/pt_inputhooks/osx.py\n@@ -7,6 +7,7 @@\n \n import ctypes\n import ctypes.util\n+from threading import Event\n \n objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))\n \n@@ -97,8 +98,11 @@\n msg(NSApp, n('postEvent:atStart:'), void_p(event), True)\n \n \n+_triggered = Event()\n+\n def _input_callback(fdref, flags, info):\n \"\"\"Callback to fire when there's input to be read\"\"\"\n+ _triggered.set()\n CFFileDescriptorInvalidate(fdref)\n CFRelease(fdref)\n NSApp = _NSApp()\n@@ -111,6 +115,7 @@\n \n def _stop_on_read(fd):\n \"\"\"Register callback to stop eventloop when there's data on fd\"\"\"\n+ _triggered.clear()\n fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)\n CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)\n source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)\n@@ -130,4 +135,9 @@\n return\n _stop_on_read(context.fileno())\n msg(NSApp, n('run'))\n-\n+ if not _triggered.is_set():\n+ # app closed without firing callback,\n+ # probably due to last window being closed.\n+ # Run the loop manually in this case,\n+ # since there may be events still to process (#9734)\n+ CoreFoundation.CFRunLoopRun()\n", "issue": "Cannot close last figure window\nAfter updating ipython to 5.0 (from macports), I cannot close last figure window (neither with button in window bar, nor with 'close()'). The mouse cursor becomes spinning wheel inside the window, but I can still type command in the terminal window.\n\nHowever, if I type clf(), the figure window closes and a new one appears.\n\nThis happens only when there is only one figure window remaining. \n\nHere's my system (OS X El Capitan, with python/ipython etc. 
installed via macports) :\n\n```\n[~] $ python -c \"import IPython; print(IPython.sys_info())\"\n{'commit_hash': u'<not found>',\n 'commit_source': '(none found)',\n 'default_encoding': 'UTF-8',\n 'ipython_path': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython',\n 'ipython_version': '5.0.0',\n 'os_name': 'posix',\n 'platform': 'Darwin-15.5.0-x86_64-i386-64bit',\n 'sys_executable': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python',\n 'sys_platform': 'darwin',\n 'sys_version': '2.7.12 (default, Jun 29 2016, 12:52:38) \\n[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)]'}\n```\n\n", "before_files": [{"content": "\"\"\"Inputhook for OS X\n\nCalls NSApp / CoreFoundation APIs via ctypes.\n\"\"\"\n\n# obj-c boilerplate from appnope, used under BSD 2-clause\n\nimport ctypes\nimport ctypes.util\n\nobjc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))\n\nvoid_p = ctypes.c_void_p\n\nobjc.objc_getClass.restype = void_p\nobjc.sel_registerName.restype = void_p\nobjc.objc_msgSend.restype = void_p\nobjc.objc_msgSend.argtypes = [void_p, void_p]\n\nmsg = objc.objc_msgSend\n\ndef _utf8(s):\n \"\"\"ensure utf8 bytes\"\"\"\n if not isinstance(s, bytes):\n s = s.encode('utf8')\n return s\n\ndef n(name):\n \"\"\"create a selector name (for ObjC methods)\"\"\"\n return objc.sel_registerName(_utf8(name))\n\ndef C(classname):\n \"\"\"get an ObjC Class by name\"\"\"\n return objc.objc_getClass(_utf8(classname))\n\n# end obj-c boilerplate from appnope\n\n# CoreFoundation C-API calls we will use:\nCoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))\n\nCFFileDescriptorCreate = CoreFoundation.CFFileDescriptorCreate\nCFFileDescriptorCreate.restype = void_p\nCFFileDescriptorCreate.argtypes = [void_p, ctypes.c_int, ctypes.c_bool, void_p]\n\nCFFileDescriptorGetNativeDescriptor = CoreFoundation.CFFileDescriptorGetNativeDescriptor\nCFFileDescriptorGetNativeDescriptor.restype = ctypes.c_int\nCFFileDescriptorGetNativeDescriptor.argtypes = [void_p]\n\nCFFileDescriptorEnableCallBacks = CoreFoundation.CFFileDescriptorEnableCallBacks\nCFFileDescriptorEnableCallBacks.restype = None\nCFFileDescriptorEnableCallBacks.argtypes = [void_p, ctypes.c_ulong]\n\nCFFileDescriptorCreateRunLoopSource = CoreFoundation.CFFileDescriptorCreateRunLoopSource\nCFFileDescriptorCreateRunLoopSource.restype = void_p\nCFFileDescriptorCreateRunLoopSource.argtypes = [void_p, void_p, void_p]\n\nCFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent\nCFRunLoopGetCurrent.restype = void_p\n\nCFRunLoopAddSource = CoreFoundation.CFRunLoopAddSource\nCFRunLoopAddSource.restype = None\nCFRunLoopAddSource.argtypes = [void_p, void_p, void_p]\n\nCFRelease = CoreFoundation.CFRelease\nCFRelease.restype = None\nCFRelease.argtypes = [void_p]\n\nCFFileDescriptorInvalidate = CoreFoundation.CFFileDescriptorInvalidate\nCFFileDescriptorInvalidate.restype = None\nCFFileDescriptorInvalidate.argtypes = [void_p]\n\n# From CFFileDescriptor.h\nkCFFileDescriptorReadCallBack = 1\nkCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')\n\n\ndef _NSApp():\n \"\"\"Return the global NSApplication instance (NSApp)\"\"\"\n return msg(C('NSApplication'), n('sharedApplication'))\n\n\ndef _wake(NSApp):\n \"\"\"Wake the Application\"\"\"\n event = msg(C('NSEvent'),\n n('otherEventWithType:location:modifierFlags:'\n 'timestamp:windowNumber:context:subtype:data1:data2:'),\n 15, # Type\n 0, # location\n 0, # flags\n 0, # 
timestamp\n 0, # window\n None, # context\n 0, # subtype\n 0, # data1\n 0, # data2\n )\n msg(NSApp, n('postEvent:atStart:'), void_p(event), True)\n\n\ndef _input_callback(fdref, flags, info):\n \"\"\"Callback to fire when there's input to be read\"\"\"\n CFFileDescriptorInvalidate(fdref)\n CFRelease(fdref)\n NSApp = _NSApp()\n msg(NSApp, n('stop:'), NSApp)\n _wake(NSApp)\n\n_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p, void_p)\n_c_input_callback = _c_callback_func_type(_input_callback)\n\n\ndef _stop_on_read(fd):\n \"\"\"Register callback to stop eventloop when there's data on fd\"\"\"\n fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)\n CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)\n source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)\n loop = CFRunLoopGetCurrent()\n CFRunLoopAddSource(loop, source, kCFRunLoopCommonModes)\n CFRelease(source)\n\n\ndef inputhook(context):\n \"\"\"Inputhook for Cocoa (NSApp)\"\"\"\n NSApp = _NSApp()\n window_count = msg(\n msg(NSApp, n('windows')),\n n('count')\n )\n if not window_count:\n return\n _stop_on_read(context.fileno())\n msg(NSApp, n('run'))\n\n", "path": "IPython/terminal/pt_inputhooks/osx.py"}]} | 2,255 | 393 |
gh_patches_debug_40491 | rasdani/github-patches | git_diff | CTFd__CTFd-298 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Advice on E-mail Mechanism
It seems that when the E-mail server and E-mail confirmation are enabled, CTFd sends a new E-mail to the address entered by a new user every time they log in while still unconfirmed (at least my SMTP server behaves that way; I apologize if this is just a misconfiguration of my own server). It would be better if only one E-mail were sent the first time a new user signs up, together with a button that lets an unconfirmed user request a new confirmation E-mail if needed.
Anyway, Thanks for your excellent project!
</issue>
<code>
[start of CTFd/auth.py]
1 import logging
2 import os
3 import re
4 import time
5 import urllib
6
7 from flask import current_app as app, render_template, request, redirect, url_for, session, Blueprint
8 from itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature
9 from passlib.hash import bcrypt_sha256
10
11 from CTFd.models import db, Teams
12 from CTFd import utils
13
14 auth = Blueprint('auth', __name__)
15
16
17 @auth.route('/confirm', methods=['POST', 'GET'])
18 @auth.route('/confirm/<data>', methods=['GET'])
19 def confirm_user(data=None):
20 if not utils.get_config('verify_emails'):
21 return redirect(url_for('challenges.challenges_view'))
22 if data and request.method == "GET": # User is confirming email account
23 try:
24 s = Signer(app.config['SECRET_KEY'])
25 email = s.unsign(urllib.unquote_plus(data.decode('base64')))
26 except BadSignature:
27 return render_template('confirm.html', errors=['Your confirmation link seems wrong'])
28 except:
29 return render_template('confirm.html', errors=['Your link appears broken, please try again.'])
30 team = Teams.query.filter_by(email=email).first_or_404()
31 team.verified = True
32 db.session.commit()
33 logger = logging.getLogger('regs')
34 logger.warn("[{0}] {1} confirmed {2}".format(time.strftime("%m/%d/%Y %X"), team.name.encode('utf-8'), team.email.encode('utf-8')))
35 db.session.close()
36 if utils.authed():
37 return redirect(url_for('challenges.challenges_view'))
38 return redirect(url_for('auth.login'))
39 if not data and request.method == "GET": # User has been directed to the confirm page because his account is not verified
40 if not utils.authed():
41 return redirect(url_for('auth.login'))
42 team = Teams.query.filter_by(id=session['id']).first_or_404()
43 if team.verified:
44 return redirect(url_for('views.profile'))
45 else:
46 utils.verify_email(team.email)
47 return render_template('confirm.html', team=team)
48
49
50 @auth.route('/reset_password', methods=['POST', 'GET'])
51 @auth.route('/reset_password/<data>', methods=['POST', 'GET'])
52 def reset_password(data=None):
53 if data is not None and request.method == "GET":
54 return render_template('reset_password.html', mode='set')
55 if data is not None and request.method == "POST":
56 try:
57 s = TimedSerializer(app.config['SECRET_KEY'])
58 name = s.loads(urllib.unquote_plus(data.decode('base64')), max_age=1800)
59 except BadTimeSignature:
60 return render_template('reset_password.html', errors=['Your link has expired'])
61 except:
62 return render_template('reset_password.html', errors=['Your link appears broken, please try again.'])
63 team = Teams.query.filter_by(name=name).first_or_404()
64 team.password = bcrypt_sha256.encrypt(request.form['password'].strip())
65 db.session.commit()
66 db.session.close()
67 return redirect(url_for('auth.login'))
68
69 if request.method == 'POST':
70 email = request.form['email'].strip()
71 team = Teams.query.filter_by(email=email).first()
72 if not team:
73 return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
74 s = TimedSerializer(app.config['SECRET_KEY'])
75 token = s.dumps(team.name)
76 text = """
77 Did you initiate a password reset?
78
79 {0}/{1}
80
81 """.format(url_for('auth.reset_password', _external=True), urllib.quote_plus(token.encode('base64')))
82
83 utils.sendmail(email, text)
84
85 return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
86 return render_template('reset_password.html')
87
88
89 @auth.route('/register', methods=['POST', 'GET'])
90 def register():
91 if not utils.can_register():
92 return redirect(url_for('auth.login'))
93 if request.method == 'POST':
94 errors = []
95 name = request.form['name']
96 email = request.form['email']
97 password = request.form['password']
98
99 name_len = len(name) == 0
100 names = Teams.query.add_columns('name', 'id').filter_by(name=name).first()
101 emails = Teams.query.add_columns('email', 'id').filter_by(email=email).first()
102 pass_short = len(password) == 0
103 pass_long = len(password) > 128
104 valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", request.form['email'])
105
106 if not valid_email:
107 errors.append("That email doesn't look right")
108 if names:
109 errors.append('That team name is already taken')
110 if emails:
111 errors.append('That email has already been used')
112 if pass_short:
113 errors.append('Pick a longer password')
114 if pass_long:
115 errors.append('Pick a shorter password')
116 if name_len:
117 errors.append('Pick a longer team name')
118
119 if len(errors) > 0:
120 return render_template('register.html', errors=errors, name=request.form['name'], email=request.form['email'], password=request.form['password'])
121 else:
122 with app.app_context():
123 team = Teams(name, email.lower(), password)
124 db.session.add(team)
125 db.session.commit()
126 db.session.flush()
127
128 session['username'] = team.name
129 session['id'] = team.id
130 session['admin'] = team.admin
131 session['nonce'] = utils.sha512(os.urandom(10))
132
133 if utils.can_send_mail() and utils.get_config('verify_emails'): # Confirming users is enabled and we can send email.
134 db.session.close()
135 logger = logging.getLogger('regs')
136 logger.warn("[{0}] {1} registered (UNCONFIRMED) with {2}".format(time.strftime("%m/%d/%Y %X"),
137 request.form['name'].encode('utf-8'),
138 request.form['email'].encode('utf-8')))
139 return redirect(url_for('auth.confirm_user'))
140 else: # Don't care about confirming users
141 if utils.can_send_mail(): # We want to notify the user that they have registered.
142 utils.sendmail(request.form['email'], "You've successfully registered for {}".format(utils.get_config('ctf_name')))
143
144 db.session.close()
145
146 logger = logging.getLogger('regs')
147 logger.warn("[{0}] {1} registered with {2}".format(time.strftime("%m/%d/%Y %X"), request.form['name'].encode('utf-8'), request.form['email'].encode('utf-8')))
148 return redirect(url_for('challenges.challenges_view'))
149 else:
150 return render_template('register.html')
151
152
153 @auth.route('/login', methods=['POST', 'GET'])
154 def login():
155 if request.method == 'POST':
156 errors = []
157 name = request.form['name']
158 team = Teams.query.filter_by(name=name).first()
159 if team:
160 if team and bcrypt_sha256.verify(request.form['password'], team.password):
161 try:
162 session.regenerate() # NO SESSION FIXATION FOR YOU
163 except:
164 pass # TODO: Some session objects don't implement regenerate :(
165 session['username'] = team.name
166 session['id'] = team.id
167 session['admin'] = team.admin
168 session['nonce'] = utils.sha512(os.urandom(10))
169 db.session.close()
170
171 logger = logging.getLogger('logins')
172 logger.warn("[{0}] {1} logged in".format(time.strftime("%m/%d/%Y %X"), session['username'].encode('utf-8')))
173
174 if request.args.get('next') and utils.is_safe_url(request.args.get('next')):
175 return redirect(request.args.get('next'))
176 return redirect(url_for('challenges.challenges_view'))
177 else: # This user exists but the password is wrong
178 errors.append("Your username or password is incorrect")
179 db.session.close()
180 return render_template('login.html', errors=errors)
181 else: # This user just doesn't exist
182 errors.append("Your username or password is incorrect")
183 db.session.close()
184 return render_template('login.html', errors=errors)
185 else:
186 db.session.close()
187 return render_template('login.html')
188
189
190 @auth.route('/logout')
191 def logout():
192 if utils.authed():
193 session.clear()
194 return redirect(url_for('views.static_html'))
195
[end of CTFd/auth.py]
</code>
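For context on the reset flow in `auth.py` above, the `itsdangerous` timed-token round-trip works roughly as follows. The secret and payload here are placeholders; CTFd itself uses `app.config['SECRET_KEY']` and the team name:

```python
# Minimal sketch of the timed-token round-trip used by reset_password above.
from itsdangerous import BadSignature, BadTimeSignature, TimedSerializer

SECRET_KEY = "change-me"            # placeholder secret
s = TimedSerializer(SECRET_KEY)

token = s.dumps("some_team")        # token embedded in the emailed link

try:
    name = s.loads(token, max_age=1800)   # reject tokens older than 30 minutes
except BadTimeSignature:
    name = None                           # token expired
except BadSignature:
    name = None                           # token invalid or tampered with
print(name)
```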
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/auth.py b/CTFd/auth.py
--- a/CTFd/auth.py
+++ b/CTFd/auth.py
@@ -18,8 +18,11 @@
@auth.route('/confirm/<data>', methods=['GET'])
def confirm_user(data=None):
if not utils.get_config('verify_emails'):
+ # If the CTF doesn't care about confirming email addresses then redierct to challenges
return redirect(url_for('challenges.challenges_view'))
- if data and request.method == "GET": # User is confirming email account
+
+ # User is confirming email account
+ if data and request.method == "GET":
try:
s = Signer(app.config['SECRET_KEY'])
email = s.unsign(urllib.unquote_plus(data.decode('base64')))
@@ -36,15 +39,28 @@
if utils.authed():
return redirect(url_for('challenges.challenges_view'))
return redirect(url_for('auth.login'))
- if not data and request.method == "GET": # User has been directed to the confirm page because his account is not verified
- if not utils.authed():
- return redirect(url_for('auth.login'))
- team = Teams.query.filter_by(id=session['id']).first_or_404()
- if team.verified:
- return redirect(url_for('views.profile'))
- else:
- utils.verify_email(team.email)
- return render_template('confirm.html', team=team)
+
+ # User is trying to start or restart the confirmation flow
+ if not utils.authed():
+ return redirect(url_for('auth.login'))
+
+ team = Teams.query.filter_by(id=session['id']).first_or_404()
+
+ if data is None:
+ if request.method == "POST":
+ # User wants to resend their confirmation email
+ if team.verified:
+ return redirect(url_for('views.profile'))
+ else:
+ utils.verify_email(team.email)
+ return render_template('confirm.html', team=team, infos=['Your confirmation email has been resent!'])
+ elif request.method == "GET":
+ # User has been directed to the confirm page
+ team = Teams.query.filter_by(id=session['id']).first_or_404()
+ if team.verified:
+ # If user is already verified, redirect to their profile
+ return redirect(url_for('views.profile'))
+ return render_template('confirm.html', team=team)
@auth.route('/reset_password', methods=['POST', 'GET'])
@@ -136,6 +152,9 @@
logger.warn("[{0}] {1} registered (UNCONFIRMED) with {2}".format(time.strftime("%m/%d/%Y %X"),
request.form['name'].encode('utf-8'),
request.form['email'].encode('utf-8')))
+
+ utils.verify_email(team.email)
+
return redirect(url_for('auth.confirm_user'))
else: # Don't care about confirming users
if utils.can_send_mail(): # We want to notify the user that they have registered.
| {"golden_diff": "diff --git a/CTFd/auth.py b/CTFd/auth.py\n--- a/CTFd/auth.py\n+++ b/CTFd/auth.py\n@@ -18,8 +18,11 @@\n @auth.route('/confirm/<data>', methods=['GET'])\n def confirm_user(data=None):\n if not utils.get_config('verify_emails'):\n+ # If the CTF doesn't care about confirming email addresses then redierct to challenges\n return redirect(url_for('challenges.challenges_view'))\n- if data and request.method == \"GET\": # User is confirming email account\n+\n+ # User is confirming email account\n+ if data and request.method == \"GET\":\n try:\n s = Signer(app.config['SECRET_KEY'])\n email = s.unsign(urllib.unquote_plus(data.decode('base64')))\n@@ -36,15 +39,28 @@\n if utils.authed():\n return redirect(url_for('challenges.challenges_view'))\n return redirect(url_for('auth.login'))\n- if not data and request.method == \"GET\": # User has been directed to the confirm page because his account is not verified\n- if not utils.authed():\n- return redirect(url_for('auth.login'))\n- team = Teams.query.filter_by(id=session['id']).first_or_404()\n- if team.verified:\n- return redirect(url_for('views.profile'))\n- else:\n- utils.verify_email(team.email)\n- return render_template('confirm.html', team=team)\n+\n+ # User is trying to start or restart the confirmation flow\n+ if not utils.authed():\n+ return redirect(url_for('auth.login'))\n+\n+ team = Teams.query.filter_by(id=session['id']).first_or_404()\n+\n+ if data is None:\n+ if request.method == \"POST\":\n+ # User wants to resend their confirmation email\n+ if team.verified:\n+ return redirect(url_for('views.profile'))\n+ else:\n+ utils.verify_email(team.email)\n+ return render_template('confirm.html', team=team, infos=['Your confirmation email has been resent!'])\n+ elif request.method == \"GET\":\n+ # User has been directed to the confirm page\n+ team = Teams.query.filter_by(id=session['id']).first_or_404()\n+ if team.verified:\n+ # If user is already verified, redirect to their profile\n+ return redirect(url_for('views.profile'))\n+ return render_template('confirm.html', team=team)\n \n \n @auth.route('/reset_password', methods=['POST', 'GET'])\n@@ -136,6 +152,9 @@\n logger.warn(\"[{0}] {1} registered (UNCONFIRMED) with {2}\".format(time.strftime(\"%m/%d/%Y %X\"),\n request.form['name'].encode('utf-8'),\n request.form['email'].encode('utf-8')))\n+\n+ utils.verify_email(team.email)\n+\n return redirect(url_for('auth.confirm_user'))\n else: # Don't care about confirming users\n if utils.can_send_mail(): # We want to notify the user that they have registered.\n", "issue": "Advice on E-mail Mechanism\nIt seems that when E-mail server and E-mail confirmation are enabled, CTFd will send a new E-mail to the E-mail address input by the new user every time he or she logged in without confirmation yet (At least my SMTP server does like that, I will be apologize if it is my own wrong configuration with my own server). 
I think it will be better that only one E-mail will be sent at the first time new user signs up, and provide a button allowing the unconfirmed user to receive a new confirmation E-mail if needed.\r\n\r\nAnyway, Thanks for your excellent project!\n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport time\nimport urllib\n\nfrom flask import current_app as app, render_template, request, redirect, url_for, session, Blueprint\nfrom itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature\nfrom passlib.hash import bcrypt_sha256\n\nfrom CTFd.models import db, Teams\nfrom CTFd import utils\n\nauth = Blueprint('auth', __name__)\n\n\[email protected]('/confirm', methods=['POST', 'GET'])\[email protected]('/confirm/<data>', methods=['GET'])\ndef confirm_user(data=None):\n if not utils.get_config('verify_emails'):\n return redirect(url_for('challenges.challenges_view'))\n if data and request.method == \"GET\": # User is confirming email account\n try:\n s = Signer(app.config['SECRET_KEY'])\n email = s.unsign(urllib.unquote_plus(data.decode('base64')))\n except BadSignature:\n return render_template('confirm.html', errors=['Your confirmation link seems wrong'])\n except:\n return render_template('confirm.html', errors=['Your link appears broken, please try again.'])\n team = Teams.query.filter_by(email=email).first_or_404()\n team.verified = True\n db.session.commit()\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} confirmed {2}\".format(time.strftime(\"%m/%d/%Y %X\"), team.name.encode('utf-8'), team.email.encode('utf-8')))\n db.session.close()\n if utils.authed():\n return redirect(url_for('challenges.challenges_view'))\n return redirect(url_for('auth.login'))\n if not data and request.method == \"GET\": # User has been directed to the confirm page because his account is not verified\n if not utils.authed():\n return redirect(url_for('auth.login'))\n team = Teams.query.filter_by(id=session['id']).first_or_404()\n if team.verified:\n return redirect(url_for('views.profile'))\n else:\n utils.verify_email(team.email)\n return render_template('confirm.html', team=team)\n\n\[email protected]('/reset_password', methods=['POST', 'GET'])\[email protected]('/reset_password/<data>', methods=['POST', 'GET'])\ndef reset_password(data=None):\n if data is not None and request.method == \"GET\":\n return render_template('reset_password.html', mode='set')\n if data is not None and request.method == \"POST\":\n try:\n s = TimedSerializer(app.config['SECRET_KEY'])\n name = s.loads(urllib.unquote_plus(data.decode('base64')), max_age=1800)\n except BadTimeSignature:\n return render_template('reset_password.html', errors=['Your link has expired'])\n except:\n return render_template('reset_password.html', errors=['Your link appears broken, please try again.'])\n team = Teams.query.filter_by(name=name).first_or_404()\n team.password = bcrypt_sha256.encrypt(request.form['password'].strip())\n db.session.commit()\n db.session.close()\n return redirect(url_for('auth.login'))\n\n if request.method == 'POST':\n email = request.form['email'].strip()\n team = Teams.query.filter_by(email=email).first()\n if not team:\n return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])\n s = TimedSerializer(app.config['SECRET_KEY'])\n token = s.dumps(team.name)\n text = \"\"\"\nDid you initiate a password reset?\n\n{0}/{1}\n\n\"\"\".format(url_for('auth.reset_password', _external=True), 
urllib.quote_plus(token.encode('base64')))\n\n utils.sendmail(email, text)\n\n return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])\n return render_template('reset_password.html')\n\n\[email protected]('/register', methods=['POST', 'GET'])\ndef register():\n if not utils.can_register():\n return redirect(url_for('auth.login'))\n if request.method == 'POST':\n errors = []\n name = request.form['name']\n email = request.form['email']\n password = request.form['password']\n\n name_len = len(name) == 0\n names = Teams.query.add_columns('name', 'id').filter_by(name=name).first()\n emails = Teams.query.add_columns('email', 'id').filter_by(email=email).first()\n pass_short = len(password) == 0\n pass_long = len(password) > 128\n valid_email = re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", request.form['email'])\n\n if not valid_email:\n errors.append(\"That email doesn't look right\")\n if names:\n errors.append('That team name is already taken')\n if emails:\n errors.append('That email has already been used')\n if pass_short:\n errors.append('Pick a longer password')\n if pass_long:\n errors.append('Pick a shorter password')\n if name_len:\n errors.append('Pick a longer team name')\n\n if len(errors) > 0:\n return render_template('register.html', errors=errors, name=request.form['name'], email=request.form['email'], password=request.form['password'])\n else:\n with app.app_context():\n team = Teams(name, email.lower(), password)\n db.session.add(team)\n db.session.commit()\n db.session.flush()\n\n session['username'] = team.name\n session['id'] = team.id\n session['admin'] = team.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n\n if utils.can_send_mail() and utils.get_config('verify_emails'): # Confirming users is enabled and we can send email.\n db.session.close()\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} registered (UNCONFIRMED) with {2}\".format(time.strftime(\"%m/%d/%Y %X\"),\n request.form['name'].encode('utf-8'),\n request.form['email'].encode('utf-8')))\n return redirect(url_for('auth.confirm_user'))\n else: # Don't care about confirming users\n if utils.can_send_mail(): # We want to notify the user that they have registered.\n utils.sendmail(request.form['email'], \"You've successfully registered for {}\".format(utils.get_config('ctf_name')))\n\n db.session.close()\n\n logger = logging.getLogger('regs')\n logger.warn(\"[{0}] {1} registered with {2}\".format(time.strftime(\"%m/%d/%Y %X\"), request.form['name'].encode('utf-8'), request.form['email'].encode('utf-8')))\n return redirect(url_for('challenges.challenges_view'))\n else:\n return render_template('register.html')\n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n errors = []\n name = request.form['name']\n team = Teams.query.filter_by(name=name).first()\n if team:\n if team and bcrypt_sha256.verify(request.form['password'], team.password):\n try:\n session.regenerate() # NO SESSION FIXATION FOR YOU\n except:\n pass # TODO: Some session objects don't implement regenerate :(\n session['username'] = team.name\n session['id'] = team.id\n session['admin'] = team.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n db.session.close()\n\n logger = logging.getLogger('logins')\n logger.warn(\"[{0}] {1} logged in\".format(time.strftime(\"%m/%d/%Y %X\"), session['username'].encode('utf-8')))\n\n if request.args.get('next') and 
utils.is_safe_url(request.args.get('next')):\n return redirect(request.args.get('next'))\n return redirect(url_for('challenges.challenges_view'))\n else: # This user exists but the password is wrong\n errors.append(\"Your username or password is incorrect\")\n db.session.close()\n return render_template('login.html', errors=errors)\n else: # This user just doesn't exist\n errors.append(\"Your username or password is incorrect\")\n db.session.close()\n return render_template('login.html', errors=errors)\n else:\n db.session.close()\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n if utils.authed():\n session.clear()\n return redirect(url_for('views.static_html'))\n", "path": "CTFd/auth.py"}]} | 3,006 | 687 |
gh_patches_debug_41219 | rasdani/github-patches | git_diff | beeware__toga-863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Progressbar crash on start
## Expected Behavior
<!--- If you're describing a bug, tell us what you expect to happen. -->
Progress bar to load
<!--- If you're requesting a new feature, tell us why you'd like this feature. -->
## Current Behavior
<!--- If you're describing a bug, what currently happens? -->
```
python -m progressbar
Unhandled Exception: Python.Runtime.PythonException: TypeError : 'NoneType' value cannot be converted to System.Int32
at Python.Runtime.Dispatcher.Dispatch(ArrayList args)
at __System_Threading_ThreadStartDispatcher.Invoke()
at System.Threading.ThreadHelper.ThreadStart_Context(Object state)
at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)
at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)
at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state)
at System.Threading.ThreadHelper.ThreadStart()
```
## Steps to reproduce
<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->
1. launch progressbar
## Your Environment
<!--- Provide details on your current environment you found the bug in -->
* Python Version (list the specific version number) 3.6.5
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
- [ ] macOS - version:
- [ ] Linux - distro: - version:
- [ * ] Windows - version: Win10 1607
- [ ] Other - name: - version:
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [ ] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [ * ] winforms
- [ ] win32
- [ ] Other (please specify)
</issue>
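The crash quoted above comes from handing a Python `None` to the Int32-typed `ProgressBar.Maximum` property on the WinForms side. A minimal sketch of the kind of guard that avoids it follows — switch the native control to the marquee (indeterminate) style when no maximum is set, and only assign `Maximum` when an integer is available. The `apply_max` helper name is illustrative; only `WinForms.ProgressBarStyle` and `Maximum` correspond to the bindings this backend uses, and running it requires the Windows/pythonnet environment the backend targets.

```python
from toga_winforms.libs import WinForms

def apply_max(native, maximum):
    # Assigning None to the Int32-typed Maximum property is what raises
    # "'NoneType' value cannot be converted to System.Int32".
    if maximum is None:
        native.Style = WinForms.ProgressBarStyle.Marquee   # indeterminate
    else:
        native.Style = WinForms.ProgressBarStyle.Blocks
        native.Maximum = maximum
```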
<code>
[start of src/winforms/toga_winforms/widgets/progressbar.py]
1 from travertino.size import at_least
2
3 from toga_winforms.libs import WinForms
4
5 from .base import Widget
6
7
8 class ProgressBar(Widget):
9 def create(self):
10 self.native = WinForms.ProgressBar()
11
12 def start(self):
13 '''Not supported for WinForms implementation'''
14 self.interface.factory.not_implemented('ProgressBar.start()')
15
16 # possible implementation (not tested):
17 # self.native.Style = ProgressBarStyle.Marquee
18
19 def stop(self):
20 '''Not supported for WinForms implementation'''
21 self.interface.factory.not_implemented('ProgressBar.stop()')
22
23 # possible implementation (not tested):
24 # self.native.Style = ProgressBarStyle.Continuous
25
26 def set_max(self, value):
27 self.native.Maximum = value
28
29 def set_value(self, value):
30 self.native.Value = value
31
32 def rehint(self):
33 # Height must be non-zero
34 # Set a sensible min-width
35 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
36 self.interface.intrinsic.height = self.native.PreferredSize.Height
37
[end of src/winforms/toga_winforms/widgets/progressbar.py]
[start of examples/progressbar/progressbar/app.py]
1 import toga
2 from toga.style import Pack
3 from toga.constants import ROW, COLUMN
4
5
6 class ProgressBarApp(toga.App):
7
8 def startup(self):
9 # Main window of the application with title and size
10 self.main_window = toga.MainWindow(title=self.name, size=(500, 500))
11
12 # the user may change the value with +/- buttons
13 self.progress_adder = toga.ProgressBar()
14
15 # the user may switch between "running" mode and a set value
16 self.progress_runner = toga.ProgressBar(max=None)
17
18 # set up common styles
19 label_style = Pack(flex=1, padding_right=24)
20 row_box_style = Pack(direction=ROW, padding=24)
21 col_box_style = Pack(direction=COLUMN, padding=24)
22
23 # Add the content on the main window
24 self.main_window.content = toga.Box(style=col_box_style, children=[
25 toga.Box(style=col_box_style, children=[
26 toga.Label("Use the +/- buttons to change the progress",
27 style=label_style),
28
29 self.progress_adder,
30
31 toga.Box(children=[
32 toga.Button("+", on_press=self.increase_progress,
33 style=Pack(flex=1)),
34 toga.Button("-", on_press=self.decrease_progress,
35 style=Pack(flex=1)),
36 ]),
37
38 toga.Switch("Toggle running mode", on_toggle=self.toggle_running)
39 ]),
40
41 toga.Box(style=row_box_style, children=[
42 toga.Label("default ProgressBar", style=label_style),
43 toga.ProgressBar(),
44 ]),
45
46 toga.Box(style=row_box_style, children=[
47 toga.Label("disabled ProgressBar", style=label_style),
48 toga.ProgressBar(max=None, running=False),
49 ]),
50
51 toga.Box(style=row_box_style, children=[
52 toga.Label("indeterminate ProgressBar", style=label_style),
53 toga.ProgressBar(max=None, running=True),
54 ]),
55
56 toga.Box(style=row_box_style, children=[
57 toga.Label("determinate ProgressBar", style=label_style),
58 toga.ProgressBar(max=1, running=False, value=0.5),
59 ]),
60
61 toga.Box(style=row_box_style, children=[
62 toga.Label("running determinate ProgressBar", style=label_style),
63 toga.ProgressBar(max=1, running=True, value=0.5),
64 ]),
65 ])
66
67 self.main_window.show()
68
69 def increase_progress(self, button, **kw):
70 if not self.progress_adder.is_running:
71 self.progress_adder.value += 0.1 * self.progress_adder.max
72
73 def decrease_progress(self, button, **kw):
74 if not self.progress_adder.is_running:
75 self.progress_adder.value -= 0.1 * self.progress_adder.max
76
77 def toggle_running(self, switch, **kw):
78 if switch.is_on:
79 self.progress_adder.max = None
80 self.progress_adder.start()
81 else:
82 self.progress_adder.max = 1
83 self.progress_adder.stop()
84
85
86 def main():
87 # App name and namespace
88 return ProgressBarApp('ProgressBar', 'org.beeware.examples.progressbar')
89
[end of examples/progressbar/progressbar/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/progressbar/progressbar/app.py b/examples/progressbar/progressbar/app.py
--- a/examples/progressbar/progressbar/app.py
+++ b/examples/progressbar/progressbar/app.py
@@ -2,6 +2,8 @@
from toga.style import Pack
from toga.constants import ROW, COLUMN
+MAX_PROGRESSBAR_VALUE = 100
+
class ProgressBarApp(toga.App):
@@ -10,10 +12,7 @@
self.main_window = toga.MainWindow(title=self.name, size=(500, 500))
# the user may change the value with +/- buttons
- self.progress_adder = toga.ProgressBar()
-
- # the user may switch between "running" mode and a set value
- self.progress_runner = toga.ProgressBar(max=None)
+ self.progress_adder = toga.ProgressBar(max=MAX_PROGRESSBAR_VALUE)
# set up common styles
label_style = Pack(flex=1, padding_right=24)
@@ -55,12 +54,18 @@
toga.Box(style=row_box_style, children=[
toga.Label("determinate ProgressBar", style=label_style),
- toga.ProgressBar(max=1, running=False, value=0.5),
+ toga.ProgressBar(
+ max=MAX_PROGRESSBAR_VALUE,
+ running=False,
+ value=0.5 * MAX_PROGRESSBAR_VALUE),
]),
toga.Box(style=row_box_style, children=[
toga.Label("running determinate ProgressBar", style=label_style),
- toga.ProgressBar(max=1, running=True, value=0.5),
+ toga.ProgressBar(
+ max=MAX_PROGRESSBAR_VALUE,
+ running=True,
+ value=0.5 * MAX_PROGRESSBAR_VALUE),
]),
])
@@ -79,7 +84,7 @@
self.progress_adder.max = None
self.progress_adder.start()
else:
- self.progress_adder.max = 1
+ self.progress_adder.max = MAX_PROGRESSBAR_VALUE
self.progress_adder.stop()
diff --git a/src/winforms/toga_winforms/widgets/progressbar.py b/src/winforms/toga_winforms/widgets/progressbar.py
--- a/src/winforms/toga_winforms/widgets/progressbar.py
+++ b/src/winforms/toga_winforms/widgets/progressbar.py
@@ -10,21 +10,31 @@
self.native = WinForms.ProgressBar()
def start(self):
- '''Not supported for WinForms implementation'''
- self.interface.factory.not_implemented('ProgressBar.start()')
-
- # possible implementation (not tested):
- # self.native.Style = ProgressBarStyle.Marquee
+ self.set_running_style()
def stop(self):
- '''Not supported for WinForms implementation'''
- self.interface.factory.not_implemented('ProgressBar.stop()')
+ self.set_stopping_style()
- # possible implementation (not tested):
- # self.native.Style = ProgressBarStyle.Continuous
+ @property
+ def max(self):
+ return self.interface.max
def set_max(self, value):
- self.native.Maximum = value
+ if value is not None:
+ self.native.Maximum = value
+ if self.interface.is_running:
+ self.set_running_style()
+ else:
+ self.set_stopping_style()
+
+ def set_running_style(self):
+ if self.max is None:
+ self.native.Style = WinForms.ProgressBarStyle.Marquee
+ else:
+ self.native.Style = WinForms.ProgressBarStyle.Blocks
+
+ def set_stopping_style(self):
+ self.native.Style = WinForms.ProgressBarStyle.Continuous
def set_value(self, value):
self.native.Value = value
| {"golden_diff": "diff --git a/examples/progressbar/progressbar/app.py b/examples/progressbar/progressbar/app.py\n--- a/examples/progressbar/progressbar/app.py\n+++ b/examples/progressbar/progressbar/app.py\n@@ -2,6 +2,8 @@\n from toga.style import Pack\n from toga.constants import ROW, COLUMN\n \n+MAX_PROGRESSBAR_VALUE = 100\n+\n \n class ProgressBarApp(toga.App):\n \n@@ -10,10 +12,7 @@\n self.main_window = toga.MainWindow(title=self.name, size=(500, 500))\n \n # the user may change the value with +/- buttons\n- self.progress_adder = toga.ProgressBar()\n-\n- # the user may switch between \"running\" mode and a set value\n- self.progress_runner = toga.ProgressBar(max=None)\n+ self.progress_adder = toga.ProgressBar(max=MAX_PROGRESSBAR_VALUE)\n \n # set up common styles\n label_style = Pack(flex=1, padding_right=24)\n@@ -55,12 +54,18 @@\n \n toga.Box(style=row_box_style, children=[\n toga.Label(\"determinate ProgressBar\", style=label_style),\n- toga.ProgressBar(max=1, running=False, value=0.5),\n+ toga.ProgressBar(\n+ max=MAX_PROGRESSBAR_VALUE,\n+ running=False,\n+ value=0.5 * MAX_PROGRESSBAR_VALUE),\n ]),\n \n toga.Box(style=row_box_style, children=[\n toga.Label(\"running determinate ProgressBar\", style=label_style),\n- toga.ProgressBar(max=1, running=True, value=0.5),\n+ toga.ProgressBar(\n+ max=MAX_PROGRESSBAR_VALUE,\n+ running=True,\n+ value=0.5 * MAX_PROGRESSBAR_VALUE),\n ]),\n ])\n \n@@ -79,7 +84,7 @@\n self.progress_adder.max = None\n self.progress_adder.start()\n else:\n- self.progress_adder.max = 1\n+ self.progress_adder.max = MAX_PROGRESSBAR_VALUE\n self.progress_adder.stop()\n \n \ndiff --git a/src/winforms/toga_winforms/widgets/progressbar.py b/src/winforms/toga_winforms/widgets/progressbar.py\n--- a/src/winforms/toga_winforms/widgets/progressbar.py\n+++ b/src/winforms/toga_winforms/widgets/progressbar.py\n@@ -10,21 +10,31 @@\n self.native = WinForms.ProgressBar()\n \n def start(self):\n- '''Not supported for WinForms implementation'''\n- self.interface.factory.not_implemented('ProgressBar.start()')\n-\n- # possible implementation (not tested):\n- # self.native.Style = ProgressBarStyle.Marquee\n+ self.set_running_style()\n \n def stop(self):\n- '''Not supported for WinForms implementation'''\n- self.interface.factory.not_implemented('ProgressBar.stop()')\n+ self.set_stopping_style()\n \n- # possible implementation (not tested):\n- # self.native.Style = ProgressBarStyle.Continuous\n+ @property\n+ def max(self):\n+ return self.interface.max\n \n def set_max(self, value):\n- self.native.Maximum = value\n+ if value is not None:\n+ self.native.Maximum = value\n+ if self.interface.is_running:\n+ self.set_running_style()\n+ else:\n+ self.set_stopping_style()\n+\n+ def set_running_style(self):\n+ if self.max is None:\n+ self.native.Style = WinForms.ProgressBarStyle.Marquee\n+ else:\n+ self.native.Style = WinForms.ProgressBarStyle.Blocks\n+\n+ def set_stopping_style(self):\n+ self.native.Style = WinForms.ProgressBarStyle.Continuous\n \n def set_value(self, value):\n self.native.Value = value\n", "issue": "Progressbar crash on start\n## Expected Behavior\r\n<!--- If you're describing a bug, tell us what you expect to happen. -->\r\nProgress bar to load\r\n<!--- If you're requesting a new feature, tell us why you'd like this feature. -->\r\n\r\n\r\n## Current Behavior\r\n<!--- If you're describing a bug, what currently happens? 
-->\r\n```\r\npython -m progressbar\r\nUnhandled Exception: Python.Runtime.PythonException: TypeError : 'NoneType' value cannot be converted to System.Int32\r\n at Python.Runtime.Dispatcher.Dispatch(ArrayList args)\r\n at __System_Threading_ThreadStartDispatcher.Invoke()\r\n at System.Threading.ThreadHelper.ThreadStart_Context(Object state)\r\n at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)\r\n at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)\r\n at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state)\r\n at System.Threading.ThreadHelper.ThreadStart()\r\n```\r\n## Steps to reproduce\r\n<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->\r\n1. launch progressbar\r\n\r\n## Your Environment\r\n<!--- Provide details on your current environment you found the bug in -->\r\n\r\n* Python Version (list the specific version number) 3.6.5\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version: \r\n - [ ] Linux - distro: - version:\r\n - [ * ] Windows - version: Win10 1607\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n \r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django \r\n - [ ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [ * ] winforms \r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass ProgressBar(Widget):\n def create(self):\n self.native = WinForms.ProgressBar()\n\n def start(self):\n '''Not supported for WinForms implementation'''\n self.interface.factory.not_implemented('ProgressBar.start()')\n\n # possible implementation (not tested):\n # self.native.Style = ProgressBarStyle.Marquee\n\n def stop(self):\n '''Not supported for WinForms implementation'''\n self.interface.factory.not_implemented('ProgressBar.stop()')\n\n # possible implementation (not tested):\n # self.native.Style = ProgressBarStyle.Continuous\n\n def set_max(self, value):\n self.native.Maximum = value\n\n def set_value(self, value):\n self.native.Value = value\n\n def rehint(self):\n # Height must be non-zero\n # Set a sensible min-width\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = self.native.PreferredSize.Height\n", "path": "src/winforms/toga_winforms/widgets/progressbar.py"}, {"content": "import toga\nfrom toga.style import Pack\nfrom toga.constants import ROW, COLUMN\n\n\nclass ProgressBarApp(toga.App):\n\n def startup(self):\n # Main window of the application with title and size\n self.main_window = toga.MainWindow(title=self.name, size=(500, 500))\n\n # the user may change the value with +/- buttons\n self.progress_adder = toga.ProgressBar()\n\n # the user may switch between \"running\" mode and a set value\n self.progress_runner = toga.ProgressBar(max=None)\n\n # set up common styles\n label_style = Pack(flex=1, padding_right=24)\n row_box_style = Pack(direction=ROW, padding=24)\n col_box_style = Pack(direction=COLUMN, padding=24)\n\n # Add the content on the main window\n self.main_window.content = 
toga.Box(style=col_box_style, children=[\n toga.Box(style=col_box_style, children=[\n toga.Label(\"Use the +/- buttons to change the progress\",\n style=label_style),\n\n self.progress_adder,\n\n toga.Box(children=[\n toga.Button(\"+\", on_press=self.increase_progress,\n style=Pack(flex=1)),\n toga.Button(\"-\", on_press=self.decrease_progress,\n style=Pack(flex=1)),\n ]),\n\n toga.Switch(\"Toggle running mode\", on_toggle=self.toggle_running)\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"default ProgressBar\", style=label_style),\n toga.ProgressBar(),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"disabled ProgressBar\", style=label_style),\n toga.ProgressBar(max=None, running=False),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"indeterminate ProgressBar\", style=label_style),\n toga.ProgressBar(max=None, running=True),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"determinate ProgressBar\", style=label_style),\n toga.ProgressBar(max=1, running=False, value=0.5),\n ]),\n\n toga.Box(style=row_box_style, children=[\n toga.Label(\"running determinate ProgressBar\", style=label_style),\n toga.ProgressBar(max=1, running=True, value=0.5),\n ]),\n ])\n\n self.main_window.show()\n\n def increase_progress(self, button, **kw):\n if not self.progress_adder.is_running:\n self.progress_adder.value += 0.1 * self.progress_adder.max\n\n def decrease_progress(self, button, **kw):\n if not self.progress_adder.is_running:\n self.progress_adder.value -= 0.1 * self.progress_adder.max\n\n def toggle_running(self, switch, **kw):\n if switch.is_on:\n self.progress_adder.max = None\n self.progress_adder.start()\n else:\n self.progress_adder.max = 1\n self.progress_adder.stop()\n\n\ndef main():\n # App name and namespace\n return ProgressBarApp('ProgressBar', 'org.beeware.examples.progressbar')\n", "path": "examples/progressbar/progressbar/app.py"}]} | 2,161 | 814 |
gh_patches_debug_25048 | rasdani/github-patches | git_diff | mosaicml__composer-1493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HuggingFace model should update word embeddings dimension according to tokenizer
**To reproduce**
Steps to reproduce the behavior:
Right now if I call
```
model = transformers.AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m')
tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
model = HuggingFaceModel(model=model, tokenizer=tokenizer)
```
The bloom model will have a [250880, 1024] dim word embedding matrix and a vocab size of 250880 even though a gpt2 tokenizer with vocab size of 50257 is used.
## Expected behavior
The class HuggingFaceModel needs to update word embedding matrix if a tokenizer is supplied.
</issue>
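The expected behaviour above maps directly onto the `resize_token_embeddings` API that 🤗 Transformers models expose. A minimal, self-contained sketch of the technique (not the wrapper's internal code) follows; it reuses the exact model/tokenizer pair from the reproduction snippet.

```python
import transformers

model = transformers.AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m')
tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')

# Shrink (or grow) the word embedding matrix, and any tied lm_head, to the
# tokenizer's vocabulary size before wrapping the model.
if model.config.vocab_size != len(tokenizer):
    model.resize_token_embeddings(len(tokenizer))

assert model.get_input_embeddings().weight.shape[0] == len(tokenizer)
```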
<code>
[start of composer/models/huggingface.py]
1 # Copyright 2022 MosaicML Composer authors
2 # SPDX-License-Identifier: Apache-2.0
3
4 """A wrapper class that converts 🤗 Transformers models to composer models"""
5
6 from __future__ import annotations
7
8 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
9
10 from torchmetrics import Metric
11
12 from composer.models.base import ComposerModel
13 from composer.utils.import_helpers import MissingConditionalImportError
14
15 if TYPE_CHECKING:
16 import transformers
17
18 __all__ = ['HuggingFaceModel']
19
20
21 class HuggingFaceModel(ComposerModel):
22 """
23 A wrapper class that converts 🤗 Transformers models to composer models.
24
25 Args:
26 model (transformers.PreTrainedModel): A 🤗 Transformers model.
27 tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to prepare the dataset and validate model inputs during training. Default ``None``.
28 use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``
29 metrics (list[Metric], optional): list of torchmetrics to apply to the output of `validate`. Default: ``None``.
30 .. warning:: This wrapper is designed to work with 🤗 datasets that define a `labels` column.
31
32 Example:
33
34 .. testcode::
35
36 import transformers
37 from composer.models import HuggingFaceModel
38
39 hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
40 model = HuggingFaceModel(hf_model)
41 """
42
43 def __init__(self,
44 model: transformers.PreTrainedModel,
45 tokenizer: Optional[Union[transformers.PreTrainedTokenizer,
46 transformers.PreTrainedTokenizerFast]] = None,
47 use_logits: Optional[bool] = False,
48 metrics: Optional[List[Metric]] = None) -> None:
49 try:
50 import transformers
51 except ImportError as e:
52 raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
53
54 super().__init__()
55 self.model = model
56 self.config = model.config
57
58 # the set of inputs that a model expects inferred from the model type or
59 # tokenizer if provided
60 if tokenizer is None:
61 if isinstance(self.model.base_model, transformers.GPT2Model):
62 self.model_inputs = {'input_ids', 'attention_mask'}
63 elif isinstance(self.model.base_model, transformers.BertModel):
64 self.model_inputs = {'input_ids', 'attention_mask', 'token_type_ids'}
65 else:
66 assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'
67 self.model_inputs = set(tokenizer.model_input_names)
68
69 self.use_logits = use_logits
70
71 self.train_metrics = None
72 self.val_metrics = None
73
74 if metrics:
75 self.train_metrics = {metric.__class__.__name__: metric for metric in metrics}
76 self.val_metrics = {metric.__class__.__name__: metric for metric in metrics}
77
78 self.labels = None # set in eval_forward() if exists
79
80 def forward(self, batch):
81 for key in self.model_inputs:
82 if key not in batch.keys():
83 raise ValueError(f'Batch missing key: {key}')
84
85 output = self.model(**batch) # type: ignore (thirdparty)
86 return output
87
88 def loss(self, outputs, batch):
89 return outputs['loss']
90
91 def eval_forward(self, batch, outputs: Optional[Any] = None):
92 output = outputs if outputs else self.forward(batch)
93 if self.use_logits:
94 self.labels = batch.pop('labels')
95 output = output['logits']
96
97 # if we are in the single class case, then remove the classes dimension
98 if output.shape[1] == 1:
99 output = output.squeeze(dim=1)
100
101 return output
102
103 def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:
104 if is_train:
105 metrics = self.train_metrics
106 else:
107 metrics = self.val_metrics
108
109 return metrics if metrics else {}
110
111 def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
112 metric.update(outputs, self.labels)
113
114 def get_model_inputs(self):
115 """Returns a set of inputs that the model expects in the forward pass.
116 If an algorithm wants to interact with the model inputs (for instance,
117 popping the labels for a custom loss fn, or adding attention head masks
118 for head pruning, it must access self.set_model_inputs().
119 Returns:
120 model_inputs: The set of keys that are expected in the Mapping used to compute the forward pass.
121 """
122
123 return self.model_inputs
124
[end of composer/models/huggingface.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/composer/models/huggingface.py b/composer/models/huggingface.py
--- a/composer/models/huggingface.py
+++ b/composer/models/huggingface.py
@@ -5,6 +5,7 @@
from __future__ import annotations
+import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from torchmetrics import Metric
@@ -15,6 +16,8 @@
if TYPE_CHECKING:
import transformers
+log = logging.getLogger(__name__)
+
__all__ = ['HuggingFaceModel']
@@ -66,6 +69,13 @@
assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'
self.model_inputs = set(tokenizer.model_input_names)
+ if self.config.vocab_size != len(tokenizer):
+ # set model's word embedding matrix and final lm_head to vocab size according to tokenizer
+ log.warning(
+ f'The number of tokens in the tokenizer and the number of tokens in the model are different.'
+ f' Resizing the model tokenizer to {len(tokenizer)} from {self.config.vocab_size}.')
+ self.model.resize_token_embeddings(len(tokenizer))
+
self.use_logits = use_logits
self.train_metrics = None
| {"golden_diff": "diff --git a/composer/models/huggingface.py b/composer/models/huggingface.py\n--- a/composer/models/huggingface.py\n+++ b/composer/models/huggingface.py\n@@ -5,6 +5,7 @@\n \n from __future__ import annotations\n \n+import logging\n from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n \n from torchmetrics import Metric\n@@ -15,6 +16,8 @@\n if TYPE_CHECKING:\n import transformers\n \n+log = logging.getLogger(__name__)\n+\n __all__ = ['HuggingFaceModel']\n \n \n@@ -66,6 +69,13 @@\n assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'\n self.model_inputs = set(tokenizer.model_input_names)\n \n+ if self.config.vocab_size != len(tokenizer):\n+ # set model's word embedding matrix and final lm_head to vocab size according to tokenizer\n+ log.warning(\n+ f'The number of tokens in the tokenizer and the number of tokens in the model are different.'\n+ f' Resizing the model tokenizer to {len(tokenizer)} from {self.config.vocab_size}.')\n+ self.model.resize_token_embeddings(len(tokenizer))\n+\n self.use_logits = use_logits\n \n self.train_metrics = None\n", "issue": "HuggingFace model should update word embeddings dimension according to tokenizer\n** To reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nRight now if I call \r\n\r\n```\r\nmodel = transformers.AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m')\r\ntokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')\r\n\r\nmodel = HuggingFaceModel(model=model, tokenizer=tokenizer)\r\n```\r\n\r\nThe bloom model will have a [250880, 1024] dim word embedding matrix and a vocab size of 250880 even though a gpt2 tokenizer with vocab size of 50257 is used.\r\n\r\n## Expected behavior\r\n\r\nThe class HuggingFaceModel needs to update word embedding matrix if a tokenizer is supplied.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"A wrapper class that converts \ud83e\udd17 Transformers models to composer models\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n\nfrom torchmetrics import Metric\n\nfrom composer.models.base import ComposerModel\nfrom composer.utils.import_helpers import MissingConditionalImportError\n\nif TYPE_CHECKING:\n import transformers\n\n__all__ = ['HuggingFaceModel']\n\n\nclass HuggingFaceModel(ComposerModel):\n \"\"\"\n A wrapper class that converts \ud83e\udd17 Transformers models to composer models.\n\n Args:\n model (transformers.PreTrainedModel): A \ud83e\udd17 Transformers model.\n tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to prepare the dataset and validate model inputs during training. Default ``None``.\n use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``\n metrics (list[Metric], optional): list of torchmetrics to apply to the output of `validate`. Default: ``None``.\n .. warning:: This wrapper is designed to work with \ud83e\udd17 datasets that define a `labels` column.\n\n Example:\n\n .. 
testcode::\n\n import transformers\n from composer.models import HuggingFaceModel\n\n hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n model = HuggingFaceModel(hf_model)\n \"\"\"\n\n def __init__(self,\n model: transformers.PreTrainedModel,\n tokenizer: Optional[Union[transformers.PreTrainedTokenizer,\n transformers.PreTrainedTokenizerFast]] = None,\n use_logits: Optional[bool] = False,\n metrics: Optional[List[Metric]] = None) -> None:\n try:\n import transformers\n except ImportError as e:\n raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e\n\n super().__init__()\n self.model = model\n self.config = model.config\n\n # the set of inputs that a model expects inferred from the model type or\n # tokenizer if provided\n if tokenizer is None:\n if isinstance(self.model.base_model, transformers.GPT2Model):\n self.model_inputs = {'input_ids', 'attention_mask'}\n elif isinstance(self.model.base_model, transformers.BertModel):\n self.model_inputs = {'input_ids', 'attention_mask', 'token_type_ids'}\n else:\n assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'\n self.model_inputs = set(tokenizer.model_input_names)\n\n self.use_logits = use_logits\n\n self.train_metrics = None\n self.val_metrics = None\n\n if metrics:\n self.train_metrics = {metric.__class__.__name__: metric for metric in metrics}\n self.val_metrics = {metric.__class__.__name__: metric for metric in metrics}\n\n self.labels = None # set in eval_forward() if exists\n\n def forward(self, batch):\n for key in self.model_inputs:\n if key not in batch.keys():\n raise ValueError(f'Batch missing key: {key}')\n\n output = self.model(**batch) # type: ignore (thirdparty)\n return output\n\n def loss(self, outputs, batch):\n return outputs['loss']\n\n def eval_forward(self, batch, outputs: Optional[Any] = None):\n output = outputs if outputs else self.forward(batch)\n if self.use_logits:\n self.labels = batch.pop('labels')\n output = output['logits']\n\n # if we are in the single class case, then remove the classes dimension\n if output.shape[1] == 1:\n output = output.squeeze(dim=1)\n\n return output\n\n def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:\n if is_train:\n metrics = self.train_metrics\n else:\n metrics = self.val_metrics\n\n return metrics if metrics else {}\n\n def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:\n metric.update(outputs, self.labels)\n\n def get_model_inputs(self):\n \"\"\"Returns a set of inputs that the model expects in the forward pass.\n If an algorithm wants to interact with the model inputs (for instance,\n popping the labels for a custom loss fn, or adding attention head masks\n for head pruning, it must access self.set_model_inputs().\n Returns:\n model_inputs: The set of keys that are expected in the Mapping used to compute the forward pass.\n \"\"\"\n\n return self.model_inputs\n", "path": "composer/models/huggingface.py"}]} | 1,988 | 281 |
gh_patches_debug_17906 | rasdani/github-patches | git_diff | openshift__openshift-ansible-8685 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix multimaster OpenStack deployment failure
When trying to deploy multimaster OpenStack with nsupdate and either no
load balancer or OpenStack LBaaS, the creation of the DNS records would
fail.
This is because it was still relying on the old `lb` inventory group for
all multimaster scenarios, rather than just in the
`openshift_openstack_use_lbaas_load_balancer` case.
So we define a new inventory variable
`openshift_openstack_private_api_ip` (which is already defined in the
Heat templates) and use that instead.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1588428
</issue>
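The fix described above hinges on surfacing the Heat stack output `private_api_ip` as an inventory variable so the nsupdate plays no longer depend on the `lb` host group. A small sketch of that wiring is below; `add_private_api_ip` is a hypothetical helper (the real change may live elsewhere in the playbooks), while `private_api_ip` and the `OSEv3` group come from the inventory script shown next.

```python
def add_private_api_ip(inventory, stack_outputs):
    """Expose the private API VIP as openshift_openstack_private_api_ip."""
    if stack_outputs and 'private_api_ip' in stack_outputs:
        inventory.setdefault('OSEv3', {}).setdefault('vars', {})[
            'openshift_openstack_private_api_ip'] = stack_outputs['private_api_ip']
    return inventory
```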
<code>
[start of playbooks/openstack/inventory.py]
1 #!/usr/bin/env python
2 """
3 This is an Ansible dynamic inventory for OpenStack.
4
5 It requires your OpenStack credentials to be set in clouds.yaml or your shell
6 environment.
7
8 """
9
10 from __future__ import print_function
11
12 from collections import Mapping
13 import json
14 import os
15
16 from keystoneauth1.exceptions.catalog import EndpointNotFound
17 import shade
18
19
20 def base_openshift_inventory(cluster_hosts):
21 '''Set the base openshift inventory.'''
22 inventory = {}
23
24 masters = [server.name for server in cluster_hosts
25 if server.metadata['host-type'] == 'master']
26
27 etcd = [server.name for server in cluster_hosts
28 if server.metadata['host-type'] == 'etcd']
29 if not etcd:
30 etcd = masters
31
32 infra_hosts = [server.name for server in cluster_hosts
33 if server.metadata['host-type'] == 'node' and
34 server.metadata['sub-host-type'] == 'infra']
35
36 app = [server.name for server in cluster_hosts
37 if server.metadata['host-type'] == 'node' and
38 server.metadata['sub-host-type'] == 'app']
39
40 cns = [server.name for server in cluster_hosts
41 if server.metadata['host-type'] == 'cns']
42
43 nodes = list(set(masters + infra_hosts + app + cns))
44
45 dns = [server.name for server in cluster_hosts
46 if server.metadata['host-type'] == 'dns']
47
48 load_balancers = [server.name for server in cluster_hosts
49 if server.metadata['host-type'] == 'lb']
50
51 osev3 = list(set(nodes + etcd + load_balancers))
52
53 inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
54 inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
55 inventory['masters'] = {'hosts': masters}
56 inventory['etcd'] = {'hosts': etcd}
57 inventory['nodes'] = {'hosts': nodes}
58 inventory['infra_hosts'] = {'hosts': infra_hosts}
59 inventory['app'] = {'hosts': app}
60 inventory['glusterfs'] = {'hosts': cns}
61 inventory['dns'] = {'hosts': dns}
62 inventory['lb'] = {'hosts': load_balancers}
63 inventory['localhost'] = {'ansible_connection': 'local'}
64
65 return inventory
66
67
68 def get_docker_storage_mountpoints(volumes):
69 '''Check volumes to see if they're being used for docker storage'''
70 docker_storage_mountpoints = {}
71 for volume in volumes:
72 if volume.metadata.get('purpose') == "openshift_docker_storage":
73 for attachment in volume.attachments:
74 if attachment.server_id in docker_storage_mountpoints:
75 docker_storage_mountpoints[attachment.server_id].append(attachment.device)
76 else:
77 docker_storage_mountpoints[attachment.server_id] = [attachment.device]
78 return docker_storage_mountpoints
79
80
81 def _get_hostvars(server, docker_storage_mountpoints):
82 ssh_ip_address = server.public_v4 or server.private_v4
83 hostvars = {
84 'ansible_host': ssh_ip_address
85 }
86
87 public_v4 = server.public_v4 or server.private_v4
88 if public_v4:
89 hostvars['public_v4'] = server.public_v4
90 hostvars['openshift_public_ip'] = server.public_v4
91 # TODO(shadower): what about multiple networks?
92 if server.private_v4:
93 hostvars['private_v4'] = server.private_v4
94 hostvars['openshift_ip'] = server.private_v4
95
96 # NOTE(shadower): Yes, we set both hostname and IP to the private
97 # IP address for each node. OpenStack doesn't resolve nodes by
98 # name at all, so using a hostname here would require an internal
99 # DNS which would complicate the setup and potentially introduce
100 # performance issues.
101 hostvars['openshift_hostname'] = server.metadata.get(
102 'openshift_hostname', server.private_v4)
103 hostvars['openshift_public_hostname'] = server.name
104
105 if server.metadata['host-type'] == 'cns':
106 hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
107
108 node_labels = server.metadata.get('node_labels')
109 # NOTE(shadower): the node_labels value must be a dict not string
110 if not isinstance(node_labels, Mapping):
111 node_labels = json.loads(node_labels)
112
113 if node_labels:
114 hostvars['openshift_node_labels'] = node_labels
115
116 # check for attached docker storage volumes
117 if 'os-extended-volumes:volumes_attached' in server:
118 if server.id in docker_storage_mountpoints:
119 hostvars['docker_storage_mountpoints'] = ' '.join(
120 docker_storage_mountpoints[server.id])
121 return hostvars
122
123
124 def build_inventory():
125 '''Build the dynamic inventory.'''
126 cloud = shade.openstack_cloud()
127
128 # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
129 # environment variable.
130 cluster_hosts = [
131 server for server in cloud.list_servers()
132 if 'metadata' in server and 'clusterid' in server.metadata]
133
134 inventory = base_openshift_inventory(cluster_hosts)
135
136 for server in cluster_hosts:
137 if 'group' in server.metadata:
138 group = server.metadata.get('group')
139 if group not in inventory:
140 inventory[group] = {'hosts': []}
141 inventory[group]['hosts'].append(server.name)
142
143 inventory['_meta'] = {'hostvars': {}}
144
145 # Some clouds don't have Cinder. That's okay:
146 try:
147 volumes = cloud.list_volumes()
148 except EndpointNotFound:
149 volumes = []
150
151 # cinder volumes used for docker storage
152 docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
153 for server in cluster_hosts:
154 inventory['_meta']['hostvars'][server.name] = _get_hostvars(
155 server,
156 docker_storage_mountpoints)
157
158 stout = _get_stack_outputs(cloud)
159 if stout is not None:
160 try:
161 inventory['localhost'].update({
162 'openshift_openstack_api_lb_provider':
163 stout['api_lb_provider'],
164 'openshift_openstack_api_lb_port_id':
165 stout['api_lb_vip_port_id'],
166 'openshift_openstack_api_lb_sg_id':
167 stout['api_lb_sg_id']})
168 except KeyError:
169 pass # Not an API load balanced deployment
170
171 try:
172 inventory['OSEv3']['vars'][
173 'openshift_master_cluster_hostname'] = stout['private_api_ip']
174 except KeyError:
175 pass # Internal LB not specified
176
177 inventory['localhost']['openshift_openstack_public_api_ip'] = \
178 stout.get('public_api_ip')
179 inventory['localhost']['openshift_openstack_public_router_ip'] = \
180 stout.get('public_router_ip')
181
182 try:
183 inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
184 except KeyError:
185 pass # Not a kuryr deployment
186 return inventory
187
188
189 def _get_stack_outputs(cloud_client):
190 """Returns a dictionary with the stack outputs"""
191 cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
192
193 stack = cloud_client.get_stack(cluster_name)
194 if stack is None or stack['stack_status'] not in (
195 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
196 return None
197
198 data = {}
199 for output in stack['outputs']:
200 data[output['output_key']] = output['output_value']
201 return data
202
203
204 def _get_kuryr_vars(cloud_client, data):
205 """Returns a dictionary of Kuryr variables resulting of heat stacking"""
206 settings = {}
207 settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
208 settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
209 settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
210 settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
211 settings['kuryr_openstack_pod_project_id'] = (
212 cloud_client.current_project_id)
213 settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
214
215 settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
216 settings['kuryr_openstack_username'] = cloud_client.auth['username']
217 settings['kuryr_openstack_password'] = cloud_client.auth['password']
218 if 'user_domain_id' in cloud_client.auth:
219 settings['kuryr_openstack_user_domain_name'] = (
220 cloud_client.auth['user_domain_id'])
221 else:
222 settings['kuryr_openstack_user_domain_name'] = (
223 cloud_client.auth['user_domain_name'])
224 # FIXME(apuimedo): consolidate kuryr controller credentials into the same
225 # vars the openstack playbook uses.
226 settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
227 if 'project_domain_id' in cloud_client.auth:
228 settings['kuryr_openstack_project_domain_name'] = (
229 cloud_client.auth['project_domain_id'])
230 else:
231 settings['kuryr_openstack_project_domain_name'] = (
232 cloud_client.auth['project_domain_name'])
233 return settings
234
235
236 if __name__ == '__main__':
237 print(json.dumps(build_inventory(), indent=4, sort_keys=True))
238
[end of playbooks/openstack/inventory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -9,7 +9,6 @@
from __future__ import print_function
-from collections import Mapping
import json
import os
@@ -105,13 +104,8 @@
if server.metadata['host-type'] == 'cns':
hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
- node_labels = server.metadata.get('node_labels')
- # NOTE(shadower): the node_labels value must be a dict not string
- if not isinstance(node_labels, Mapping):
- node_labels = json.loads(node_labels)
-
- if node_labels:
- hostvars['openshift_node_labels'] = node_labels
+ group_name = server.metadata.get('openshift_node_group_name')
+ hostvars['openshift_node_group_name'] = group_name
# check for attached docker storage volumes
if 'os-extended-volumes:volumes_attached' in server:
| {"golden_diff": "diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py\n--- a/playbooks/openstack/inventory.py\n+++ b/playbooks/openstack/inventory.py\n@@ -9,7 +9,6 @@\n \n from __future__ import print_function\n \n-from collections import Mapping\n import json\n import os\n \n@@ -105,13 +104,8 @@\n if server.metadata['host-type'] == 'cns':\n hostvars['glusterfs_devices'] = ['/dev/nvme0n1']\n \n- node_labels = server.metadata.get('node_labels')\n- # NOTE(shadower): the node_labels value must be a dict not string\n- if not isinstance(node_labels, Mapping):\n- node_labels = json.loads(node_labels)\n-\n- if node_labels:\n- hostvars['openshift_node_labels'] = node_labels\n+ group_name = server.metadata.get('openshift_node_group_name')\n+ hostvars['openshift_node_group_name'] = group_name\n \n # check for attached docker storage volumes\n if 'os-extended-volumes:volumes_attached' in server:\n", "issue": "Fix multimaster OpenStack deployment failure\nWhen trying to deploy multimaster OpenStack with nsupdate and either no\r\nload balancer or OpenStack LBaaS, the creation of the DNS records would\r\nfail.\r\n\r\nThis is because it was still relying on the old `lb` inventory group for\r\nall multimaster scenarios, rather than just in the\r\n`openshift_openstack_use_lbaas_load_balancer` case.\r\n\r\nSo we define a new inventory variable\r\n`openshift_openstack_private_api_ip` (which is already defined in the\r\nHeat templates) and use that instead.\r\n\r\nFixes: https://bugzilla.redhat.com/show_bug.cgi?id=1588428\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis is an Ansible dynamic inventory for OpenStack.\n\nIt requires your OpenStack credentials to be set in clouds.yaml or your shell\nenvironment.\n\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom collections import Mapping\nimport json\nimport os\n\nfrom keystoneauth1.exceptions.catalog import EndpointNotFound\nimport shade\n\n\ndef base_openshift_inventory(cluster_hosts):\n '''Set the base openshift inventory.'''\n inventory = {}\n\n masters = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'master']\n\n etcd = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'etcd']\n if not etcd:\n etcd = masters\n\n infra_hosts = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'infra']\n\n app = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'node' and\n server.metadata['sub-host-type'] == 'app']\n\n cns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'cns']\n\n nodes = list(set(masters + infra_hosts + app + cns))\n\n dns = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'dns']\n\n load_balancers = [server.name for server in cluster_hosts\n if server.metadata['host-type'] == 'lb']\n\n osev3 = list(set(nodes + etcd + load_balancers))\n\n inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}\n inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}\n inventory['masters'] = {'hosts': masters}\n inventory['etcd'] = {'hosts': etcd}\n inventory['nodes'] = {'hosts': nodes}\n inventory['infra_hosts'] = {'hosts': infra_hosts}\n inventory['app'] = {'hosts': app}\n inventory['glusterfs'] = {'hosts': cns}\n inventory['dns'] = {'hosts': dns}\n inventory['lb'] = {'hosts': load_balancers}\n inventory['localhost'] = {'ansible_connection': 'local'}\n\n return inventory\n\n\ndef 
get_docker_storage_mountpoints(volumes):\n '''Check volumes to see if they're being used for docker storage'''\n docker_storage_mountpoints = {}\n for volume in volumes:\n if volume.metadata.get('purpose') == \"openshift_docker_storage\":\n for attachment in volume.attachments:\n if attachment.server_id in docker_storage_mountpoints:\n docker_storage_mountpoints[attachment.server_id].append(attachment.device)\n else:\n docker_storage_mountpoints[attachment.server_id] = [attachment.device]\n return docker_storage_mountpoints\n\n\ndef _get_hostvars(server, docker_storage_mountpoints):\n ssh_ip_address = server.public_v4 or server.private_v4\n hostvars = {\n 'ansible_host': ssh_ip_address\n }\n\n public_v4 = server.public_v4 or server.private_v4\n if public_v4:\n hostvars['public_v4'] = server.public_v4\n hostvars['openshift_public_ip'] = server.public_v4\n # TODO(shadower): what about multiple networks?\n if server.private_v4:\n hostvars['private_v4'] = server.private_v4\n hostvars['openshift_ip'] = server.private_v4\n\n # NOTE(shadower): Yes, we set both hostname and IP to the private\n # IP address for each node. OpenStack doesn't resolve nodes by\n # name at all, so using a hostname here would require an internal\n # DNS which would complicate the setup and potentially introduce\n # performance issues.\n hostvars['openshift_hostname'] = server.metadata.get(\n 'openshift_hostname', server.private_v4)\n hostvars['openshift_public_hostname'] = server.name\n\n if server.metadata['host-type'] == 'cns':\n hostvars['glusterfs_devices'] = ['/dev/nvme0n1']\n\n node_labels = server.metadata.get('node_labels')\n # NOTE(shadower): the node_labels value must be a dict not string\n if not isinstance(node_labels, Mapping):\n node_labels = json.loads(node_labels)\n\n if node_labels:\n hostvars['openshift_node_labels'] = node_labels\n\n # check for attached docker storage volumes\n if 'os-extended-volumes:volumes_attached' in server:\n if server.id in docker_storage_mountpoints:\n hostvars['docker_storage_mountpoints'] = ' '.join(\n docker_storage_mountpoints[server.id])\n return hostvars\n\n\ndef build_inventory():\n '''Build the dynamic inventory.'''\n cloud = shade.openstack_cloud()\n\n # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`\n # environment variable.\n cluster_hosts = [\n server for server in cloud.list_servers()\n if 'metadata' in server and 'clusterid' in server.metadata]\n\n inventory = base_openshift_inventory(cluster_hosts)\n\n for server in cluster_hosts:\n if 'group' in server.metadata:\n group = server.metadata.get('group')\n if group not in inventory:\n inventory[group] = {'hosts': []}\n inventory[group]['hosts'].append(server.name)\n\n inventory['_meta'] = {'hostvars': {}}\n\n # Some clouds don't have Cinder. 
That's okay:\n try:\n volumes = cloud.list_volumes()\n except EndpointNotFound:\n volumes = []\n\n # cinder volumes used for docker storage\n docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)\n for server in cluster_hosts:\n inventory['_meta']['hostvars'][server.name] = _get_hostvars(\n server,\n docker_storage_mountpoints)\n\n stout = _get_stack_outputs(cloud)\n if stout is not None:\n try:\n inventory['localhost'].update({\n 'openshift_openstack_api_lb_provider':\n stout['api_lb_provider'],\n 'openshift_openstack_api_lb_port_id':\n stout['api_lb_vip_port_id'],\n 'openshift_openstack_api_lb_sg_id':\n stout['api_lb_sg_id']})\n except KeyError:\n pass # Not an API load balanced deployment\n\n try:\n inventory['OSEv3']['vars'][\n 'openshift_master_cluster_hostname'] = stout['private_api_ip']\n except KeyError:\n pass # Internal LB not specified\n\n inventory['localhost']['openshift_openstack_public_api_ip'] = \\\n stout.get('public_api_ip')\n inventory['localhost']['openshift_openstack_public_router_ip'] = \\\n stout.get('public_router_ip')\n\n try:\n inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)\n except KeyError:\n pass # Not a kuryr deployment\n return inventory\n\n\ndef _get_stack_outputs(cloud_client):\n \"\"\"Returns a dictionary with the stack outputs\"\"\"\n cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')\n\n stack = cloud_client.get_stack(cluster_name)\n if stack is None or stack['stack_status'] not in (\n 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):\n return None\n\n data = {}\n for output in stack['outputs']:\n data[output['output_key']] = output['output_value']\n return data\n\n\ndef _get_kuryr_vars(cloud_client, data):\n \"\"\"Returns a dictionary of Kuryr variables resulting of heat stacking\"\"\"\n settings = {}\n settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']\n settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']\n settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']\n settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']\n settings['kuryr_openstack_pod_project_id'] = (\n cloud_client.current_project_id)\n settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']\n\n settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']\n settings['kuryr_openstack_username'] = cloud_client.auth['username']\n settings['kuryr_openstack_password'] = cloud_client.auth['password']\n if 'user_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_id'])\n else:\n settings['kuryr_openstack_user_domain_name'] = (\n cloud_client.auth['user_domain_name'])\n # FIXME(apuimedo): consolidate kuryr controller credentials into the same\n # vars the openstack playbook uses.\n settings['kuryr_openstack_project_id'] = cloud_client.current_project_id\n if 'project_domain_id' in cloud_client.auth:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_id'])\n else:\n settings['kuryr_openstack_project_domain_name'] = (\n cloud_client.auth['project_domain_name'])\n return settings\n\n\nif __name__ == '__main__':\n print(json.dumps(build_inventory(), indent=4, sort_keys=True))\n", "path": "playbooks/openstack/inventory.py"}]} | 3,313 | 244 |
gh_patches_debug_16540 | rasdani/github-patches | git_diff | Kinto__kinto-1343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`kinto create-user` doesn't override the password if the user already exists.
</issue>
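The behaviour change amounts to making the account write idempotent: `storage.update()` acts as create-or-replace, so re-running `kinto create-user` overwrites the stored password hash instead of leaving the existing record untouched. A small sketch, with `upsert_account` as an illustrative wrapper around the call the script makes:

```python
def upsert_account(storage, username, password, hash_password):
    # update() replaces an existing record; create(..., ignore_conflict=True)
    # would silently keep the old password.
    record = {'id': username, 'password': hash_password(password)}
    return storage.update(collection_id='account',
                          parent_id=username,
                          object_id=username,
                          record=record)
```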
<code>
[start of kinto/plugins/accounts/scripts.py]
1 import logging
2 import getpass
3
4 import transaction as current_transaction
5 from pyramid.settings import asbool
6
7 from .utils import hash_password
8 from .views import AccountIdGenerator
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def create_user(env, username=None, password=None):
15 """Administrative command to create a new user."""
16 registry = env['registry']
17 settings = registry.settings
18 readonly_mode = asbool(settings.get('readonly', False))
19 if readonly_mode:
20 message = 'Cannot create a user with a readonly server.'
21 logger.error(message)
22 return 51
23
24 if 'kinto.plugins.accounts' not in settings['includes']:
25 message = 'Cannot create a user when the accounts plugin is not installed.'
26 logger.error(message)
27 return 52
28
29 try:
30 validator = AccountIdGenerator()
31 if username is None:
32 username = input('Username: ')
33 while not validator.match(username):
34 print('{} is not a valid username.')
35 print('Username should match {0!r}, please try again.'.format(validator.regexp))
36 username = input('Username: ')
37
38 if password is None:
39 while True: # The user didn't entered twice the same password
40 password = getpass.getpass('Please enter a password for {}: '.format(username))
41 confirm = getpass.getpass('Please confirm the password: '.format(username))
42
43 if password != confirm:
44 print('Sorry, passwords do not match, please try again.')
45 else:
46 break
47 except EOFError:
48 print('User creation aborted')
49 return 53
50
51 print("Creating user '{}'".format(username))
52 record = {'id': username, 'password': hash_password(password)}
53 registry.storage.create(collection_id='account',
54 parent_id=username,
55 record=record,
56 ignore_conflict=True)
57 registry.permission.add_principal_to_ace('/accounts/{}'.format(username),
58 'write',
59 'account:{}'.format(username))
60
61 current_transaction.commit()
62
63 return 0
64
[end of kinto/plugins/accounts/scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py
--- a/kinto/plugins/accounts/scripts.py
+++ b/kinto/plugins/accounts/scripts.py
@@ -50,10 +50,10 @@
print("Creating user '{}'".format(username))
record = {'id': username, 'password': hash_password(password)}
- registry.storage.create(collection_id='account',
+ registry.storage.update(collection_id='account',
parent_id=username,
- record=record,
- ignore_conflict=True)
+ object_id=username,
+ record=record)
registry.permission.add_principal_to_ace('/accounts/{}'.format(username),
'write',
'account:{}'.format(username))
| {"golden_diff": "diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py\n--- a/kinto/plugins/accounts/scripts.py\n+++ b/kinto/plugins/accounts/scripts.py\n@@ -50,10 +50,10 @@\n \n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n- registry.storage.create(collection_id='account',\n+ registry.storage.update(collection_id='account',\n parent_id=username,\n- record=record,\n- ignore_conflict=True)\n+ object_id=username,\n+ record=record)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n", "issue": "`kinto create-user` doesn't override the password if the user already exists.\n\n", "before_files": [{"content": "import logging\nimport getpass\n\nimport transaction as current_transaction\nfrom pyramid.settings import asbool\n\nfrom .utils import hash_password\nfrom .views import AccountIdGenerator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_user(env, username=None, password=None):\n \"\"\"Administrative command to create a new user.\"\"\"\n registry = env['registry']\n settings = registry.settings\n readonly_mode = asbool(settings.get('readonly', False))\n if readonly_mode:\n message = 'Cannot create a user with a readonly server.'\n logger.error(message)\n return 51\n\n if 'kinto.plugins.accounts' not in settings['includes']:\n message = 'Cannot create a user when the accounts plugin is not installed.'\n logger.error(message)\n return 52\n\n try:\n validator = AccountIdGenerator()\n if username is None:\n username = input('Username: ')\n while not validator.match(username):\n print('{} is not a valid username.')\n print('Username should match {0!r}, please try again.'.format(validator.regexp))\n username = input('Username: ')\n\n if password is None:\n while True: # The user didn't entered twice the same password\n password = getpass.getpass('Please enter a password for {}: '.format(username))\n confirm = getpass.getpass('Please confirm the password: '.format(username))\n\n if password != confirm:\n print('Sorry, passwords do not match, please try again.')\n else:\n break\n except EOFError:\n print('User creation aborted')\n return 53\n\n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n registry.storage.create(collection_id='account',\n parent_id=username,\n record=record,\n ignore_conflict=True)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n\n current_transaction.commit()\n\n return 0\n", "path": "kinto/plugins/accounts/scripts.py"}]} | 1,103 | 153 |
gh_patches_debug_61213 | rasdani/github-patches | git_diff | scikit-hep__pyhf-483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TensorFlow einsum behavior change
# Description
In the test suite the `test_einsum[tensorflow]` [test is failing](https://travis-ci.org/diana-hep/pyhf/jobs/548493214#L689-L714) for `tensorflow` `v1.14.0`.
# Expected Behavior
`test_einsum[tensorflow]` passes
# Actual Behavior
```
backend = (<pyhf.tensor.tensorflow_backend.tensorflow_backend object at 0x7f11de50be10>, None)
def test_einsum(backend):
tb = pyhf.tensorlib
x = np.arange(20).reshape(5, 4).tolist()
if isinstance(pyhf.tensorlib, pyhf.tensor.mxnet_backend):
with pytest.raises(NotImplementedError):
assert tb.einsum('ij->ji', [1, 2, 3])
else:
> assert np.all(tb.tolist(tb.einsum('ij->ji', x)) == np.asarray(x).T.tolist())
tests/test_tensor.py:200:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
pyhf/tensor/tensorflow_backend.py:260: in einsum
return tf.einsum(subscripts, *operands)
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/ops/special_math_ops.py:255: in einsum
input_shapes = [x.get_shape() for x in inputs]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.0 = <list_iterator object at 0x7f11cc06bb38>
> input_shapes = [x.get_shape() for x in inputs]
E AttributeError: 'list' object has no attribute 'get_shape'
```
# Steps to Reproduce
Run the test suite.
```
pytest -s tests/test_tensor.py
```
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4 from os import path
5 import sys
6
7 this_directory = path.abspath(path.dirname(__file__))
8 if sys.version_info.major < 3:
9 from io import open
10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
11 long_description = readme_md.read()
12
13 extras_require = {
14 'tensorflow': [
15 'tensorflow~=1.13',
16 'tensorflow-probability~=0.5',
17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
18 'setuptools<=39.1.0',
19 ],
20 'torch': ['torch~=1.0'],
21 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],
22 # 'dask': [
23 # 'dask[array]'
24 # ],
25 'xmlio': ['uproot'],
26 'minuit': ['iminuit'],
27 'develop': [
28 'pyflakes',
29 'pytest~=3.5',
30 'pytest-cov>=2.5.1',
31 'pytest-mock',
32 'pytest-benchmark[histogram]',
33 'pytest-console-scripts',
34 'python-coveralls',
35 'coverage>=4.0', # coveralls
36 'matplotlib',
37 'jupyter',
38 'nbdime',
39 'uproot~=3.3',
40 'papermill~=1.0',
41 'nteract-scrapbook~=0.2',
42 'graphviz',
43 'bumpversion',
44 'sphinx',
45 'sphinxcontrib-bibtex',
46 'sphinxcontrib-napoleon',
47 'sphinx_rtd_theme',
48 'nbsphinx',
49 'sphinx-issues',
50 'm2r',
51 'jsonpatch',
52 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
53 'pre-commit',
54 'black;python_version>="3.6"', # Black is Python3 only
55 'twine',
56 ],
57 }
58 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
59
60
61 def _is_test_pypi():
62 """
63 Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and
64 set to true (c.f. .travis.yml)
65
66 The use_scm_version kwarg accepts a callable for the local_scheme
67 configuration parameter with argument "version". This can be replaced
68 with a lambda as the desired version structure is {next_version}.dev{distance}
69 c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy
70
71 As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version
72 controlled through bumpversion is used.
73 """
74 from os import getenv
75
76 return (
77 {'local_scheme': lambda version: ''}
78 if getenv('TESTPYPI_UPLOAD') == 'true'
79 else False
80 )
81
82
83 setup(
84 name='pyhf',
85 version='0.1.0',
86 description='(partial) pure python histfactory implementation',
87 long_description=long_description,
88 long_description_content_type='text/markdown',
89 url='https://github.com/diana-hep/pyhf',
90 author='Lukas Heinrich',
91 author_email='[email protected]',
92 license='Apache',
93 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
94 classifiers=[
95 "Programming Language :: Python :: 2",
96 "Programming Language :: Python :: 2.7",
97 "Programming Language :: Python :: 3",
98 "Programming Language :: Python :: 3.6",
99 "Programming Language :: Python :: 3.7",
100 ],
101 packages=find_packages(),
102 include_package_data=True,
103 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
104 install_requires=[
105 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
106 'click>=6.0', # for console scripts,
107 'tqdm', # for readxml
108 'six', # for modifiers
109 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
110 'jsonpatch',
111 'pyyaml', # for parsing CLI equal-delimited options
112 ],
113 extras_require=extras_require,
114 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
115 dependency_links=[],
116 use_scm_version=_is_test_pypi(),
117 )
118
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
extras_require = {
'tensorflow': [
- 'tensorflow~=1.13',
+ 'tensorflow~=1.14',
'tensorflow-probability~=0.5',
'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
'setuptools<=39.1.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n \n extras_require = {\n 'tensorflow': [\n- 'tensorflow~=1.13',\n+ 'tensorflow~=1.14',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n", "issue": "TensorFlow einsum behavior change\n# Description\r\n\r\nIn the test suite the `test_einsum[tensorflow]` [test is failing](https://travis-ci.org/diana-hep/pyhf/jobs/548493214#L689-L714) for `tensorflow` `v1.14.0`.\r\n\r\n# Expected Behavior\r\n\r\n`test_einsum[tensorflow]` passes\r\n\r\n# Actual Behavior\r\n\r\n```\r\nbackend = (<pyhf.tensor.tensorflow_backend.tensorflow_backend object at 0x7f11de50be10>, None)\r\n def test_einsum(backend):\r\n tb = pyhf.tensorlib\r\n x = np.arange(20).reshape(5, 4).tolist()\r\n \r\n if isinstance(pyhf.tensorlib, pyhf.tensor.mxnet_backend):\r\n with pytest.raises(NotImplementedError):\r\n assert tb.einsum('ij->ji', [1, 2, 3])\r\n else:\r\n> assert np.all(tb.tolist(tb.einsum('ij->ji', x)) == np.asarray(x).T.tolist())\r\ntests/test_tensor.py:200: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\npyhf/tensor/tensorflow_backend.py:260: in einsum\r\n return tf.einsum(subscripts, *operands)\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/ops/special_math_ops.py:255: in einsum\r\n input_shapes = [x.get_shape() for x in inputs]\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n.0 = <list_iterator object at 0x7f11cc06bb38>\r\n> input_shapes = [x.get_shape() for x in inputs]\r\nE AttributeError: 'list' object has no attribute 'get_shape'\r\n```\r\n\r\n# Steps to Reproduce\r\n\r\nRun the test suite.\r\n\r\n```\r\npytest -s tests/test_tensor.py\r\n```\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade 
ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.1.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}]} | 2,410 | 130 |
gh_patches_debug_22380 | rasdani/github-patches | git_diff | getsentry__sentry-python-355 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.7.10 CeleryIntegration captures task Ignore exception
Similar to #252, I've noticed that the Sentry client with the CeleryIntegration is capturing task Ignore exceptions, which are often used with dynamic tasks (see also https://github.com/celery/celery/issues/3437). I believe that since Retries are ignored, the `Ignore` exception should also be ignored.
The exception in Sentry is showing `celery/app/task.py in replace at line 894`:
```python
raise Ignore('Replaced by new task')
```
celery: 4.3
sentry-sdk: 0.7.10
python: 3.6
</issue>
<code>
[start of sentry_sdk/integrations/celery.py]
1 from __future__ import absolute_import
2
3 import sys
4
5 from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore
6
7 from sentry_sdk.hub import Hub
8 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
9 from sentry_sdk._compat import reraise
10 from sentry_sdk.integrations import Integration
11 from sentry_sdk.integrations.logging import ignore_logger
12
13
14 class CeleryIntegration(Integration):
15 identifier = "celery"
16
17 @staticmethod
18 def setup_once():
19 import celery.app.trace as trace # type: ignore
20
21 old_build_tracer = trace.build_tracer
22
23 def sentry_build_tracer(name, task, *args, **kwargs):
24 # Need to patch both methods because older celery sometimes
25 # short-circuits to task.run if it thinks it's safe.
26 task.__call__ = _wrap_task_call(task, task.__call__)
27 task.run = _wrap_task_call(task, task.run)
28 return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
29
30 trace.build_tracer = sentry_build_tracer
31
32 _patch_worker_exit()
33
34 # This logger logs every status of every task that ran on the worker.
35 # Meaning that every task's breadcrumbs are full of stuff like "Task
36 # <foo> raised unexpected <bar>".
37 ignore_logger("celery.worker.job")
38
39
40 def _wrap_tracer(task, f):
41 # Need to wrap tracer for pushing the scope before prerun is sent, and
42 # popping it after postrun is sent.
43 #
44 # This is the reason we don't use signals for hooking in the first place.
45 # Also because in Celery 3, signal dispatch returns early if one handler
46 # crashes.
47 def _inner(*args, **kwargs):
48 hub = Hub.current
49 if hub.get_integration(CeleryIntegration) is None:
50 return f(*args, **kwargs)
51
52 with hub.push_scope() as scope:
53 scope._name = "celery"
54 scope.clear_breadcrumbs()
55 scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
56
57 return f(*args, **kwargs)
58
59 return _inner
60
61
62 def _wrap_task_call(task, f):
63 # Need to wrap task call because the exception is caught before we get to
64 # see it. Also celery's reported stacktrace is untrustworthy.
65 def _inner(*args, **kwargs):
66 try:
67 return f(*args, **kwargs)
68 except Exception:
69 exc_info = sys.exc_info()
70 with capture_internal_exceptions():
71 _capture_exception(task, exc_info)
72 reraise(*exc_info)
73
74 return _inner
75
76
77 def _make_event_processor(task, uuid, args, kwargs, request=None):
78 def event_processor(event, hint):
79 with capture_internal_exceptions():
80 event["transaction"] = task.name
81
82 with capture_internal_exceptions():
83 extra = event.setdefault("extra", {})
84 extra["celery-job"] = {
85 "task_name": task.name,
86 "args": args,
87 "kwargs": kwargs,
88 }
89
90 if "exc_info" in hint:
91 with capture_internal_exceptions():
92 if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
93 event["fingerprint"] = [
94 "celery",
95 "SoftTimeLimitExceeded",
96 getattr(task, "name", task),
97 ]
98
99 return event
100
101 return event_processor
102
103
104 def _capture_exception(task, exc_info):
105 hub = Hub.current
106
107 if hub.get_integration(CeleryIntegration) is None:
108 return
109 if isinstance(exc_info[1], Retry):
110 return
111 if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
112 return
113
114 event, hint = event_from_exception(
115 exc_info,
116 client_options=hub.client.options,
117 mechanism={"type": "celery", "handled": False},
118 )
119
120 hub.capture_event(event, hint=hint)
121
122
123 def _patch_worker_exit():
124 # Need to flush queue before worker shutdown because a crashing worker will
125 # call os._exit
126 from billiard.pool import Worker # type: ignore
127
128 old_workloop = Worker.workloop
129
130 def sentry_workloop(*args, **kwargs):
131 try:
132 return old_workloop(*args, **kwargs)
133 finally:
134 with capture_internal_exceptions():
135 hub = Hub.current
136 if hub.get_integration(CeleryIntegration) is not None:
137 hub.flush()
138
139 Worker.workloop = sentry_workloop
140
[end of sentry_sdk/integrations/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py
--- a/sentry_sdk/integrations/celery.py
+++ b/sentry_sdk/integrations/celery.py
@@ -2,7 +2,12 @@
import sys
-from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore
+from celery.exceptions import ( # type: ignore
+ SoftTimeLimitExceeded,
+ Retry,
+ Ignore,
+ Reject,
+)
from sentry_sdk.hub import Hub
from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
@@ -11,6 +16,9 @@
from sentry_sdk.integrations.logging import ignore_logger
+CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)
+
+
class CeleryIntegration(Integration):
identifier = "celery"
@@ -106,7 +114,7 @@
if hub.get_integration(CeleryIntegration) is None:
return
- if isinstance(exc_info[1], Retry):
+ if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):
return
if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
return
| {"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -2,7 +2,12 @@\n \n import sys\n \n-from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore\n+from celery.exceptions import ( # type: ignore\n+ SoftTimeLimitExceeded,\n+ Retry,\n+ Ignore,\n+ Reject,\n+)\n \n from sentry_sdk.hub import Hub\n from sentry_sdk.utils import capture_internal_exceptions, event_from_exception\n@@ -11,6 +16,9 @@\n from sentry_sdk.integrations.logging import ignore_logger\n \n \n+CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)\n+\n+\n class CeleryIntegration(Integration):\n identifier = \"celery\"\n \n@@ -106,7 +114,7 @@\n \n if hub.get_integration(CeleryIntegration) is None:\n return\n- if isinstance(exc_info[1], Retry):\n+ if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n", "issue": "0.7.10 CeleryIntegration captures task Ignore exception\nSimilar to #252, I've noticed that the Sentry client with the CeleryIntegration is capturing task Ignore exceptions, which are often used with dynamic tasks (see also https://github.com/celery/celery/issues/3437). I believe that since Retries are ignored, the `Ignore` exception should also be ignored.\r\n\r\nThe exception in Sentry is showing `celery/app/task.py in replace at line 894`:\r\n```python\r\nraise Ignore('Replaced by new task')\r\n```\r\n\r\ncelery: 4.3\r\nsentry-sdk: 0.7.10\r\npython: 3.6\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n import celery.app.trace as trace # type: ignore\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task, task.__call__)\n task.run = _wrap_task_call(task, task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n _patch_worker_exit()\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(task, 
f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n with capture_internal_exceptions():\n _capture_exception(task, exc_info)\n reraise(*exc_info)\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n event[\"transaction\"] = task.name\n\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception(task, exc_info):\n hub = Hub.current\n\n if hub.get_integration(CeleryIntegration) is None:\n return\n if isinstance(exc_info[1], Retry):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _patch_worker_exit():\n # Need to flush queue before worker shutdown because a crashing worker will\n # call os._exit\n from billiard.pool import Worker # type: ignore\n\n old_workloop = Worker.workloop\n\n def sentry_workloop(*args, **kwargs):\n try:\n return old_workloop(*args, **kwargs)\n finally:\n with capture_internal_exceptions():\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.flush()\n\n Worker.workloop = sentry_workloop\n", "path": "sentry_sdk/integrations/celery.py"}]} | 2,006 | 282 |
gh_patches_debug_52038 | rasdani/github-patches | git_diff | rasterio__rasterio-926 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow type annotation support for rasterio data types
Recently I've been adding PEP 484 type annotations to my projects and rasterio has proved quite ugly to use as is, the main annoyance being that `rasterio.open` may return objects belonging to three classes (`DatasetReader`, `DatasetWriter` and `BufferedDatasetWriter`) with a non-public base `DatasetBase`, so right now the only option to annotate a generic dataset is:
```
from typing import Union
from rasterio import io
def myfunc(
ds: Union[io.DatasetReader, io.DatasetWriter, io.BufferedDatasetWriter]
) -> None:
pass
```
Something like exposing the DatasetBase class inside rasterio would make the above much more readable:
```
import rasterio
def myfunc(ds: rasterio.DatasetBase) -> None:
pass
```
Would you consider restructuring a bit the dataset classes so that they are easier to use in type annotations? I guess I'm the first one having such a need :)
</issue>
<code>
[start of rasterio/io.py]
1 """Classes capable of reading and writing datasets
2
3 Instances of these classes are called dataset objects.
4 """
5
6 import logging
7 import math
8 import uuid
9 import warnings
10
11 from rasterio._base import (
12 get_dataset_driver, driver_can_create, driver_can_create_copy)
13 from rasterio._io import (
14 DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,
15 MemoryFileBase)
16 from rasterio import enums, windows
17 from rasterio.env import Env
18 from rasterio.transform import guard_transform, xy, rowcol
19
20
21 log = logging.getLogger(__name__)
22
23
24 class TransformMethodsMixin(object):
25 """Mixin providing methods for calculations related
26 to transforming between rows and columns of the raster
27 array and the coordinates.
28
29 These methods are wrappers for the functionality in
30 `rasterio.transform` module.
31
32 A subclass with this mixin MUST provide a `transform`
33 property.
34 """
35
36 def xy(self, row, col, offset="center"):
37 """Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.
38 The pixel's center is returned by default, but a corner can be returned
39 by setting `offset` to one of `ul, ur, ll, lr`.
40
41 Parameters
42 ----------
43 row : int
44 Pixel row.
45 col : int
46 Pixel column.
47 offset : str, optional
48 Determines if the returned coordinates are for the center of the
49 pixel or for a corner.
50
51 Returns
52 -------
53 tuple
54 ``(x, y)``
55 """
56 return xy(self.transform, row, col, offset=offset)
57
58 def ul(self, row, col):
59 """Returns the coordinates (x, y) of the upper left corner of a
60 pixel at `row` and `col` in the units of the dataset's
61 coordinate reference system.
62
63 Deprecated; Use `xy(row, col, offset='ul')` instead.
64 """
65 warnings.warn("ul method is deprecated. Use xy(row, col, offset='ul')",
66 DeprecationWarning)
67 return xy(self.transform, row, col, offset='ul')
68
69 def index(self, x, y, op=math.floor, precision=6):
70 """
71 Returns the (row, col) index of the pixel containing (x, y) given a
72 coordinate reference system.
73
74 Use an epsilon, magnitude determined by the precision parameter
75 and sign determined by the op function:
76 positive for floor, negative for ceil.
77
78 Parameters
79 ----------
80 x : float
81 x value in coordinate reference system
82 y : float
83 y value in coordinate reference system
84 op : function, optional (default: math.floor)
85 Function to convert fractional pixels to whole numbers (floor,
86 ceiling, round)
87 precision : int, optional (default: 6)
88 Decimal places of precision in indexing, as in `round()`.
89
90 Returns
91 -------
92 tuple
93 (row index, col index)
94 """
95 return rowcol(self.transform, x, y, op=op, precision=precision)
96
97
98 class WindowMethodsMixin(object):
99 """Mixin providing methods for window-related calculations.
100 These methods are wrappers for the functionality in
101 `rasterio.windows` module.
102
103 A subclass with this mixin MUST provide the following
104 properties: `transform`, `height` and `width`
105 """
106
107 def window(self, left, bottom, right, top, boundless=False):
108 """Get the window corresponding to the bounding coordinates.
109
110 Parameters
111 ----------
112 left : float
113 Left (west) bounding coordinate
114 bottom : float
115 Bottom (south) bounding coordinate
116 right : float
117 Right (east) bounding coordinate
118 top : float
119 Top (north) bounding coordinate
120 boundless: boolean, optional
121 If boundless is False, window is limited
122 to extent of this dataset.
123
124 Returns
125 -------
126 window: tuple
127 ((row_start, row_stop), (col_start, col_stop))
128 corresponding to the bounding coordinates
129
130 """
131
132 transform = guard_transform(self.transform)
133 return windows.from_bounds(
134 left, bottom, right, top, transform=transform,
135 height=self.height, width=self.width, boundless=boundless)
136
137 def window_transform(self, window):
138 """Get the affine transform for a dataset window.
139
140 Parameters
141 ----------
142 window: tuple
143 Dataset window tuple
144
145 Returns
146 -------
147 transform: Affine
148 The affine transform matrix for the given window
149 """
150
151 transform = guard_transform(self.transform)
152 return windows.transform(window, transform)
153
154 def window_bounds(self, window):
155 """Get the bounds of a window
156
157 Parameters
158 ----------
159 window: tuple
160 Dataset window tuple
161
162 Returns
163 -------
164 bounds : tuple
165 x_min, y_min, x_max, y_max for the given window
166 """
167
168 transform = guard_transform(self.transform)
169 return windows.bounds(window, transform)
170
171
172 class DatasetReader(DatasetReaderBase, WindowMethodsMixin,
173 TransformMethodsMixin):
174 """An unbuffered data and metadata reader"""
175
176 def __repr__(self):
177 return "<{} DatasetReader name='{}' mode='{}'>".format(
178 self.closed and 'closed' or 'open', self.name, self.mode)
179
180
181 class DatasetWriter(DatasetWriterBase, WindowMethodsMixin,
182 TransformMethodsMixin):
183 """An unbuffered data and metadata writer. Its methods write data
184 directly to disk.
185 """
186
187 def __repr__(self):
188 return "<{} DatasetWriter name='{}' mode='{}'>".format(
189 self.closed and 'closed' or 'open', self.name, self.mode)
190
191
192 class BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,
193 TransformMethodsMixin):
194 """Maintains data and metadata in a buffer, writing to disk or
195 network only when `close()` is called.
196
197 This allows incremental updates to datasets using formats that don't
198 otherwise support updates, such as JPEG.
199 """
200
201 def __repr__(self):
202 return "<{} BufferedDatasetWriter name='{}' mode='{}'>".format(
203 self.closed and 'closed' or 'open', self.name, self.mode)
204
205
206 class MemoryFile(MemoryFileBase):
207 """A BytesIO-like object, backed by an in-memory file.
208
209 This allows formatted files to be read and written without I/O.
210
211 A MemoryFile created with initial bytes becomes immutable. A
212 MemoryFile created without initial bytes may be written to using
213 either file-like or dataset interfaces.
214
215 Examples
216 --------
217
218 A GeoTIFF can be loaded in memory and accessed using the GeoTIFF
219 format driver
220
221 >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \
222 ... MemoryFile(f.read()) as memfile:
223 ... with memfile.open() as src:
224 ... pprint.pprint(src.profile)
225 ...
226 {'count': 3,
227 'crs': CRS({'init': 'epsg:32618'}),
228 'driver': 'GTiff',
229 'dtype': 'uint8',
230 'height': 718,
231 'interleave': 'pixel',
232 'nodata': 0.0,
233 'tiled': False,
234 'transform': Affine(300.0379266750948, 0.0, 101985.0,
235 0.0, -300.041782729805, 2826915.0),
236 'width': 791}
237
238 """
239
240 def open(self, driver=None, width=None, height=None,
241 count=None, crs=None, transform=None, dtype=None, nodata=None,
242 **kwargs):
243 """Open the file and return a Rasterio dataset object.
244
245 If data has already been written, the file is opened in 'r+'
246 mode. Otherwise, the file is opened in 'w' mode.
247 """
248 with Env():
249 if self.closed:
250 raise IOError("I/O operation on closed file.")
251 if self.exists():
252 s = get_writer_for_path(self.name)(self.name, 'r+')
253 else:
254 s = get_writer_for_driver(
255 driver)(self.name, 'w', driver=driver, width=width,
256 height=height, count=count, crs=crs,
257 transform=transform, dtype=dtype, nodata=nodata,
258 **kwargs)
259 s.start()
260 return s
261
262 def __enter__(self):
263 return self
264
265 def __exit__(self, *args, **kwargs):
266 self.close()
267
268
269 def get_writer_for_driver(driver):
270 """Return the writer class appropriate for the specified driver."""
271 cls = None
272 if driver_can_create(driver):
273 cls = DatasetWriter
274 elif driver_can_create_copy(driver): # pragma: no branch
275 cls = BufferedDatasetWriter
276 return cls
277
278
279 def get_writer_for_path(path):
280 """Return the writer class appropriate for the existing dataset."""
281 driver = get_dataset_driver(path)
282 return get_writer_for_driver(driver)
283
[end of rasterio/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/io.py b/rasterio/io.py
--- a/rasterio/io.py
+++ b/rasterio/io.py
@@ -9,7 +9,7 @@
import warnings
from rasterio._base import (
- get_dataset_driver, driver_can_create, driver_can_create_copy)
+ DatasetBase, get_dataset_driver, driver_can_create, driver_can_create_copy)
from rasterio._io import (
DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,
MemoryFileBase)
| {"golden_diff": "diff --git a/rasterio/io.py b/rasterio/io.py\n--- a/rasterio/io.py\n+++ b/rasterio/io.py\n@@ -9,7 +9,7 @@\n import warnings\n \n from rasterio._base import (\n- get_dataset_driver, driver_can_create, driver_can_create_copy)\n+ DatasetBase, get_dataset_driver, driver_can_create, driver_can_create_copy)\n from rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\n", "issue": "Allow type annotation support for rasterio data types\nRecently I've been adding PEP 484 type annotations to my projects and rasterio has proved quite ugly to use as is, the main annoyance being that `rasterio.open` may return objects belonging to three classes (`DatasetReader`, `DatasetWriter` and `BufferedDatasetWriter`) with a non-public base `DatasetBase`, so right now the only option to annotate a generic dataset is:\r\n```\r\nfrom typing import Union\r\nfrom rasterio import io\r\n\r\ndef myfunc(\r\n ds: Union[io.DatasetReader, io.DatasetWriter, io.BufferedDatasetWriter]\r\n) -> None:\r\n pass\r\n```\r\n\r\nSomething like exposing the DatasetBase class inside rasterio would make the above much more readable:\r\n```\r\nimport rasterio\r\n\r\ndef myfunc(ds: rasterio.DatasetBase) -> None:\r\n pass\r\n```\r\n\r\nWould you consider restructuring a bit the dataset classes so that they are easier to use in type annotations? I guess I'm the first one having such a need :)\n", "before_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport logging\nimport math\nimport uuid\nimport warnings\n\nfrom rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\nfrom rasterio import enums, windows\nfrom rasterio.env import Env\nfrom rasterio.transform import guard_transform, xy, rowcol\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TransformMethodsMixin(object):\n \"\"\"Mixin providing methods for calculations related\n to transforming between rows and columns of the raster\n array and the coordinates.\n\n These methods are wrappers for the functionality in\n `rasterio.transform` module.\n\n A subclass with this mixin MUST provide a `transform`\n property.\n \"\"\"\n\n def xy(self, row, col, offset=\"center\"):\n \"\"\"Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.\n The pixel's center is returned by default, but a corner can be returned\n by setting `offset` to one of `ul, ur, ll, lr`.\n\n Parameters\n ----------\n row : int\n Pixel row.\n col : int\n Pixel column.\n offset : str, optional\n Determines if the returned coordinates are for the center of the\n pixel or for a corner.\n\n Returns\n -------\n tuple\n ``(x, y)``\n \"\"\"\n return xy(self.transform, row, col, offset=offset)\n\n def ul(self, row, col):\n \"\"\"Returns the coordinates (x, y) of the upper left corner of a\n pixel at `row` and `col` in the units of the dataset's\n coordinate reference system.\n\n Deprecated; Use `xy(row, col, offset='ul')` instead.\n \"\"\"\n warnings.warn(\"ul method is deprecated. 
Use xy(row, col, offset='ul')\",\n DeprecationWarning)\n return xy(self.transform, row, col, offset='ul')\n\n def index(self, x, y, op=math.floor, precision=6):\n \"\"\"\n Returns the (row, col) index of the pixel containing (x, y) given a\n coordinate reference system.\n\n Use an epsilon, magnitude determined by the precision parameter\n and sign determined by the op function:\n positive for floor, negative for ceil.\n\n Parameters\n ----------\n x : float\n x value in coordinate reference system\n y : float\n y value in coordinate reference system\n op : function, optional (default: math.floor)\n Function to convert fractional pixels to whole numbers (floor,\n ceiling, round)\n precision : int, optional (default: 6)\n Decimal places of precision in indexing, as in `round()`.\n\n Returns\n -------\n tuple\n (row index, col index)\n \"\"\"\n return rowcol(self.transform, x, y, op=op, precision=precision)\n\n\nclass WindowMethodsMixin(object):\n \"\"\"Mixin providing methods for window-related calculations.\n These methods are wrappers for the functionality in\n `rasterio.windows` module.\n\n A subclass with this mixin MUST provide the following\n properties: `transform`, `height` and `width`\n \"\"\"\n\n def window(self, left, bottom, right, top, boundless=False):\n \"\"\"Get the window corresponding to the bounding coordinates.\n\n Parameters\n ----------\n left : float\n Left (west) bounding coordinate\n bottom : float\n Bottom (south) bounding coordinate\n right : float\n Right (east) bounding coordinate\n top : float\n Top (north) bounding coordinate\n boundless: boolean, optional\n If boundless is False, window is limited\n to extent of this dataset.\n\n Returns\n -------\n window: tuple\n ((row_start, row_stop), (col_start, col_stop))\n corresponding to the bounding coordinates\n\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.from_bounds(\n left, bottom, right, top, transform=transform,\n height=self.height, width=self.width, boundless=boundless)\n\n def window_transform(self, window):\n \"\"\"Get the affine transform for a dataset window.\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n transform: Affine\n The affine transform matrix for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.transform(window, transform)\n\n def window_bounds(self, window):\n \"\"\"Get the bounds of a window\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n bounds : tuple\n x_min, y_min, x_max, y_max for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.bounds(window, transform)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. 
Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \\\n ... MemoryFile(f.read()) as memfile:\n ... with memfile.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n\n def open(self, driver=None, width=None, height=None,\n count=None, crs=None, transform=None, dtype=None, nodata=None,\n **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r+'\n mode. Otherwise, the file is opened in 'w' mode.\n \"\"\"\n with Env():\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n if self.exists():\n s = get_writer_for_path(self.name)(self.name, 'r+')\n else:\n s = get_writer_for_driver(\n driver)(self.name, 'w', driver=driver, width=width,\n height=height, count=count, crs=crs,\n transform=transform, dtype=dtype, nodata=nodata,\n **kwargs)\n s.start()\n return s\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.close()\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}]} | 3,477 | 111 |
gh_patches_debug_57548 | rasdani/github-patches | git_diff | nextcloud__appstore-89 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
What categories do we need?
Currently allowed categories include:
- multimedia
- pim
- tools
- games
Anything else?
</issue>
<code>
[start of nextcloudappstore/core/models.py]
1 from django.conf import settings # type: ignore
2 from django.contrib.auth.models import User # type: ignore
3 from django.db.models import ManyToManyField, ForeignKey, \
4 URLField, IntegerField, CharField, CASCADE, TextField, \
5 DateTimeField, Model, BooleanField # type: ignore
6 from django.utils.translation import ugettext_lazy as _ # type: ignore
7 from parler.models import TranslatedFields, TranslatableModel # type: ignore
8
9
10 class App(TranslatableModel):
11 id = CharField(max_length=128, unique=True, primary_key=True,
12 verbose_name=_('Id'),
13 help_text=_('app id, identical to folder name'))
14 categories = ManyToManyField('Category', verbose_name=_('Category'))
15 translations = TranslatedFields(
16 name=CharField(max_length=128, verbose_name=_('Name'),
17 help_text=_('Rendered app name for users')),
18 description=TextField(verbose_name=_('Description'), help_text=_(
19 'Will be rendered as Markdown'))
20 )
21 # resources
22 user_docs = URLField(max_length=256, blank=True,
23 verbose_name=_('User documentation url'))
24 admin_docs = URLField(max_length=256, blank=True,
25 verbose_name=_('Admin documentation url'))
26 developer_docs = URLField(max_length=256, blank=True,
27 verbose_name=_('Developer documentation url'))
28 issue_tracker = URLField(max_length=256, blank=True,
29 verbose_name=_('Issue tracker url'))
30 website = URLField(max_length=256, blank=True, verbose_name=_('Homepage'))
31 created = DateTimeField(auto_now_add=True, editable=False,
32 verbose_name=_('Created at'))
33 last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,
34 verbose_name=_('Updated at'))
35 owner = ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('App owner'),
36 on_delete=CASCADE, related_name='owned_apps')
37 co_maintainers = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,
38 verbose_name=_('Co-Maintainers'),
39 related_name='co_maintained_apps')
40 recommendations = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,
41 verbose_name=_('Recommendations'),
42 related_name='recommended_apps')
43 featured = BooleanField(verbose_name=_('Featured'), default=False)
44
45 class Meta:
46 verbose_name = _('App')
47 verbose_name_plural = _('Apps')
48
49 def __str__(self) -> str:
50 return self.name
51
52 def can_update(self, user: User) -> bool:
53 return self.owner == user or user in self.co_maintainers.all()
54
55 def can_delete(self, user: User) -> bool:
56 return self.owner == user
57
58
59 class AppRelease(Model):
60 version = CharField(max_length=128, verbose_name=_('Version'),
61 help_text=_('Version follows Semantic Versioning'))
62 app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),
63 related_name='releases')
64 # dependencies
65 php_extensions = ManyToManyField('PhpExtension', blank=True,
66 through='PhpExtensionDependency',
67 verbose_name=_(
68 'PHP extension dependency'))
69 databases = ManyToManyField('Database', blank=True,
70 through='DatabaseDependency',
71 verbose_name=_('Database dependency'))
72 licenses = ManyToManyField('License', verbose_name=_('License'))
73 shell_commands = ManyToManyField('ShellCommand', blank=True,
74 verbose_name=_(
75 'Shell command dependency'))
76 php_version_spec = CharField(max_length=128,
77 verbose_name=_('PHP version requirement'))
78 platform_version_spec = CharField(max_length=128, verbose_name=_(
79 'Platform version requirement'))
80 min_int_size = IntegerField(blank=True, default=32,
81 verbose_name=_('Minimum Integer Bits'),
82 help_text=_('e.g. 32 for 32bit Integers'))
83 checksum = CharField(max_length=64, verbose_name=_('SHA256 checksum'))
84 download = URLField(max_length=256, blank=True,
85 verbose_name=_('Archive download Url'))
86 created = DateTimeField(auto_now_add=True, editable=False,
87 verbose_name=_('Created at'))
88 last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,
89 verbose_name=_('Updated at'))
90
91 class Meta:
92 verbose_name = _('App Release')
93 verbose_name_plural = _('App Releases')
94 unique_together = (('app', 'version'),)
95 ordering = ['-version']
96
97 def can_update(self, user: User) -> bool:
98 return self.app.owner == user or user in self.app.co_maintainers.all()
99
100 def can_delete(self, user: User) -> bool:
101 return self.can_update(user)
102
103 def __str__(self) -> str:
104 return '%s %s' % (self.app, self.version)
105
106
107 class Screenshot(Model):
108 url = URLField(max_length=256, verbose_name=_('Image url'))
109 app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),
110 related_name='screenshots')
111 ordering = IntegerField(verbose_name=_('Ordering'))
112
113 class Meta:
114 verbose_name = _('Screenshot')
115 verbose_name_plural = _('Screenshots')
116 ordering = ['ordering']
117
118 def __str__(self) -> str:
119 return self.url
120
121
122 class ShellCommand(Model):
123 name = CharField(max_length=128, unique=True, primary_key=True,
124 verbose_name=_('Shell Command'),
125 help_text=_(
126 'Name of a required shell command, e.g. grep'))
127
128 class Meta:
129 verbose_name = _('Shell Command')
130 verbose_name_plural = _('Shell Commands')
131
132 def __str__(self) -> str:
133 return self.name
134
135
136 class Category(TranslatableModel):
137 id = CharField(max_length=128, unique=True, primary_key=True,
138 verbose_name=_('Id'),
139 help_text=_(
140 'Category id which is used to identify a '
141 'category. Used to identify categories when '
142 'uploading an app'))
143 created = DateTimeField(auto_now_add=True, editable=False,
144 verbose_name=_('Created at'))
145 last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,
146 verbose_name=_('Updated at'))
147 translations = TranslatedFields(
148 name=CharField(max_length=128, help_text=_(
149 'Category name which will be presented to the user'),
150 verbose_name=_('Name')),
151 description=TextField(verbose_name=_('Description'),
152 help_text=_('Will be rendered as Markdown'))
153 )
154
155 class Meta:
156 verbose_name = _('Category')
157 verbose_name_plural = _('Categories')
158
159 def __str__(self) -> str:
160 return self.name
161
162
163 class License(Model):
164 id = CharField(max_length=128, unique=True, primary_key=True,
165 verbose_name=_('Id'),
166 help_text=_(
167 'Key which is used to identify a license'))
168 name = CharField(max_length=128, verbose_name=_('Name'),
169 help_text=_(
170 'License name which will be presented to '
171 'the user'))
172
173 class Meta:
174 verbose_name = _('License')
175 verbose_name_plural = _('Licenses')
176
177 def __str__(self) -> str:
178 return self.name
179
180
181 class Database(Model):
182 id = CharField(max_length=128, unique=True, primary_key=True,
183 verbose_name=_('Id'),
184 help_text=_('Key which is used to identify a database'))
185 name = CharField(max_length=128, verbose_name=_('Name'),
186 help_text=_(
187 'Database name which will be presented to the user'))
188
189 class Meta:
190 verbose_name = _('Database')
191 verbose_name_plural = _('Databases')
192
193 def __str__(self) -> str:
194 return self.name
195
196
197 class DatabaseDependency(Model):
198 app_release = ForeignKey('AppRelease', on_delete=CASCADE,
199 verbose_name=_('App release'),
200 related_name='databasedependencies')
201 database = ForeignKey('Database', related_name='releasedependencies',
202 on_delete=CASCADE, verbose_name=_('Database'))
203 version_spec = CharField(max_length=128,
204 verbose_name=_('Database version requirement'))
205
206 class Meta:
207 verbose_name = _('Database Dependency')
208 verbose_name_plural = _('Database Dependencies')
209 unique_together = (('app_release', 'database', 'version_spec'),)
210
211 def __str__(self) -> str:
212 return '%s: %s %s' % (self.app_release, self.database,
213 self.version_spec)
214
215
216 class PhpExtension(Model):
217 id = CharField(max_length=128, unique=True, help_text=_('e.g. libxml'),
218 primary_key=True, verbose_name=_('PHP extension'))
219
220 class Meta:
221 verbose_name = _('PHP Extension')
222 verbose_name_plural = _('PHP Extensions')
223
224 def __str__(self) -> str:
225 return self.id
226
227
228 class PhpExtensionDependency(Model):
229 app_release = ForeignKey('AppRelease', on_delete=CASCADE,
230 verbose_name=_('App Release'),
231 related_name='phpextensiondependencies')
232 php_extension = ForeignKey('PhpExtension', on_delete=CASCADE,
233 verbose_name=_('PHP Extension'),
234 related_name='releasedependencies')
235 version_spec = CharField(max_length=128,
236 verbose_name=_('Extension version requirement'))
237
238 class Meta:
239 verbose_name = _('PHP Extension Dependency')
240 verbose_name_plural = _('PHP Extension Dependencies')
241 unique_together = (('app_release', 'php_extension', 'version_spec'),)
242
243 def __str__(self) -> str:
244 return '%s: %s %s' % (self.app_release.app, self.php_extension,
245 self.version_spec)
246
[end of nextcloudappstore/core/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/core/models.py b/nextcloudappstore/core/models.py
--- a/nextcloudappstore/core/models.py
+++ b/nextcloudappstore/core/models.py
@@ -155,6 +155,7 @@
class Meta:
verbose_name = _('Category')
verbose_name_plural = _('Categories')
+ ordering = ['id']
def __str__(self) -> str:
return self.name
| {"golden_diff": "diff --git a/nextcloudappstore/core/models.py b/nextcloudappstore/core/models.py\n--- a/nextcloudappstore/core/models.py\n+++ b/nextcloudappstore/core/models.py\n@@ -155,6 +155,7 @@\n class Meta:\n verbose_name = _('Category')\n verbose_name_plural = _('Categories')\n+ ordering = ['id']\n \n def __str__(self) -> str:\n return self.name\n", "issue": "What categories do we need?\nCurrently allowed categories include:\n- multimedia\n- pim\n- tools\n- games\n\nAnything else?\n\n", "before_files": [{"content": "from django.conf import settings # type: ignore\nfrom django.contrib.auth.models import User # type: ignore\nfrom django.db.models import ManyToManyField, ForeignKey, \\\n URLField, IntegerField, CharField, CASCADE, TextField, \\\n DateTimeField, Model, BooleanField # type: ignore\nfrom django.utils.translation import ugettext_lazy as _ # type: ignore\nfrom parler.models import TranslatedFields, TranslatableModel # type: ignore\n\n\nclass App(TranslatableModel):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_('app id, identical to folder name'))\n categories = ManyToManyField('Category', verbose_name=_('Category'))\n translations = TranslatedFields(\n name=CharField(max_length=128, verbose_name=_('Name'),\n help_text=_('Rendered app name for users')),\n description=TextField(verbose_name=_('Description'), help_text=_(\n 'Will be rendered as Markdown'))\n )\n # resources\n user_docs = URLField(max_length=256, blank=True,\n verbose_name=_('User documentation url'))\n admin_docs = URLField(max_length=256, blank=True,\n verbose_name=_('Admin documentation url'))\n developer_docs = URLField(max_length=256, blank=True,\n verbose_name=_('Developer documentation url'))\n issue_tracker = URLField(max_length=256, blank=True,\n verbose_name=_('Issue tracker url'))\n website = URLField(max_length=256, blank=True, verbose_name=_('Homepage'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n owner = ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('App owner'),\n on_delete=CASCADE, related_name='owned_apps')\n co_maintainers = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,\n verbose_name=_('Co-Maintainers'),\n related_name='co_maintained_apps')\n recommendations = ManyToManyField(settings.AUTH_USER_MODEL, blank=True,\n verbose_name=_('Recommendations'),\n related_name='recommended_apps')\n featured = BooleanField(verbose_name=_('Featured'), default=False)\n\n class Meta:\n verbose_name = _('App')\n verbose_name_plural = _('Apps')\n\n def __str__(self) -> str:\n return self.name\n\n def can_update(self, user: User) -> bool:\n return self.owner == user or user in self.co_maintainers.all()\n\n def can_delete(self, user: User) -> bool:\n return self.owner == user\n\n\nclass AppRelease(Model):\n version = CharField(max_length=128, verbose_name=_('Version'),\n help_text=_('Version follows Semantic Versioning'))\n app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),\n related_name='releases')\n # dependencies\n php_extensions = ManyToManyField('PhpExtension', blank=True,\n through='PhpExtensionDependency',\n verbose_name=_(\n 'PHP extension dependency'))\n databases = ManyToManyField('Database', blank=True,\n through='DatabaseDependency',\n verbose_name=_('Database dependency'))\n licenses = ManyToManyField('License', verbose_name=_('License'))\n shell_commands 
= ManyToManyField('ShellCommand', blank=True,\n verbose_name=_(\n 'Shell command dependency'))\n php_version_spec = CharField(max_length=128,\n verbose_name=_('PHP version requirement'))\n platform_version_spec = CharField(max_length=128, verbose_name=_(\n 'Platform version requirement'))\n min_int_size = IntegerField(blank=True, default=32,\n verbose_name=_('Minimum Integer Bits'),\n help_text=_('e.g. 32 for 32bit Integers'))\n checksum = CharField(max_length=64, verbose_name=_('SHA256 checksum'))\n download = URLField(max_length=256, blank=True,\n verbose_name=_('Archive download Url'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n\n class Meta:\n verbose_name = _('App Release')\n verbose_name_plural = _('App Releases')\n unique_together = (('app', 'version'),)\n ordering = ['-version']\n\n def can_update(self, user: User) -> bool:\n return self.app.owner == user or user in self.app.co_maintainers.all()\n\n def can_delete(self, user: User) -> bool:\n return self.can_update(user)\n\n def __str__(self) -> str:\n return '%s %s' % (self.app, self.version)\n\n\nclass Screenshot(Model):\n url = URLField(max_length=256, verbose_name=_('Image url'))\n app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'),\n related_name='screenshots')\n ordering = IntegerField(verbose_name=_('Ordering'))\n\n class Meta:\n verbose_name = _('Screenshot')\n verbose_name_plural = _('Screenshots')\n ordering = ['ordering']\n\n def __str__(self) -> str:\n return self.url\n\n\nclass ShellCommand(Model):\n name = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Shell Command'),\n help_text=_(\n 'Name of a required shell command, e.g. grep'))\n\n class Meta:\n verbose_name = _('Shell Command')\n verbose_name_plural = _('Shell Commands')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Category(TranslatableModel):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_(\n 'Category id which is used to identify a '\n 'category. 
Used to identify categories when '\n 'uploading an app'))\n created = DateTimeField(auto_now_add=True, editable=False,\n verbose_name=_('Created at'))\n last_modified = DateTimeField(auto_now=True, editable=False, db_index=True,\n verbose_name=_('Updated at'))\n translations = TranslatedFields(\n name=CharField(max_length=128, help_text=_(\n 'Category name which will be presented to the user'),\n verbose_name=_('Name')),\n description=TextField(verbose_name=_('Description'),\n help_text=_('Will be rendered as Markdown'))\n )\n\n class Meta:\n verbose_name = _('Category')\n verbose_name_plural = _('Categories')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass License(Model):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_(\n 'Key which is used to identify a license'))\n name = CharField(max_length=128, verbose_name=_('Name'),\n help_text=_(\n 'License name which will be presented to '\n 'the user'))\n\n class Meta:\n verbose_name = _('License')\n verbose_name_plural = _('Licenses')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass Database(Model):\n id = CharField(max_length=128, unique=True, primary_key=True,\n verbose_name=_('Id'),\n help_text=_('Key which is used to identify a database'))\n name = CharField(max_length=128, verbose_name=_('Name'),\n help_text=_(\n 'Database name which will be presented to the user'))\n\n class Meta:\n verbose_name = _('Database')\n verbose_name_plural = _('Databases')\n\n def __str__(self) -> str:\n return self.name\n\n\nclass DatabaseDependency(Model):\n app_release = ForeignKey('AppRelease', on_delete=CASCADE,\n verbose_name=_('App release'),\n related_name='databasedependencies')\n database = ForeignKey('Database', related_name='releasedependencies',\n on_delete=CASCADE, verbose_name=_('Database'))\n version_spec = CharField(max_length=128,\n verbose_name=_('Database version requirement'))\n\n class Meta:\n verbose_name = _('Database Dependency')\n verbose_name_plural = _('Database Dependencies')\n unique_together = (('app_release', 'database', 'version_spec'),)\n\n def __str__(self) -> str:\n return '%s: %s %s' % (self.app_release, self.database,\n self.version_spec)\n\n\nclass PhpExtension(Model):\n id = CharField(max_length=128, unique=True, help_text=_('e.g. libxml'),\n primary_key=True, verbose_name=_('PHP extension'))\n\n class Meta:\n verbose_name = _('PHP Extension')\n verbose_name_plural = _('PHP Extensions')\n\n def __str__(self) -> str:\n return self.id\n\n\nclass PhpExtensionDependency(Model):\n app_release = ForeignKey('AppRelease', on_delete=CASCADE,\n verbose_name=_('App Release'),\n related_name='phpextensiondependencies')\n php_extension = ForeignKey('PhpExtension', on_delete=CASCADE,\n verbose_name=_('PHP Extension'),\n related_name='releasedependencies')\n version_spec = CharField(max_length=128,\n verbose_name=_('Extension version requirement'))\n\n class Meta:\n verbose_name = _('PHP Extension Dependency')\n verbose_name_plural = _('PHP Extension Dependencies')\n unique_together = (('app_release', 'php_extension', 'version_spec'),)\n\n def __str__(self) -> str:\n return '%s: %s %s' % (self.app_release.app, self.php_extension,\n self.version_spec)\n", "path": "nextcloudappstore/core/models.py"}]} | 3,260 | 100 |
gh_patches_debug_9738 | rasdani/github-patches | git_diff | modin-project__modin-6337 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ray is incompatible with pydantic>=2.0
We should pin `pydantic<2.0` to work around the issues on the Ray side.
Example [from](https://github.com/modin-project/modin/actions/runs/5425526005/jobs/9866377841):
```bash
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/__init__.py", line 1, in <module>
from ray.util.state.api import (
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/api.py", line 17, in <module>
from ray.util.state.common import (
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/common.py", line 120, in <module>
@dataclass(init=True)
File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/pydantic/dataclasses.py", line 139, in dataclass
assert init is False, 'pydantic.dataclasses.dataclass only supports init=False'
AssertionError: pydantic.dataclasses.dataclass only supports init=False
```
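
A minimal sketch of the proposed workaround, assuming the Ray extra stays declared as a plain requirements list in `setup.py` (the exact bound is an assumption until Ray supports pydantic 2):

```python
# Sketch: constrain pydantic next to the existing Ray pin so that Ray's
# pydantic-based dataclasses keep importing.
ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow", "pydantic<2"]
```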
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 import versioneer
3
4 with open("README.md", "r", encoding="utf-8") as fh:
5 long_description = fh.read()
6
7 dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
8 # ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100
9 ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"]
10 unidist_deps = ["unidist[mpi]>=0.2.1"]
11 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
12 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
13 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"]
14 all_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps
15
16 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.
17 # This file provides the "import pandas before Ray init" feature if specific
18 # environment variable is set (see https://github.com/modin-project/modin/issues/4564).
19 cmdclass = versioneer.get_cmdclass()
20 extra_files = ["modin-autoimport-pandas.pth"]
21
22
23 class AddPthFileBuild(cmdclass["build_py"]):
24 def _get_data_files(self):
25 return (super()._get_data_files() or []) + [
26 (".", ".", self.build_lib, extra_files)
27 ]
28
29
30 class AddPthFileSDist(cmdclass["sdist"]):
31 def make_distribution(self):
32 self.filelist.extend(extra_files)
33 return super().make_distribution()
34
35
36 cmdclass["build_py"] = AddPthFileBuild
37 cmdclass["sdist"] = AddPthFileSDist
38
39 setup(
40 name="modin",
41 version=versioneer.get_version(),
42 cmdclass=cmdclass,
43 description="Modin: Make your pandas code run faster by changing one line of code.",
44 packages=find_packages(exclude=["scripts", "scripts.*"]),
45 include_package_data=True,
46 license="Apache 2",
47 url="https://github.com/modin-project/modin",
48 long_description=long_description,
49 long_description_content_type="text/markdown",
50 install_requires=[
51 "pandas>=2,<2.1",
52 "packaging",
53 "numpy>=1.18.5",
54 "fsspec",
55 "psutil",
56 ],
57 extras_require={
58 # can be installed by pip install modin[dask]
59 "dask": dask_deps,
60 "ray": ray_deps,
61 "unidist": unidist_deps,
62 "remote": remote_deps,
63 "spreadsheet": spreadsheet_deps,
64 "sql": sql_deps,
65 "all": all_deps,
66 },
67 python_requires=">=3.8",
68 )
69
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,8 @@
dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100
-ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"]
+# pydantic<2: https://github.com/modin-project/modin/issues/6336
+ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow", "pydantic<2"]
unidist_deps = ["unidist[mpi]>=0.2.1"]
remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,7 +6,8 @@\n \n dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n # ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\n-ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\n+# pydantic<2: https://github.com/modin-project/modin/issues/6336\n+ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\", \"pydantic<2\"]\n unidist_deps = [\"unidist[mpi]>=0.2.1\"]\n remote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\n spreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\n", "issue": "Ray is incompatible with pydantic>=2.0\nWe should pin `pydantic<2.0` to workaround the issues on Ray side.\r\n\r\n\r\nExample [from](https://github.com/modin-project/modin/actions/runs/5425526005/jobs/9866377841):\r\n```bash\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/__init__.py\", line 1, in <module>\r\n from ray.util.state.api import (\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/api.py\", line 17, in <module>\r\n from ray.util.state.common import (\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/common.py\", line 120, in <module>\r\n @dataclass(init=True)\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/pydantic/dataclasses.py\", line 139, in dataclass\r\n assert init is False, 'pydantic.dataclasses.dataclass only supports init=False'\r\nAssertionError: pydantic.dataclasses.dataclass only supports init=False\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\nray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n 
\"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]} | 1,582 | 225 |
gh_patches_debug_23237 | rasdani/github-patches | git_diff | joke2k__faker-956 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyfloat, pydecimal cannot be negative when left_digits=0
I am trying to fake random floats in the range `(-1, 1)`. Currently, `pyfloat` and `pydecimal` will never be negative as long as `left_digits=0`, even if `positive=False`.
This seems to be because [the `sign` (-1, 1) is multiplied by only the left digits](https://github.com/joke2k/faker/blob/6e32e07/faker/providers/python/__init__.py#L62) (which in this case is 0, canceling out the `sign`), rather than by the entire numeric value.
### Steps to reproduce
```python
>>> import faker
>>> fake = faker.Faker()
>>> any(fake.pyfloat(left_digits=0, positive=False) < 0 for _ in range(10000))
False
```
### Expected behavior
I'd expect approximately half of the floats to be negative, when `positive` is not `True`
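
A minimal sketch of the arithmetic behind this, reusing the `sign`/`left_number` names from the linked provider code (the fix suggested at the end is an assumption, not the library's actual change):

```python
# With left_digits=0 the integer part is always 0, so the sign cancels out:
sign = -1
left_number = sign * 0                                      # -> 0, negativity is lost
assert float("{0}.{1}".format(left_number, 123)) == 0.123   # never negative

# One possible fix (assumption): attach the sign to the whole literal instead.
assert float("{0}{1}.{2}".format("-", 0, 123)) == -0.123
```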
</issue>
<code>
[start of faker/providers/python/__init__.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 from decimal import Decimal
6 import sys
7
8 import six
9
10 from .. import BaseProvider
11
12
13 class Provider(BaseProvider):
14 def pybool(self):
15 return self.random_int(0, 1) == 1
16
17 def pystr(self, min_chars=None, max_chars=20):
18 """
19 Generates a random string of upper and lowercase letters.
20 :type min_chars: int
21 :type max_chars: int
22 :return: String. Random of random length between min and max characters.
23 """
24 if min_chars is None:
25 return "".join(self.random_letters(length=max_chars))
26 else:
27 assert (
28 max_chars >= min_chars), "Maximum length must be greater than or equal to minium length"
29 return "".join(
30 self.random_letters(
31 length=self.generator.random.randint(min_chars, max_chars),
32 ),
33 )
34
35 def pyfloat(self, left_digits=None, right_digits=None, positive=False,
36 min_value=None, max_value=None):
37
38 if left_digits is not None and left_digits < 0:
39 raise ValueError(
40 'A float number cannot have less than 0 digits in its '
41 'integer part')
42 if right_digits is not None and right_digits < 0:
43 raise ValueError(
44 'A float number cannot have less than 0 digits in its '
45 'fractional part')
46 if left_digits == 0 and right_digits == 0:
47 raise ValueError(
48 'A float number cannot have less than 0 digits in total')
49 if None not in (min_value, max_value) and min_value > max_value:
50 raise ValueError('Min value cannot be greater than max value')
51
52 left_digits = left_digits if left_digits is not None else (
53 self.random_int(1, sys.float_info.dig))
54 right_digits = right_digits if right_digits is not None else (
55 self.random_int(0, sys.float_info.dig - left_digits))
56 sign = 1 if positive else self.random_element((-1, 1))
57
58 if (min_value is not None) or (max_value is not None):
59 if min_value is None:
60 min_value = max_value - self.random_int()
61 if max_value is None:
62 max_value = min_value + self.random_int()
63
64 left_number = self.random_int(min_value, max_value)
65 else:
66 left_number = sign * self.random_number(left_digits)
67
68 return float("{0}.{1}".format(
69 left_number,
70 self.random_number(right_digits),
71 ))
72
73 def pyint(self, min=0, max=9999, step=1):
74 return self.generator.random_int(min, max, step=step)
75
76 def pydecimal(self, left_digits=None, right_digits=None, positive=False,
77 min_value=None, max_value=None):
78
79 float_ = self.pyfloat(
80 left_digits, right_digits, positive, min_value, max_value)
81 return Decimal(str(float_))
82
83 def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):
84 return tuple(
85 self.pyset(
86 nb_elements,
87 variable_nb_elements,
88 *value_types))
89
90 def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):
91 return set(
92 self._pyiterable(
93 nb_elements,
94 variable_nb_elements,
95 *value_types))
96
97 def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):
98 return list(
99 self._pyiterable(
100 nb_elements,
101 variable_nb_elements,
102 *value_types))
103
104 def pyiterable(
105 self,
106 nb_elements=10,
107 variable_nb_elements=True,
108 *value_types):
109 return self.random_element([self.pylist, self.pytuple, self.pyset])(
110 nb_elements, variable_nb_elements, *value_types)
111
112 def _random_type(self, type_list):
113 value_type = self.random_element(type_list)
114
115 method_name = "py{0}".format(value_type)
116 if hasattr(self, method_name):
117 value_type = method_name
118
119 return self.generator.format(value_type)
120
121 def _pyiterable(
122 self,
123 nb_elements=10,
124 variable_nb_elements=True,
125 *value_types):
126
127 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()
128 for t in value_types
129 # avoid recursion
130 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
131 if not value_types:
132 value_types = ['str', 'str', 'str', 'str', 'float',
133 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
134
135 if variable_nb_elements:
136 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
137
138 for _ in range(nb_elements):
139 yield self._random_type(value_types)
140
141 def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):
142 """
143 Returns a dictionary.
144
145 :nb_elements: number of elements for dictionary
146 :variable_nb_elements: is use variable number of elements for dictionary
147 :value_types: type of dictionary values
148 """
149 if variable_nb_elements:
150 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
151
152 return dict(zip(
153 self.generator.words(nb_elements),
154 self._pyiterable(nb_elements, False, *value_types),
155 ))
156
157 def pystruct(self, count=10, *value_types):
158
159 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()
160 for t in value_types
161 # avoid recursion
162 if t != 'struct']
163 if not value_types:
164 value_types = ['str', 'str', 'str', 'str', 'float',
165 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
166
167 types = []
168 d = {}
169 nd = {}
170 for i in range(count):
171 d[self.generator.word()] = self._random_type(value_types)
172 types.append(self._random_type(value_types))
173 nd[self.generator.word()] = {i: self._random_type(value_types),
174 i + 1: [self._random_type(value_types),
175 self._random_type(value_types),
176 self._random_type(value_types)],
177 i + 2: {i: self._random_type(value_types),
178 i + 1: self._random_type(value_types),
179 i + 2: [self._random_type(value_types),
180 self._random_type(value_types)]}}
181 return types, d, nd
182
[end of faker/providers/python/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py
--- a/faker/providers/python/__init__.py
+++ b/faker/providers/python/__init__.py
@@ -53,8 +53,7 @@
self.random_int(1, sys.float_info.dig))
right_digits = right_digits if right_digits is not None else (
self.random_int(0, sys.float_info.dig - left_digits))
- sign = 1 if positive else self.random_element((-1, 1))
-
+ sign = ''
if (min_value is not None) or (max_value is not None):
if min_value is None:
min_value = max_value - self.random_int()
@@ -63,9 +62,11 @@
left_number = self.random_int(min_value, max_value)
else:
- left_number = sign * self.random_number(left_digits)
+ sign = '+' if positive else self.random_element(('+', '-'))
+ left_number = self.random_number(left_digits)
- return float("{0}.{1}".format(
+ return float("{0}{1}.{2}".format(
+ sign,
left_number,
self.random_number(right_digits),
))
| {"golden_diff": "diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -53,8 +53,7 @@\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n- sign = 1 if positive else self.random_element((-1, 1))\n-\n+ sign = ''\n if (min_value is not None) or (max_value is not None):\n if min_value is None:\n min_value = max_value - self.random_int()\n@@ -63,9 +62,11 @@\n \n left_number = self.random_int(min_value, max_value)\n else:\n- left_number = sign * self.random_number(left_digits)\n+ sign = '+' if positive else self.random_element(('+', '-'))\n+ left_number = self.random_number(left_digits)\n \n- return float(\"{0}.{1}\".format(\n+ return float(\"{0}{1}.{2}\".format(\n+ sign,\n left_number,\n self.random_number(right_digits),\n ))\n", "issue": "pyfloat, pydecimal cannot be negative when left_digits=0\nI am trying to fake random floats in the range `(-1, 1)`. Currently, `pyfloat` and `pydecimal` will never be negative as long as `left_digits=0`, even if `positive=False`.\r\n\r\nThis seems to be because [the `sign` (-1, 1) is multiplied by only the left digits](https://github.com/joke2k/faker/blob/6e32e07/faker/providers/python/__init__.py#L62) (which in this case is 0, canceling out the `sign`), rather than by the entire numeric value.\r\n\r\n### Steps to reproduce\r\n\r\n```python\r\n>>> import faker\r\n>>> fake = faker.Faker()\r\n>>> any(fake.pyfloat(left_digits=0, positive=False) < 0 for _ in range(10000))\r\nFalse\r\n```\r\n\r\n### Expected behavior\r\n\r\nI'd expect approximately half of the floats to be negative, when `positive` is not `True`\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nimport six\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minium length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = 1 if positive else self.random_element((-1, 1))\n\n if (min_value is not None) or (max_value is not None):\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n\n left_number = self.random_int(min_value, max_value)\n else:\n left_number = sign * self.random_number(left_digits)\n\n return float(\"{0}.{1}\".format(\n left_number,\n self.random_number(right_digits),\n ))\n\n def pyint(self, min=0, max=9999, step=1):\n return self.generator.random_int(min, max, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(\n self.pyset(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for 
_ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types),\n ))\n\n def pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}]} | 2,657 | 269 |
gh_patches_debug_3681 | rasdani/github-patches | git_diff | ivy-llc__ivy-13823 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rand_like
</issue>
<code>
[start of ivy/functional/frontends/torch/random_sampling.py]
1 import ivy
2 from ivy.func_wrapper import with_supported_dtypes
3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
4
5 try:
6 from torch import Generator
7 except ImportError:
8 from types import SimpleNamespace
9
10 Generator = SimpleNamespace
11
12
13 def seed() -> int:
14 """Returns a 64 bit number used to seed the RNG"""
15 return int(ivy.randint(-(2**63), 2**63 - 1))
16
17
18 @to_ivy_arrays_and_back
19 def manual_seed(seed: int):
20 ivy.seed(seed_value=seed)
21 return Generator().manual_seed(seed)
22
23
24 @with_supported_dtypes(
25 {
26 "1.11.0 and below": (
27 "float32",
28 "float64",
29 )
30 },
31 "torch",
32 )
33 @to_ivy_arrays_and_back
34 def multinomial(input, num_samples, replacement=False, *, generator=None, out=None):
35 return ivy.multinomial(
36 num_samples + 1, # doesn't matter because `probs` is provided, but should be
37 # greater than the number of samples
38 num_samples,
39 probs=input,
40 replace=replacement,
41 out=out,
42 )
43
44
45 @with_supported_dtypes(
46 {
47 "1.11.0 and below": (
48 "float32",
49 "float64",
50 )
51 },
52 "torch",
53 )
54 @to_ivy_arrays_and_back
55 def poisson(input, generator=None):
56 return ivy.poisson(input, shape=None)
57
58
59 @to_ivy_arrays_and_back
60 def rand(
61 size,
62 *,
63 generator=None,
64 out=None,
65 dtype=None,
66 layout=None,
67 device=None,
68 requires_grad=False,
69 pin_memory=False
70 ):
71 return ivy.random_uniform(
72 shape=size,
73 out=out,
74 dtype=dtype,
75 device=device,
76 )
77
78
79 @to_ivy_arrays_and_back
80 def randn(
81 size,
82 *,
83 generator=None,
84 out=None,
85 dtype=None,
86 layout=None,
87 device=None,
88 requires_grad=False,
89 pin_memory=False
90 ):
91 return ivy.random_normal(
92 shape=size,
93 out=out,
94 dtype=dtype,
95 device=device,
96 )
97
[end of ivy/functional/frontends/torch/random_sampling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py
--- a/ivy/functional/frontends/torch/random_sampling.py
+++ b/ivy/functional/frontends/torch/random_sampling.py
@@ -76,6 +76,27 @@
)
+@to_ivy_arrays_and_back
+def rand_like(
+ input,
+ *,
+ dtype=None,
+ layout=None,
+ device=None,
+ requires_grad=False,
+ memory_format=False
+):
+ shape = input.shape
+ if not dtype:
+ dtype = input.dtype
+
+ return ivy.random_uniform(
+ shape=shape,
+ dtype=dtype,
+ device=device,
+ )
+
+
@to_ivy_arrays_and_back
def randn(
size,
| {"golden_diff": "diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py\n--- a/ivy/functional/frontends/torch/random_sampling.py\n+++ b/ivy/functional/frontends/torch/random_sampling.py\n@@ -76,6 +76,27 @@\n )\n \n \n+@to_ivy_arrays_and_back\n+def rand_like(\n+ input,\n+ *,\n+ dtype=None,\n+ layout=None,\n+ device=None,\n+ requires_grad=False,\n+ memory_format=False\n+):\n+ shape = input.shape\n+ if not dtype:\n+ dtype = input.dtype\n+\n+ return ivy.random_uniform(\n+ shape=shape,\n+ dtype=dtype,\n+ device=device,\n+ )\n+\n+\n @to_ivy_arrays_and_back\n def randn(\n size,\n", "issue": "rand_like\n\n", "before_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\ntry:\n from torch import Generator\nexcept ImportError:\n from types import SimpleNamespace\n\n Generator = SimpleNamespace\n\n\ndef seed() -> int:\n \"\"\"Returns a 64 bit number used to seed the RNG\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return Generator().manual_seed(seed)\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n return ivy.poisson(input, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef rand(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_uniform(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_normal(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}]} | 1,223 | 188 |
gh_patches_debug_2345 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-87 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Entities loaded in gcloud.datastore don't have a dataset
``` python
>>> dataset = demo.get_dataset()
>>> query = dataset.query()
>>> entity = query.fetch()[0]
>>> entity.delete()
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "gcloud/datastore/entity.py", line 206, in delete
self.dataset().connection().delete_entity(
AttributeError: 'NoneType' object has no attribute 'delete_entity'
```
This is because we're creating entities from the protobufs, with the proper `dataset_id` but not a true reference to the Dataset object (which has a pointer to the connection).
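
A sketch of the idea in code; whether `Entity.from_protobuf` accepts a `dataset` keyword, and the helper name below, are assumptions for illustration only:

```python
from gcloud.datastore.entity import Entity


def entities_from_pbs(entity_pbs, dataset):
    """Hypothetical helper: hydrate entities with the originating dataset so
    that entity.dataset().connection() works later (e.g. for delete())."""
    # Passing `dataset` through to from_protobuf is the assumed fix here.
    return [Entity.from_protobuf(pb, dataset=dataset) for pb in entity_pbs]
```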
</issue>
<code>
[start of gcloud/datastore/query.py]
1 import copy
2
3 from gcloud.datastore import datastore_v1_pb2 as datastore_pb
4 from gcloud.datastore import helpers
5 from gcloud.datastore.entity import Entity
6
7
8 # TODO: Figure out how to properly handle namespaces.
9
10 class Query(object):
11 """A Query against the Cloud Datastore.
12
13 This class serves as an abstraction for creating
14 a query over data stored in the Cloud Datastore.
15
16 Each :class:`Query` object is immutable,
17 and a clone is returned whenever
18 any part of the query is modified::
19
20 >>> query = Query('MyKind')
21 >>> limited_query = query.limit(10)
22 >>> query.limit() == 10
23 False
24 >>> limited_query.limit() == 10
25 True
26
27 You typically won't construct a :class:`Query`
28 by initializing it like ``Query('MyKind', dataset=...)``
29 but instead use the helper
30 :func:`gcloud.datastore.dataset.Dataset.query` method
31 which generates a query that can be executed
32 without any additional work::
33
34 >>> from gcloud import datastore
35 >>> dataset = datastore.get_dataset('dataset-id', email, key_path)
36 >>> query = dataset.query('MyKind')
37
38 :type kind: string
39 :param kind: The kind to query.
40
41 :type dataset: :class:`gcloud.datastore.dataset.Dataset`
42 :param dataset: The dataset to query.
43 """
44
45 OPERATORS = {
46 '<': datastore_pb.PropertyFilter.LESS_THAN,
47 '<=': datastore_pb.PropertyFilter.LESS_THAN_OR_EQUAL,
48 '>': datastore_pb.PropertyFilter.GREATER_THAN,
49 '>=': datastore_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,
50 '=': datastore_pb.PropertyFilter.EQUAL,
51 }
52 """Mapping of operator strings and their protobuf equivalents."""
53
54 def __init__(self, kind=None, dataset=None):
55 self._dataset = dataset
56 self._pb = datastore_pb.Query()
57
58 if kind:
59 self._pb.kind.add().name = kind
60
61 def _clone(self):
62 # TODO(jjg): Double check that this makes sense...
63 clone = copy.deepcopy(self)
64 clone._dataset = self._dataset # Shallow copy the dataset.
65 return clone
66
67 def to_protobuf(self):
68 """Convert the :class:`Query` instance to a :class:`gcloud.datastore.datastore_v1_pb2.Query`.
69
70 :rtype: :class:`gclouddatstore.datastore_v1_pb2.Query`
71 :returns: A Query protobuf that can be sent to the protobuf API.
72 """
73 return self._pb
74
75 def filter(self, expression, value):
76 """Filter the query based on an expression and a value.
77
78 This will return a clone of the current :class:`Query`
79 filtered by the expression and value provided.
80
81 Expressions take the form of::
82
83 .filter('<property> <operator>', <value>)
84
85 where property is a property stored on the entity in the datastore
86 and operator is one of ``OPERATORS``
87 (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``)::
88
89 >>> query = Query('Person')
90 >>> filtered_query = query.filter('name =', 'James')
91 >>> filtered_query = query.filter('age >', 50)
92
93 Because each call to ``.filter()`` returns a cloned ``Query`` object
94 we are able to string these together::
95
96 >>> query = Query('Person').filter('name =', 'James').filter('age >', 50)
97
98 :type expression: string
99 :param expression: An expression of a property and an operator (ie, ``=``).
100
101 :type value: integer, string, boolean, float, None, datetime
102 :param value: The value to filter on.
103
104 :rtype: :class:`Query`
105 :returns: A Query filtered by the expression and value provided.
106 """
107 clone = self._clone()
108
109 # Take an expression like 'property >=', and parse it into useful pieces.
110 property_name, operator = None, None
111 expression = expression.strip()
112
113 for operator_string in self.OPERATORS:
114 if expression.endswith(operator_string):
115 operator = self.OPERATORS[operator_string]
116 property_name = expression[0:-len(operator_string)].strip()
117
118 if not operator or not property_name:
119 raise ValueError('Invalid expression: "%s"' % expression)
120
121 # Build a composite filter AND'd together.
122 composite_filter = clone._pb.filter.composite_filter
123 composite_filter.operator = datastore_pb.CompositeFilter.AND
124
125 # Add the specific filter
126 property_filter = composite_filter.filter.add().property_filter
127 property_filter.property.name = property_name
128 property_filter.operator = operator
129
130 # Set the value to filter on based on the type.
131 attr_name, pb_value = helpers.get_protobuf_attribute_and_value(value)
132 setattr(property_filter.value, attr_name, pb_value)
133 return clone
134
135 def kind(self, *kinds):
136 """Get or set the Kind of the Query.
137
138 .. note::
139 This is an **additive** operation.
140 That is, if the Query is set for kinds A and B,
141 and you call ``.kind('C')``,
142 it will query for kinds A, B, *and*, C.
143
144 :type kinds: string
145 :param kinds: The entity kinds for which to query.
146
147 :rtype: string or :class:`Query`
148 :returns: If no arguments, returns the kind.
149 If a kind is provided, returns a clone of the :class:`Query`
150 with those kinds set.
151 """
152 # TODO: Do we want this to be additive?
153 # If not, clear the _pb.kind attribute.
154 if kinds:
155 clone = self._clone()
156 for kind in kinds:
157 clone._pb.kind.add().name = kind
158 return clone
159 else:
160 return self._pb.kind
161
162 def limit(self, limit=None):
163 """Get or set the limit of the Query.
164
165 This is the maximum number of rows (Entities) to return for this Query.
166
167 This is a hybrid getter / setter, used as::
168
169 >>> query = Query('Person')
170 >>> query = query.limit(100) # Set the limit to 100 rows.
171 >>> query.limit() # Get the limit for this query.
172 100
173
174 :rtype: integer, None, or :class:`Query`
175 :returns: If no arguments, returns the current limit.
176 If a limit is provided, returns a clone of the :class:`Query`
177 with that limit set.
178 """
179 if limit:
180 clone = self._clone()
181 clone._pb.limit = limit
182 return clone
183 else:
184 return self._pb.limit
185
186 def dataset(self, dataset=None):
187 """Get or set the :class:`gcloud.datastore.dataset.Dataset` for this Query.
188
189 This is the dataset against which the Query will be run.
190
191 This is a hybrid getter / setter, used as::
192
193 >>> query = Query('Person')
194 >>> query = query.dataset(my_dataset) # Set the dataset.
195 >>> query.dataset() # Get the current dataset.
196 <Dataset object>
197
198 :rtype: :class:`gcloud.datastore.dataset.Dataset`, None, or :class:`Query`
199 :returns: If no arguments, returns the current dataset.
200 If a dataset is provided, returns a clone of the :class:`Query`
201 with that dataset set.
202 """
203 if dataset:
204 clone = self._clone()
205 clone._dataset = dataset
206 return clone
207 else:
208 return self._dataset
209
210 def fetch(self, limit=None):
211 """Executes the Query and returns all matching entities.
212
213 This makes an API call to the Cloud Datastore,
214 sends the Query as a protobuf,
215 parses the responses to Entity protobufs,
216 and then converts them to :class:`gcloud.datastore.entity.Entity` objects.
217
218 For example::
219
220 >>> from gcloud import datastore
221 >>> dataset = datastore.get_dataset('dataset-id', email, key_path)
222 >>> query = dataset.query('Person').filter('name =', 'Sally')
223 >>> query.fetch()
224 [<Entity object>, <Entity object>, ...]
225 >>> query.fetch(1)
226 [<Entity object>]
227 >>> query.limit()
228 None
229
230 :type limit: integer
231 :param limit: An optional limit to apply temporarily to this query.
232 That is, the Query itself won't be altered,
233 but the limit will be applied to the query
234 before it is executed.
235
236 :rtype: list of :class:`gcloud.datastore.entity.Entity`'s
237 :returns: The list of entities matching this query's criteria.
238 """
239 clone = self
240
241 if limit:
242 clone = self.limit(limit)
243
244 entity_pbs = self.dataset().connection().run_query(
245 query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())
246
247 return [Entity.from_protobuf(entity) for entity in entity_pbs]
248
[end of gcloud/datastore/query.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/datastore/query.py b/gcloud/datastore/query.py
--- a/gcloud/datastore/query.py
+++ b/gcloud/datastore/query.py
@@ -244,4 +244,5 @@
entity_pbs = self.dataset().connection().run_query(
query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())
- return [Entity.from_protobuf(entity) for entity in entity_pbs]
+ return [Entity.from_protobuf(entity, dataset=self.dataset())
+ for entity in entity_pbs]
| {"golden_diff": "diff --git a/gcloud/datastore/query.py b/gcloud/datastore/query.py\n--- a/gcloud/datastore/query.py\n+++ b/gcloud/datastore/query.py\n@@ -244,4 +244,5 @@\n entity_pbs = self.dataset().connection().run_query(\n query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())\n \n- return [Entity.from_protobuf(entity) for entity in entity_pbs]\n+ return [Entity.from_protobuf(entity, dataset=self.dataset())\n+ for entity in entity_pbs]\n", "issue": "Entities loaded in gcloud.datastore don't have a dataset\n``` python\n>>> dataset = demo.get_dataset()\n>>> query = dataset.query()\n>>> entity = query.fetch()[0]\n>>> entity.delete()\n... \nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"gcloud/datastore/entity.py\", line 206, in delete\n self.dataset().connection().delete_entity(\nAttributeError: 'NoneType' object has no attribute 'delete_entity'\n```\n\nThis is because we're creating entities from the protobufs, with the proper `dataset_id` but not a true reference to the Dataset object (which has a pointer to the connection).\n\n", "before_files": [{"content": "import copy\n\nfrom gcloud.datastore import datastore_v1_pb2 as datastore_pb\nfrom gcloud.datastore import helpers\nfrom gcloud.datastore.entity import Entity\n\n\n# TODO: Figure out how to properly handle namespaces.\n\nclass Query(object):\n \"\"\"A Query against the Cloud Datastore.\n\n This class serves as an abstraction for creating\n a query over data stored in the Cloud Datastore.\n\n Each :class:`Query` object is immutable,\n and a clone is returned whenever\n any part of the query is modified::\n\n >>> query = Query('MyKind')\n >>> limited_query = query.limit(10)\n >>> query.limit() == 10\n False\n >>> limited_query.limit() == 10\n True\n\n You typically won't construct a :class:`Query`\n by initializing it like ``Query('MyKind', dataset=...)``\n but instead use the helper\n :func:`gcloud.datastore.dataset.Dataset.query` method\n which generates a query that can be executed\n without any additional work::\n\n >>> from gcloud import datastore\n >>> dataset = datastore.get_dataset('dataset-id', email, key_path)\n >>> query = dataset.query('MyKind')\n\n :type kind: string\n :param kind: The kind to query.\n\n :type dataset: :class:`gcloud.datastore.dataset.Dataset`\n :param dataset: The dataset to query.\n \"\"\"\n\n OPERATORS = {\n '<': datastore_pb.PropertyFilter.LESS_THAN,\n '<=': datastore_pb.PropertyFilter.LESS_THAN_OR_EQUAL,\n '>': datastore_pb.PropertyFilter.GREATER_THAN,\n '>=': datastore_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,\n '=': datastore_pb.PropertyFilter.EQUAL,\n }\n \"\"\"Mapping of operator strings and their protobuf equivalents.\"\"\"\n\n def __init__(self, kind=None, dataset=None):\n self._dataset = dataset\n self._pb = datastore_pb.Query()\n\n if kind:\n self._pb.kind.add().name = kind\n\n def _clone(self):\n # TODO(jjg): Double check that this makes sense...\n clone = copy.deepcopy(self)\n clone._dataset = self._dataset # Shallow copy the dataset.\n return clone\n\n def to_protobuf(self):\n \"\"\"Convert the :class:`Query` instance to a :class:`gcloud.datastore.datastore_v1_pb2.Query`.\n\n :rtype: :class:`gclouddatstore.datastore_v1_pb2.Query`\n :returns: A Query protobuf that can be sent to the protobuf API.\n \"\"\"\n return self._pb\n\n def filter(self, expression, value):\n \"\"\"Filter the query based on an expression and a value.\n\n This will return a clone of the current :class:`Query`\n filtered by the expression and value provided.\n\n Expressions 
take the form of::\n\n .filter('<property> <operator>', <value>)\n\n where property is a property stored on the entity in the datastore\n and operator is one of ``OPERATORS``\n (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``)::\n\n >>> query = Query('Person')\n >>> filtered_query = query.filter('name =', 'James')\n >>> filtered_query = query.filter('age >', 50)\n\n Because each call to ``.filter()`` returns a cloned ``Query`` object\n we are able to string these together::\n\n >>> query = Query('Person').filter('name =', 'James').filter('age >', 50)\n\n :type expression: string\n :param expression: An expression of a property and an operator (ie, ``=``).\n\n :type value: integer, string, boolean, float, None, datetime\n :param value: The value to filter on.\n\n :rtype: :class:`Query`\n :returns: A Query filtered by the expression and value provided.\n \"\"\"\n clone = self._clone()\n\n # Take an expression like 'property >=', and parse it into useful pieces.\n property_name, operator = None, None\n expression = expression.strip()\n\n for operator_string in self.OPERATORS:\n if expression.endswith(operator_string):\n operator = self.OPERATORS[operator_string]\n property_name = expression[0:-len(operator_string)].strip()\n\n if not operator or not property_name:\n raise ValueError('Invalid expression: \"%s\"' % expression)\n\n # Build a composite filter AND'd together.\n composite_filter = clone._pb.filter.composite_filter\n composite_filter.operator = datastore_pb.CompositeFilter.AND\n\n # Add the specific filter\n property_filter = composite_filter.filter.add().property_filter\n property_filter.property.name = property_name\n property_filter.operator = operator\n\n # Set the value to filter on based on the type.\n attr_name, pb_value = helpers.get_protobuf_attribute_and_value(value)\n setattr(property_filter.value, attr_name, pb_value)\n return clone\n\n def kind(self, *kinds):\n \"\"\"Get or set the Kind of the Query.\n\n .. 
note::\n This is an **additive** operation.\n That is, if the Query is set for kinds A and B,\n and you call ``.kind('C')``,\n it will query for kinds A, B, *and*, C.\n\n :type kinds: string\n :param kinds: The entity kinds for which to query.\n\n :rtype: string or :class:`Query`\n :returns: If no arguments, returns the kind.\n If a kind is provided, returns a clone of the :class:`Query`\n with those kinds set.\n \"\"\"\n # TODO: Do we want this to be additive?\n # If not, clear the _pb.kind attribute.\n if kinds:\n clone = self._clone()\n for kind in kinds:\n clone._pb.kind.add().name = kind\n return clone\n else:\n return self._pb.kind\n\n def limit(self, limit=None):\n \"\"\"Get or set the limit of the Query.\n\n This is the maximum number of rows (Entities) to return for this Query.\n\n This is a hybrid getter / setter, used as::\n\n >>> query = Query('Person')\n >>> query = query.limit(100) # Set the limit to 100 rows.\n >>> query.limit() # Get the limit for this query.\n 100\n\n :rtype: integer, None, or :class:`Query`\n :returns: If no arguments, returns the current limit.\n If a limit is provided, returns a clone of the :class:`Query`\n with that limit set.\n \"\"\"\n if limit:\n clone = self._clone()\n clone._pb.limit = limit\n return clone\n else:\n return self._pb.limit\n\n def dataset(self, dataset=None):\n \"\"\"Get or set the :class:`gcloud.datastore.dataset.Dataset` for this Query.\n\n This is the dataset against which the Query will be run.\n\n This is a hybrid getter / setter, used as::\n\n >>> query = Query('Person')\n >>> query = query.dataset(my_dataset) # Set the dataset.\n >>> query.dataset() # Get the current dataset.\n <Dataset object>\n\n :rtype: :class:`gcloud.datastore.dataset.Dataset`, None, or :class:`Query`\n :returns: If no arguments, returns the current dataset.\n If a dataset is provided, returns a clone of the :class:`Query`\n with that dataset set.\n \"\"\"\n if dataset:\n clone = self._clone()\n clone._dataset = dataset\n return clone\n else:\n return self._dataset\n\n def fetch(self, limit=None):\n \"\"\"Executes the Query and returns all matching entities.\n\n This makes an API call to the Cloud Datastore,\n sends the Query as a protobuf,\n parses the responses to Entity protobufs,\n and then converts them to :class:`gcloud.datastore.entity.Entity` objects.\n\n For example::\n\n >>> from gcloud import datastore\n >>> dataset = datastore.get_dataset('dataset-id', email, key_path)\n >>> query = dataset.query('Person').filter('name =', 'Sally')\n >>> query.fetch()\n [<Entity object>, <Entity object>, ...]\n >>> query.fetch(1)\n [<Entity object>]\n >>> query.limit()\n None\n\n :type limit: integer\n :param limit: An optional limit to apply temporarily to this query.\n That is, the Query itself won't be altered,\n but the limit will be applied to the query\n before it is executed.\n\n :rtype: list of :class:`gcloud.datastore.entity.Entity`'s\n :returns: The list of entities matching this query's criteria.\n \"\"\"\n clone = self\n\n if limit:\n clone = self.limit(limit)\n\n entity_pbs = self.dataset().connection().run_query(\n query_pb=clone.to_protobuf(), dataset_id=self.dataset().id())\n\n return [Entity.from_protobuf(entity) for entity in entity_pbs]\n", "path": "gcloud/datastore/query.py"}]} | 3,298 | 120 |
gh_patches_debug_66902 | rasdani/github-patches | git_diff | ivy-llc__ivy-17524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
argsort
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/search.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_supported_dtypes(
10 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
11 "paddle",
12 )
13 @to_ivy_arrays_and_back
14 def argmax(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
15 return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)
16
17
18 @with_supported_dtypes(
19 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
20 "paddle",
21 )
22 @to_ivy_arrays_and_back
23 def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
24 return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)
25
[end of ivy/functional/frontends/paddle/tensor/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py
--- a/ivy/functional/frontends/paddle/tensor/search.py
+++ b/ivy/functional/frontends/paddle/tensor/search.py
@@ -22,3 +22,12 @@
@to_ivy_arrays_and_back
def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)
+
+
+@with_supported_dtypes(
+ {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def argsort(x, /, *, axis=-1, descending=False, name=None):
+ return ivy.argsort(x, axis=axis, descending=descending)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py\n--- a/ivy/functional/frontends/paddle/tensor/search.py\n+++ b/ivy/functional/frontends/paddle/tensor/search.py\n@@ -22,3 +22,12 @@\n @to_ivy_arrays_and_back\n def argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def argsort(x, /, *, axis=-1, descending=False, name=None):\n+ return ivy.argsort(x, axis=axis, descending=descending)\n", "issue": "argsort\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmax(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/search.py"}]} | 845 | 228 |
gh_patches_debug_20489 | rasdani/github-patches | git_diff | geopandas__geopandas-1220 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: Carry original schema and use it during to_file
Following the discussion in #1185, I have drafted a PR which saves the schema during `read_file` to `gdf.schema` and then, during `to_file`, checks whether the original schema for each column is still applicable. If so, it uses it; if not, it infers a new one based on the dtype, as we do now.
I am pretty sure that there will be some corner cases which are not covered here, but I wanted to have this PR opened so we can discuss the implementation. But for the cases described in #1185 and earlier in #177, this should work.
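For illustration only, here is a rough sketch of the per-column check the PR describes; the helper name and the plain string comparison are assumptions made for this example, not the actual implementation:
```
# Hypothetical helper: decide whether a stored fiona type string still fits a column.
# `stored` is the saved schema entry (e.g. 'float:19.11'); `inferred` is the
# dtype-based type that infer_schema would produce today (e.g. 'float').
def schema_still_applies(stored, inferred):
    # keep the width-qualified original only if its base type still matches
    return stored.split(':')[0] == inferred

assert schema_still_applies('float:19.11', 'float')   # reuse 'float:19.11'
assert not schema_still_applies('int:4', 'float')     # dtype changed, re-infer
```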
```
path = gpd.datasets.get_path('nybb')
gdf = gpd.read_file(path)
gdf.schema # original schema from fiona
{'properties': OrderedDict([('BoroCode', 'int:4'),
('BoroName', 'str:32'),
('Shape_Leng', 'float:19.11'),
('Shape_Area', 'float:19.11')]),
'geometry': 'Polygon'}
```
```
gpd.io.file.infer_schema(gdf)
{'geometry': 'MultiPolygon',
'properties': OrderedDict([('BoroCode', 'int:4'),
('BoroName', 'str:32'),
('Shape_Leng', 'float:19.11'),
('Shape_Area', 'float:19.11')])}
```
On master:
```
gpd.io.file.infer_schema(gdf)
{'geometry': 'MultiPolygon',
'properties': OrderedDict([('BoroCode', 'int'),
('BoroName', 'str'),
('Shape_Leng', 'float'),
('Shape_Area', 'float')])}
```
Closes #1185
</issue>
<code>
[start of geopandas/io/file.py]
1 from distutils.version import LooseVersion
2
3 import numpy as np
4
5 import fiona
6
7 from geopandas import GeoDataFrame, GeoSeries
8
9 try:
10 from fiona import Env as fiona_env
11 except ImportError:
12 from fiona import drivers as fiona_env
13 # Adapted from pandas.io.common
14 from urllib.request import urlopen as _urlopen
15 from urllib.parse import urlparse as parse_url
16 from urllib.parse import uses_relative, uses_netloc, uses_params
17
18 _FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion("1.8")
19
20
21 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
22 _VALID_URLS.discard("")
23
24
25 def _is_url(url):
26 """Check to see if *url* has a valid protocol."""
27 try:
28 return parse_url(url).scheme in _VALID_URLS
29 except Exception:
30 return False
31
32
33 def read_file(filename, bbox=None, **kwargs):
34 """
35 Returns a GeoDataFrame from a file or URL.
36
37 Parameters
38 ----------
39 filename: str
40 Either the absolute or relative path to the file or URL to
41 be opened.
42 bbox : tuple | GeoDataFrame or GeoSeries, default None
43 Filter features by given bounding box, GeoSeries, or GeoDataFrame.
44 CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
45 **kwargs:
46 Keyword args to be passed to the `open` or `BytesCollection` method
47 in the fiona library when opening the file. For more information on
48 possible keywords, type:
49 ``import fiona; help(fiona.open)``
50
51 Examples
52 --------
53 >>> df = geopandas.read_file("nybb.shp")
54
55 Returns
56 -------
57 geodataframe : GeoDataFrame
58 """
59 if _is_url(filename):
60 req = _urlopen(filename)
61 path_or_bytes = req.read()
62 reader = fiona.BytesCollection
63 else:
64 path_or_bytes = filename
65 reader = fiona.open
66
67 with fiona_env():
68 with reader(path_or_bytes, **kwargs) as features:
69
70 # In a future Fiona release the crs attribute of features will
71 # no longer be a dict. The following code will be both forward
72 # and backward compatible.
73 if hasattr(features.crs, "to_dict"):
74 crs = features.crs.to_dict()
75 else:
76 crs = features.crs
77
78 if bbox is not None:
79 if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
80 bbox = tuple(bbox.to_crs(crs).total_bounds)
81 assert len(bbox) == 4
82 f_filt = features.filter(bbox=bbox)
83 else:
84 f_filt = features
85
86 columns = list(features.meta["schema"]["properties"]) + ["geometry"]
87 gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)
88
89 return gdf
90
91
92 def to_file(df, filename, driver="ESRI Shapefile", schema=None, **kwargs):
93 """
94 Write this GeoDataFrame to an OGR data source
95
96 A dictionary of supported OGR providers is available via:
97 >>> import fiona
98 >>> fiona.supported_drivers
99
100 Parameters
101 ----------
102 df : GeoDataFrame to be written
103 filename : string
104 File path or file handle to write to.
105 driver : string, default 'ESRI Shapefile'
106 The OGR format driver used to write the vector file.
107 schema : dict, default None
108 If specified, the schema dictionary is passed to Fiona to
109 better control how the file is written. If None, GeoPandas
110 will determine the schema based on each column's dtype
111
112 The *kwargs* are passed to fiona.open and can be used to write
113 to multi-layer data, store data within archives (zip files), etc.
114 The path may specify a fiona VSI scheme.
115 """
116 if schema is None:
117 schema = infer_schema(df)
118 with fiona_env():
119 with fiona.open(
120 filename, "w", driver=driver, crs=df.crs, schema=schema, **kwargs
121 ) as colxn:
122 colxn.writerecords(df.iterfeatures())
123
124
125 def infer_schema(df):
126 try:
127 from collections import OrderedDict
128 except ImportError:
129 from ordereddict import OrderedDict
130
131 def convert_type(column, in_type):
132 if in_type == object:
133 return "str"
134 if in_type.name.startswith("datetime64"):
135 # numpy datetime type regardless of frequency
136 return "datetime"
137 out_type = type(np.zeros(1, in_type).item()).__name__
138 if out_type == "long":
139 out_type = "int"
140 if not _FIONA18 and out_type == "bool":
141 raise ValueError(
142 'column "{}" is boolean type, '.format(column)
143 + "which is unsupported in file writing with fiona "
144 "< 1.8. Consider casting the column to int type."
145 )
146 return out_type
147
148 properties = OrderedDict(
149 [
150 (col, convert_type(col, _type))
151 for col, _type in zip(df.columns, df.dtypes)
152 if col != df._geometry_column_name
153 ]
154 )
155
156 if df.empty:
157 raise ValueError("Cannot write empty DataFrame to file.")
158
159 # Since https://github.com/Toblerity/Fiona/issues/446 resolution,
160 # Fiona allows a list of geometry types
161 geom_types = _geometry_types(df)
162
163 schema = {"geometry": geom_types, "properties": properties}
164
165 return schema
166
167
168 def _geometry_types(df):
169 """
170 Determine the geometry types in the GeoDataFrame for the schema.
171 """
172 if _FIONA18:
173 # Starting from Fiona 1.8, schema submitted to fiona to write a gdf
174 # can have mixed geometries:
175 # - 3D and 2D shapes can coexist in inferred schema
176 # - Shape and MultiShape types can (and must) coexist in inferred
177 # schema
178 geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()
179 geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]
180 geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()
181 geom_types_3D = ["3D " + gtype for gtype in geom_types_3D if gtype is not None]
182 geom_types = geom_types_3D + geom_types_2D
183
184 else:
185 # Before Fiona 1.8, schema submitted to write a gdf should have
186 # one single geometry type whenever possible:
187 # - 3D and 2D shapes cannot coexist in inferred schema
188 # - Shape and MultiShape can not coexist in inferred schema
189 geom_types = _geometry_types_back_compat(df)
190
191 if len(geom_types) == 0:
192 # Default geometry type supported by Fiona
193 # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)
194 return "Unknown"
195
196 if len(geom_types) == 1:
197 geom_types = geom_types[0]
198
199 return geom_types
200
201
202 def _geometry_types_back_compat(df):
203 """
204 for backward compatibility with Fiona<1.8 only
205 """
206 unique_geom_types = df.geometry.geom_type.unique()
207 unique_geom_types = [gtype for gtype in unique_geom_types if gtype is not None]
208
209 # merge single and Multi types (eg Polygon and MultiPolygon)
210 unique_geom_types = [
211 gtype
212 for gtype in unique_geom_types
213 if not gtype.startswith("Multi") or gtype[5:] not in unique_geom_types
214 ]
215
216 if df.geometry.has_z.any():
217 # declare all geometries as 3D geometries
218 unique_geom_types = ["3D " + type for type in unique_geom_types]
219 # by default, all geometries are 2D geometries
220
221 return unique_geom_types
222
[end of geopandas/io/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -123,10 +123,10 @@
def infer_schema(df):
- try:
- from collections import OrderedDict
- except ImportError:
- from ordereddict import OrderedDict
+ from collections import OrderedDict
+
+ # TODO: test pandas string type and boolean type once released
+ types = {"Int64": "int", "string": "str", "boolean": "bool"}
def convert_type(column, in_type):
if in_type == object:
@@ -134,7 +134,10 @@
if in_type.name.startswith("datetime64"):
# numpy datetime type regardless of frequency
return "datetime"
- out_type = type(np.zeros(1, in_type).item()).__name__
+ if str(in_type) in types:
+ out_type = types[str(in_type)]
+ else:
+ out_type = type(np.zeros(1, in_type).item()).__name__
if out_type == "long":
out_type = "int"
if not _FIONA18 and out_type == "bool":
| {"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -123,10 +123,10 @@\n \n \n def infer_schema(df):\n- try:\n- from collections import OrderedDict\n- except ImportError:\n- from ordereddict import OrderedDict\n+ from collections import OrderedDict\n+\n+ # TODO: test pandas string type and boolean type once released\n+ types = {\"Int64\": \"int\", \"string\": \"str\", \"boolean\": \"bool\"}\n \n def convert_type(column, in_type):\n if in_type == object:\n@@ -134,7 +134,10 @@\n if in_type.name.startswith(\"datetime64\"):\n # numpy datetime type regardless of frequency\n return \"datetime\"\n- out_type = type(np.zeros(1, in_type).item()).__name__\n+ if str(in_type) in types:\n+ out_type = types[str(in_type)]\n+ else:\n+ out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == \"long\":\n out_type = \"int\"\n if not _FIONA18 and out_type == \"bool\":\n", "issue": "ENH: Carry original schema and use it during to_file\nFollowing the discussion in #1185, I have drafted a PR which saves schema during `read_file` to `gdf.schema` and then during `to_file` checks if the original schema for each column is still applicable. If so, it uses it, if not it infers new one based on the dtype as we do it now.\r\n\r\nI am pretty sure that there will be some corner cases which are not covered here, but I wanted to have this PR opened so we can discuss the implementation. But for the cases described in #1185 and earlier in #177, this should work.\r\n\r\n```\r\npath = gpd.datasets.get_path('nybb')\r\ngdf = gpd.read_file(path)\r\ngdf.schema # original schema from fiona\r\n\r\n{'properties': OrderedDict([('BoroCode', 'int:4'),\r\n ('BoroName', 'str:32'),\r\n ('Shape_Leng', 'float:19.11'),\r\n ('Shape_Area', 'float:19.11')]),\r\n 'geometry': 'Polygon'}\r\n```\r\n\r\n```\r\ngpd.io.file.infer_schema(gdf)\r\n{'geometry': 'MultiPolygon',\r\n\r\n 'properties': OrderedDict([('BoroCode', 'int:4'),\r\n ('BoroName', 'str:32'),\r\n ('Shape_Leng', 'float:19.11'),\r\n ('Shape_Area', 'float:19.11')])}\r\n```\r\n\r\nOn master:\r\n```\r\ngpd.io.file.infer_schema(gdf)\r\n\r\n{'geometry': 'MultiPolygon',\r\n 'properties': OrderedDict([('BoroCode', 'int'),\r\n ('BoroName', 'str'),\r\n ('Shape_Leng', 'float'),\r\n ('Shape_Area', 'float')])}\r\n```\r\n\r\nCloses #1185 \n", "before_files": [{"content": "from distutils.version import LooseVersion\n\nimport numpy as np\n\nimport fiona\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\ntry:\n from fiona import Env as fiona_env\nexcept ImportError:\n from fiona import drivers as fiona_env\n# Adapted from pandas.io.common\nfrom urllib.request import urlopen as _urlopen\nfrom urllib.parse import urlparse as parse_url\nfrom urllib.parse import uses_relative, uses_netloc, uses_params\n\n_FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion(\"1.8\")\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard(\"\")\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except Exception:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a 
GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with fiona_env():\n with reader(path_or_bytes, **kwargs) as features:\n\n # In a future Fiona release the crs attribute of features will\n # no longer be a dict. The following code will be both forward\n # and backward compatible.\n if hasattr(features.crs, \"to_dict\"):\n crs = features.crs.to_dict()\n else:\n crs = features.crs\n\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None, **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n The path may specify a fiona VSI scheme.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n with fiona_env():\n with fiona.open(\n filename, \"w\", driver=driver, crs=df.crs, schema=schema, **kwargs\n ) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return \"str\"\n if in_type.name.startswith(\"datetime64\"):\n # numpy datetime type regardless of frequency\n return \"datetime\"\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == \"long\":\n out_type = \"int\"\n if not _FIONA18 and out_type == \"bool\":\n raise ValueError(\n 'column \"{}\" is boolean type, '.format(column)\n + \"which is unsupported in file writing with fiona \"\n \"< 1.8. 
Consider casting the column to int type.\"\n )\n return out_type\n\n properties = OrderedDict(\n [\n (col, convert_type(col, _type))\n for col, _type in zip(df.columns, df.dtypes)\n if col != df._geometry_column_name\n ]\n )\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n # Since https://github.com/Toblerity/Fiona/issues/446 resolution,\n # Fiona allows a list of geometry types\n geom_types = _geometry_types(df)\n\n schema = {\"geometry\": geom_types, \"properties\": properties}\n\n return schema\n\n\ndef _geometry_types(df):\n \"\"\"\n Determine the geometry types in the GeoDataFrame for the schema.\n \"\"\"\n if _FIONA18:\n # Starting from Fiona 1.8, schema submitted to fiona to write a gdf\n # can have mixed geometries:\n # - 3D and 2D shapes can coexist in inferred schema\n # - Shape and MultiShape types can (and must) coexist in inferred\n # schema\n geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()\n geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]\n geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()\n geom_types_3D = [\"3D \" + gtype for gtype in geom_types_3D if gtype is not None]\n geom_types = geom_types_3D + geom_types_2D\n\n else:\n # Before Fiona 1.8, schema submitted to write a gdf should have\n # one single geometry type whenever possible:\n # - 3D and 2D shapes cannot coexist in inferred schema\n # - Shape and MultiShape can not coexist in inferred schema\n geom_types = _geometry_types_back_compat(df)\n\n if len(geom_types) == 0:\n # Default geometry type supported by Fiona\n # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)\n return \"Unknown\"\n\n if len(geom_types) == 1:\n geom_types = geom_types[0]\n\n return geom_types\n\n\ndef _geometry_types_back_compat(df):\n \"\"\"\n for backward compatibility with Fiona<1.8 only\n \"\"\"\n unique_geom_types = df.geometry.geom_type.unique()\n unique_geom_types = [gtype for gtype in unique_geom_types if gtype is not None]\n\n # merge single and Multi types (eg Polygon and MultiPolygon)\n unique_geom_types = [\n gtype\n for gtype in unique_geom_types\n if not gtype.startswith(\"Multi\") or gtype[5:] not in unique_geom_types\n ]\n\n if df.geometry.has_z.any():\n # declare all geometries as 3D geometries\n unique_geom_types = [\"3D \" + type for type in unique_geom_types]\n # by default, all geometries are 2D geometries\n\n return unique_geom_types\n", "path": "geopandas/io/file.py"}]} | 3,247 | 279 |
gh_patches_debug_11382 | rasdani/github-patches | git_diff | activeloopai__deeplake-683 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]
## 🐛🐛 Bug Report
### ⚗️ Current Behavior
Hub's version info is present in two locations, `setup.py` and `hub/version.py`. As a result, the released version displays the wrong version info (1.2.3 instead of 1.3.0) when users run `hub --version`.
### 🧰 Possible Solution (optional)
Remove version info from `setup.py`.
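One common way to keep a single source of truth (shown only as an illustrative sketch, not necessarily what this repo should adopt; it assumes `hub/version.py` defines `__version__`):
```
# Illustrative sketch for setup.py: read the version from hub/version.py
# instead of hard-coding a second copy here.
import os

here = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(here, "hub", "version.py")) as f:
    exec(f.read(), version_ns)  # assumes the file defines __version__

VERSION = version_ns["__version__"]  # then pass version=VERSION to setup()
```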
</issue>
<code>
[start of setup.py]
1 """
2 License:
3 This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
4 If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
5 """
6
7 import os
8
9 from setuptools import find_packages, setup
10
11 project = "hub"
12 VERSION = "1.3.0"
13
14 this_directory = os.path.abspath(os.path.dirname(__file__))
15 with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
16 long_description = f.read()
17
18 with open(os.path.join(this_directory, "requirements.txt")) as f:
19 requirements = f.readlines()
20
21 setup(
22 name=project,
23 version=VERSION,
24 description="Activeloop Hub",
25 long_description=long_description,
26 long_description_content_type="text/markdown",
27 author="Snark AI Inc.",
28 author_email="[email protected]",
29 license="MPL 2.0",
30 url="https://github.com/activeloopai/Hub",
31 packages=find_packages(),
32 include_package_data=True,
33 zip_safe=False,
34 keywords="snark-hub",
35 project_urls={
36 "Documentation": "https://docs.activeloop.ai/",
37 "Source": "https://github.com/activeloopai/Hub",
38 },
39 classifiers=[
40 "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
41 "Programming Language :: Python :: 3",
42 "Programming Language :: Python :: 3.6",
43 "Programming Language :: Python :: 3.7",
44 "Programming Language :: Python :: 3.8",
45 "Programming Language :: Python :: 3.9",
46 "Programming Language :: Python :: 3 :: Only",
47 ],
48 python_requires=">=3.6",
49 install_requires=requirements,
50 setup_requires=[],
51 dependency_links=[],
52 entry_points={
53 "console_scripts": [
54 "activeloop = hub.cli.command:cli",
55 "activeloop-local = hub.cli.local:cli",
56 "activeloop-dev = hub.cli.dev:cli",
57 "hub = hub.cli.command:cli",
58 ]
59 },
60 tests_require=["pytest", "mock>=1.0.1"],
61 )
62
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,6 @@
from setuptools import find_packages, setup
project = "hub"
-VERSION = "1.3.0"
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
@@ -20,7 +19,6 @@
setup(
name=project,
- version=VERSION,
description="Activeloop Hub",
long_description=long_description,
long_description_content_type="text/markdown",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,6 @@\n from setuptools import find_packages, setup\n \n project = \"hub\"\n-VERSION = \"1.3.0\"\n \n this_directory = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(this_directory, \"README.md\")) as f:\n@@ -20,7 +19,6 @@\n \n setup(\n name=project,\n- version=VERSION,\n description=\"Activeloop Hub\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "[BUG]\n## \ud83d\udc1b\ud83d\udc1b Bug Report\r\n\r\n\r\n### \u2697\ufe0f Current Behavior\r\nHub's version info is present in two locations, `setup.py` and `hub/version.py`. As result, the released version displays the wrong version info (1.2.3 instead of 1.3.0) when users do `hub --version`.\r\n\r\n### \ud83e\uddf0 Possible Solution (optional)\r\nRemove version info from `setup.py`.\n", "before_files": [{"content": "\"\"\"\nLicense:\nThis Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.\nIf a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\"\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nproject = \"hub\"\nVERSION = \"1.3.0\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nwith open(os.path.join(this_directory, \"requirements.txt\")) as f:\n requirements = f.readlines()\n\nsetup(\n name=project,\n version=VERSION,\n description=\"Activeloop Hub\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Snark AI Inc.\",\n author_email=\"[email protected]\",\n license=\"MPL 2.0\",\n url=\"https://github.com/activeloopai/Hub\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n keywords=\"snark-hub\",\n project_urls={\n \"Documentation\": \"https://docs.activeloop.ai/\",\n \"Source\": \"https://github.com/activeloopai/Hub\",\n },\n classifiers=[\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n python_requires=\">=3.6\",\n install_requires=requirements,\n setup_requires=[],\n dependency_links=[],\n entry_points={\n \"console_scripts\": [\n \"activeloop = hub.cli.command:cli\",\n \"activeloop-local = hub.cli.local:cli\",\n \"activeloop-dev = hub.cli.dev:cli\",\n \"hub = hub.cli.command:cli\",\n ]\n },\n tests_require=[\"pytest\", \"mock>=1.0.1\"],\n)\n", "path": "setup.py"}]} | 1,223 | 132 |
gh_patches_debug_14878 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2814 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TX: No vote data for v2 API
State: TX
There is no vote data being returned for TX bills/resolutions for the 86 session in the v2 API. I am opening this issue because it is specific to the latest API, and previous issues were specific to the v1 API.
I would like to have a conversation with someone from open-states who can explain your use of the scrapers for retrieving vote data, since it appears you need some help to update/fix these. We have some people from [Open Austin](https://www.open-austin.org/) who can probably be mobilized to help with this.
</issue>
<code>
[start of openstates/tx/__init__.py]
1 from pupa.scrape import Jurisdiction, Organization
2
3 from openstates.utils import url_xpath
4 from .bills import TXBillScraper
5 # from .committees import TXCommitteeScraper
6 # from .events import TXEventScraper
7 from .people import TXPersonScraper
8 # from .votes import TXVoteScraper
9
10
11 class Texas(Jurisdiction):
12 division_id = "ocd-division/country:us/state:tx"
13 classification = "government"
14 name = "Texas"
15 url = "https://capitol.texas.gov/"
16 scrapers = {
17 'people': TXPersonScraper,
18 # 'committees': TXCommitteeScraper,
19 'bills': TXBillScraper,
20 # Re-enable vote scraper when adding next regular session
21 # 'votes': TXVoteScraper,
22 # 'events': TXEventScraper
23 }
24 legislative_sessions = [
25 {
26 "_scraped_name": "81(R) - 2009",
27 "classification": "primary",
28 "end_date": "2009-06-01",
29 "identifier": "81",
30 "name": "81st Legislature (2009)",
31 "start_date": "2009-01-13"
32 },
33 {
34 "_scraped_name": "81(1) - 2009",
35 "classification": "special",
36 "end_date": "2009-07-02",
37 "identifier": "811",
38 "name": "81st Legislature, 1st Called Session (2009)",
39 "start_date": "2009-07-01"
40 },
41 {
42 "_scraped_name": "82(R) - 2011",
43 "classification": "primary",
44 "end_date": "2011-05-30",
45 "identifier": "82",
46 "name": "82nd Legislature (2011)",
47 "start_date": "2011-01-11"
48 },
49 {
50 "_scraped_name": "82(1) - 2011",
51 "classification": "special",
52 "end_date": "2011-06-29",
53 "identifier": "821",
54 "name": "82nd Legislature, 1st Called Session (2011)",
55 "start_date": "2011-05-31"
56 },
57 {
58 "_scraped_name": "83(R) - 2013",
59 "classification": "primary",
60 "end_date": "2013-05-27",
61 "identifier": "83",
62 "name": "83rd Legislature (2013)",
63 "start_date": "2013-01-08"
64 },
65 {
66 "_scraped_name": "83(1) - 2013",
67 "classification": "special",
68 "end_date": "2013-06-25",
69 "identifier": "831",
70 "name": "83nd Legislature, 1st Called Session (2013)",
71 "start_date": "2013-05-27"
72 },
73 {
74 "_scraped_name": "83(2) - 2013",
75 "classification": "special",
76 "end_date": "2013-07-30",
77 "identifier": "832",
78 "name": "83nd Legislature, 2st Called Session (2013)",
79 "start_date": "2013-07-01"
80 },
81 {
82 "_scraped_name": "83(3) - 2013",
83 "classification": "special",
84 "end_date": "2013-08-05",
85 "identifier": "833",
86 "name": "83nd Legislature, 3rd Called Session (2013)",
87 "start_date": "2013-07-30"
88 },
89 {
90 "_scraped_name": "84(R) - 2015",
91 "classification": "primary",
92 "end_date": "2015-06-01",
93 "identifier": "84",
94 "name": "84th Legislature (2015)",
95 "start_date": "2015-01-13"
96 },
97 {
98 "_scraped_name": "85(R) - 2017",
99 "classification": "primary",
100 "end_date": "2017-06-01",
101 "identifier": "85",
102 "name": "85th Legislature (2017)",
103 "start_date": "2017-01-13"
104 },
105 {
106 "_scraped_name": "85(1) - 2017",
107 "classification": "special",
108 "end_date": "2017-08-19",
109 "identifier": "851",
110 "name": "85nd Legislature, 1st Called Session (2017)",
111 "start_date": "2017-07-10"
112 },
113 {
114 "_scraped_name": "86(R) - 2019",
115 "classification": "primary",
116 "end_date": "2019-05-27",
117 "identifier": "86",
118 "name": "86th Legislature (2019)",
119 "start_date": "2019-01-08"
120 },
121 # TODO: Re-enable vote scraper when adding next regular session
122 ]
123 ignored_scraped_sessions = [
124 "80(R) - 2007",
125 "79(3) - 2006",
126 "79(2) - 2005",
127 "79(1) - 2005",
128 "79(R) - 2005",
129 "78(4) - 2004",
130 "78(3) - 2003",
131 "78(2) - 2003",
132 "78(1) - 2003",
133 "78(R) - 2003",
134 "77(R) - 2001",
135 "76(R) - 1999",
136 "75(R) - 1997",
137 "74(R) - 1995",
138 "73(R) - 1993",
139 "72(4) - 1992",
140 "72(3) - 1992",
141 "72(2) - 1991",
142 "72(1) - 1991",
143 "72(R) - 1991",
144 "71(6) - 1990",
145 "71(5) - 1990",
146 "71(4) - 1990",
147 "71(3) - 1990",
148 "71(2) - 1989",
149 "71(1) - 1989",
150 "71(R) - 1989"
151 ]
152
153 def get_session_list(self):
154 return url_xpath('https://capitol.texas.gov/',
155 '//select[@name="cboLegSess"]/option/text()')
156
157 def get_organizations(self):
158 legislature_name = "Texas Legislature"
159
160 legislature = Organization(name=legislature_name,
161 classification="legislature")
162 upper = Organization('Senate', classification='upper',
163 parent_id=legislature._id)
164 lower = Organization('House', classification='lower',
165 parent_id=legislature._id)
166
167 yield Organization(name='Office of the Governor', classification='executive')
168 yield legislature
169 yield upper
170 yield lower
171
[end of openstates/tx/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/tx/__init__.py b/openstates/tx/__init__.py
--- a/openstates/tx/__init__.py
+++ b/openstates/tx/__init__.py
@@ -5,7 +5,7 @@
# from .committees import TXCommitteeScraper
# from .events import TXEventScraper
from .people import TXPersonScraper
-# from .votes import TXVoteScraper
+from .votes import TXVoteScraper
class Texas(Jurisdiction):
@@ -18,7 +18,7 @@
# 'committees': TXCommitteeScraper,
'bills': TXBillScraper,
# Re-enable vote scraper when adding next regular session
- # 'votes': TXVoteScraper,
+ 'votes': TXVoteScraper,
# 'events': TXEventScraper
}
legislative_sessions = [
| {"golden_diff": "diff --git a/openstates/tx/__init__.py b/openstates/tx/__init__.py\n--- a/openstates/tx/__init__.py\n+++ b/openstates/tx/__init__.py\n@@ -5,7 +5,7 @@\n # from .committees import TXCommitteeScraper\n # from .events import TXEventScraper\n from .people import TXPersonScraper\n-# from .votes import TXVoteScraper\n+from .votes import TXVoteScraper\n \n \n class Texas(Jurisdiction):\n@@ -18,7 +18,7 @@\n # 'committees': TXCommitteeScraper,\n 'bills': TXBillScraper,\n # Re-enable vote scraper when adding next regular session\n- # 'votes': TXVoteScraper,\n+ 'votes': TXVoteScraper,\n # 'events': TXEventScraper\n }\n legislative_sessions = [\n", "issue": "TX: No vote data for v2 API\nState: TX\r\n\r\nThere is no vote data being returned for TX bills/resolutions for the 86 session in the v2 API. I am opening this issue because it is specific to the latest API, and previous issues were specific to the v1 API.\r\n\r\nI would like to have a conversation with someone from open-states who can explain your use of the scrapers for retrieving vote data, since it appears you need some help to update/fix these. We have some people that can probably be mobilized to help with this from [Open Austin](https://www.open-austin.org/]\r\n\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\nfrom .bills import TXBillScraper\n# from .committees import TXCommitteeScraper\n# from .events import TXEventScraper\nfrom .people import TXPersonScraper\n# from .votes import TXVoteScraper\n\n\nclass Texas(Jurisdiction):\n division_id = \"ocd-division/country:us/state:tx\"\n classification = \"government\"\n name = \"Texas\"\n url = \"https://capitol.texas.gov/\"\n scrapers = {\n 'people': TXPersonScraper,\n # 'committees': TXCommitteeScraper,\n 'bills': TXBillScraper,\n # Re-enable vote scraper when adding next regular session\n # 'votes': TXVoteScraper,\n # 'events': TXEventScraper\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"81(R) - 2009\",\n \"classification\": \"primary\",\n \"end_date\": \"2009-06-01\",\n \"identifier\": \"81\",\n \"name\": \"81st Legislature (2009)\",\n \"start_date\": \"2009-01-13\"\n },\n {\n \"_scraped_name\": \"81(1) - 2009\",\n \"classification\": \"special\",\n \"end_date\": \"2009-07-02\",\n \"identifier\": \"811\",\n \"name\": \"81st Legislature, 1st Called Session (2009)\",\n \"start_date\": \"2009-07-01\"\n },\n {\n \"_scraped_name\": \"82(R) - 2011\",\n \"classification\": \"primary\",\n \"end_date\": \"2011-05-30\",\n \"identifier\": \"82\",\n \"name\": \"82nd Legislature (2011)\",\n \"start_date\": \"2011-01-11\"\n },\n {\n \"_scraped_name\": \"82(1) - 2011\",\n \"classification\": \"special\",\n \"end_date\": \"2011-06-29\",\n \"identifier\": \"821\",\n \"name\": \"82nd Legislature, 1st Called Session (2011)\",\n \"start_date\": \"2011-05-31\"\n },\n {\n \"_scraped_name\": \"83(R) - 2013\",\n \"classification\": \"primary\",\n \"end_date\": \"2013-05-27\",\n \"identifier\": \"83\",\n \"name\": \"83rd Legislature (2013)\",\n \"start_date\": \"2013-01-08\"\n },\n {\n \"_scraped_name\": \"83(1) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-06-25\",\n \"identifier\": \"831\",\n \"name\": \"83nd Legislature, 1st Called Session (2013)\",\n \"start_date\": \"2013-05-27\"\n },\n {\n \"_scraped_name\": \"83(2) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-07-30\",\n \"identifier\": \"832\",\n \"name\": \"83nd Legislature, 2st Called Session (2013)\",\n 
\"start_date\": \"2013-07-01\"\n },\n {\n \"_scraped_name\": \"83(3) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-08-05\",\n \"identifier\": \"833\",\n \"name\": \"83nd Legislature, 3rd Called Session (2013)\",\n \"start_date\": \"2013-07-30\"\n },\n {\n \"_scraped_name\": \"84(R) - 2015\",\n \"classification\": \"primary\",\n \"end_date\": \"2015-06-01\",\n \"identifier\": \"84\",\n \"name\": \"84th Legislature (2015)\",\n \"start_date\": \"2015-01-13\"\n },\n {\n \"_scraped_name\": \"85(R) - 2017\",\n \"classification\": \"primary\",\n \"end_date\": \"2017-06-01\",\n \"identifier\": \"85\",\n \"name\": \"85th Legislature (2017)\",\n \"start_date\": \"2017-01-13\"\n },\n {\n \"_scraped_name\": \"85(1) - 2017\",\n \"classification\": \"special\",\n \"end_date\": \"2017-08-19\",\n \"identifier\": \"851\",\n \"name\": \"85nd Legislature, 1st Called Session (2017)\",\n \"start_date\": \"2017-07-10\"\n },\n {\n \"_scraped_name\": \"86(R) - 2019\",\n \"classification\": \"primary\",\n \"end_date\": \"2019-05-27\",\n \"identifier\": \"86\",\n \"name\": \"86th Legislature (2019)\",\n \"start_date\": \"2019-01-08\"\n },\n # TODO: Re-enable vote scraper when adding next regular session\n ]\n ignored_scraped_sessions = [\n \"80(R) - 2007\",\n \"79(3) - 2006\",\n \"79(2) - 2005\",\n \"79(1) - 2005\",\n \"79(R) - 2005\",\n \"78(4) - 2004\",\n \"78(3) - 2003\",\n \"78(2) - 2003\",\n \"78(1) - 2003\",\n \"78(R) - 2003\",\n \"77(R) - 2001\",\n \"76(R) - 1999\",\n \"75(R) - 1997\",\n \"74(R) - 1995\",\n \"73(R) - 1993\",\n \"72(4) - 1992\",\n \"72(3) - 1992\",\n \"72(2) - 1991\",\n \"72(1) - 1991\",\n \"72(R) - 1991\",\n \"71(6) - 1990\",\n \"71(5) - 1990\",\n \"71(4) - 1990\",\n \"71(3) - 1990\",\n \"71(2) - 1989\",\n \"71(1) - 1989\",\n \"71(R) - 1989\"\n ]\n\n def get_session_list(self):\n return url_xpath('https://capitol.texas.gov/',\n '//select[@name=\"cboLegSess\"]/option/text()')\n\n def get_organizations(self):\n legislature_name = \"Texas Legislature\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization('Senate', classification='upper',\n parent_id=legislature._id)\n lower = Organization('House', classification='lower',\n parent_id=legislature._id)\n\n yield Organization(name='Office of the Governor', classification='executive')\n yield legislature\n yield upper\n yield lower\n", "path": "openstates/tx/__init__.py"}]} | 2,902 | 197 |
gh_patches_debug_15407 | rasdani/github-patches | git_diff | nipy__nipype-3168 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sgegraph only submits the next processing node
### Summary
the `SGEGraph` plugin isn't submitting the entire workflow, only the next required step.
### Actual behavior
When calling `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})`, only the next processing node is submitted. Subsequent calls are needed to iteratively process the remaining nodes.
`workflow.run(plugin='SGE')` works as expected, submitting one job, waiting until completion, then submitting the next.
### Expected behavior
I would expect that `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})` would submit all processing jobs at once with later jobs being held until the earlier required jobs are finished.
### How to replicate the behavior
I'm new to nipype, so I'm not sure how to get sample/example data to provide a minimal example, but I think that any of the workflows in the example folder would demonstrate this issue.
### Platform details:
<!-- Please run the following code from your shell and place the output between the triple ticks, below.
python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())"
-->
```
200207-13:43:08,275 nipype.utils INFO:
No new version available.
{'commit_hash': '792988f19',
'commit_source': 'installation',
'networkx_version': '2.4',
'nibabel_version': '3.0.1',
'nipype_version': '1.4.1',
'numpy_version': '1.18.1',
'pkg_path': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/lib/python3.8/site-packages/nipype',
'scipy_version': '1.3.2',
'sys_executable': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/bin/python',
'sys_platform': 'linux',
'sys_version': '3.8.1 (default, Jan 8 2020, 22:29:32) \n[GCC 7.3.0]',
'traits_version': '5.2.0'}
```
### Execution environment
Choose one
- My python environment outside container
</issue>
<code>
[start of nipype/pipeline/plugins/sgegraph.py]
1 # -*- coding: utf-8 -*-
2 """Parallel workflow execution via SGE
3 """
4 import os
5 import sys
6
7 from ...interfaces.base import CommandLine
8 from .base import GraphPluginBase, logger
9
10
11 def node_completed_status(checknode):
12 """
13 A function to determine if a node has previously completed it's work
14 :param checknode: The node to check the run status
15 :return: boolean value True indicates that the node does not need to be run.
16 """
17 """ TODO: place this in the base.py file and refactor """
18 node_state_does_not_require_overwrite = checknode.overwrite is False or (
19 checknode.overwrite is None and not checknode._interface.always_run
20 )
21 hash_exists = False
22 try:
23 hash_exists, _, _, _ = checknode.hash_exists()
24 except Exception:
25 hash_exists = False
26 return hash_exists and node_state_does_not_require_overwrite
27
28
29 class SGEGraphPlugin(GraphPluginBase):
30 """Execute using SGE
31
32 The plugin_args input to run can be used to control the SGE execution.
33 Currently supported options are:
34
35 - template : template to use for batch job submission
36 - qsub_args : arguments to be prepended to the job execution script in the
37 qsub call
38
39 """
40
41 _template = """
42 #!/bin/bash
43 #$ -V
44 #$ -S /bin/bash
45 """
46
47 def __init__(self, **kwargs):
48 self._qsub_args = ""
49 self._dont_resubmit_completed_jobs = False
50 if "plugin_args" in kwargs and kwargs["plugin_args"]:
51 plugin_args = kwargs["plugin_args"]
52 if "template" in plugin_args:
53 self._template = plugin_args["template"]
54 if os.path.isfile(self._template):
55 self._template = open(self._template).read()
56 if "qsub_args" in plugin_args:
57 self._qsub_args = plugin_args["qsub_args"]
58 if "dont_resubmit_completed_jobs" in plugin_args:
59 self._dont_resubmit_completed_jobs = plugin_args[
60 "dont_resubmit_completed_jobs"
61 ]
62 super(SGEGraphPlugin, self).__init__(**kwargs)
63
64 def _submit_graph(self, pyfiles, dependencies, nodes):
65 def make_job_name(jobnumber, nodeslist):
66 """
67 - jobnumber: The index number of the job to create
68 - nodeslist: The name of the node being processed
69 - return: A string representing this job to be displayed by SGE
70 """
71 job_name = "j{0}_{1}".format(jobnumber, nodeslist[jobnumber]._id)
72 # Condition job_name to be a valid bash identifier (i.e. - is invalid)
73 job_name = job_name.replace("-", "_").replace(".", "_").replace(":", "_")
74 return job_name
75
76 batch_dir, _ = os.path.split(pyfiles[0])
77 submitjobsfile = os.path.join(batch_dir, "submit_jobs.sh")
78
79 cache_doneness_per_node = dict()
80 if (
81 self._dont_resubmit_completed_jobs
82 ): # A future parameter for controlling this behavior could be added here
83 for idx, pyscript in enumerate(pyfiles):
84 node = nodes[idx]
85 node_status_done = node_completed_status(node)
86
87 # if the node itself claims done, then check to ensure all
88 # dependancies are also done
89 if node_status_done and idx in dependencies:
90 for child_idx in dependencies[idx]:
91 if child_idx in cache_doneness_per_node:
92 child_status_done = cache_doneness_per_node[child_idx]
93 else:
94 child_status_done = node_completed_status(nodes[child_idx])
95 node_status_done = node_status_done and child_status_done
96
97 cache_doneness_per_node[idx] = node_status_done
98
99 with open(submitjobsfile, "wt") as fp:
100 fp.writelines("#!/usr/bin/env bash\n")
101 fp.writelines("# Condense format attempted\n")
102 for idx, pyscript in enumerate(pyfiles):
103 node = nodes[idx]
104 if cache_doneness_per_node.get(idx, False):
105 continue
106 else:
107 template, qsub_args = self._get_args(
108 node, ["template", "qsub_args"]
109 )
110
111 batch_dir, name = os.path.split(pyscript)
112 name = ".".join(name.split(".")[:-1])
113 batchscript = "\n".join(
114 (template, "%s %s" % (sys.executable, pyscript))
115 )
116 batchscriptfile = os.path.join(
117 batch_dir, "batchscript_%s.sh" % name
118 )
119
120 batchscriptoutfile = batchscriptfile + ".o"
121 batchscripterrfile = batchscriptfile + ".e"
122
123 with open(batchscriptfile, "wt") as batchfp:
124 batchfp.writelines(batchscript)
125 batchfp.close()
126 deps = ""
127 if idx in dependencies:
128 values = " "
129 for jobid in dependencies[idx]:
130 # Avoid dependancies of done jobs
131 if (
132 not self._dont_resubmit_completed_jobs
133 or not cache_doneness_per_node[jobid]
134 ):
135 values += "${{{0}}},".format(
136 make_job_name(jobid, nodes)
137 )
138 if (
139 values != " "
140 ): # i.e. if some jobs were added to dependency list
141 values = values.rstrip(",")
142 deps = "-hold_jid%s" % values
143 jobname = make_job_name(idx, nodes)
144 # Do not use default output locations if they are set in self._qsub_args
145 stderrFile = ""
146 if self._qsub_args.count("-e ") == 0:
147 stderrFile = "-e {errFile}".format(errFile=batchscripterrfile)
148 stdoutFile = ""
149 if self._qsub_args.count("-o ") == 0:
150 stdoutFile = "-o {outFile}".format(outFile=batchscriptoutfile)
151 full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\n".format(
152 jobNm=jobname,
153 outFileOption=stdoutFile,
154 errFileOption=stderrFile,
155 extraQSubArgs=qsub_args,
156 dependantIndex=deps,
157 batchscript=batchscriptfile,
158 )
159 fp.writelines(full_line)
160 cmd = CommandLine(
161 "bash",
162 environ=dict(os.environ),
163 resource_monitor=False,
164 terminal_output="allatonce",
165 )
166 cmd.inputs.args = "%s" % submitjobsfile
167 cmd.run()
168 logger.info("submitted all jobs to queue")
169
[end of nipype/pipeline/plugins/sgegraph.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py
--- a/nipype/pipeline/plugins/sgegraph.py
+++ b/nipype/pipeline/plugins/sgegraph.py
@@ -148,7 +148,7 @@
stdoutFile = ""
if self._qsub_args.count("-o ") == 0:
stdoutFile = "-o {outFile}".format(outFile=batchscriptoutfile)
- full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\n".format(
+ full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '/^Your job/{{print $3}}')\n".format(
jobNm=jobname,
outFileOption=stdoutFile,
errFileOption=stderrFile,
| {"golden_diff": "diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py\n--- a/nipype/pipeline/plugins/sgegraph.py\n+++ b/nipype/pipeline/plugins/sgegraph.py\n@@ -148,7 +148,7 @@\n stdoutFile = \"\"\n if self._qsub_args.count(\"-o \") == 0:\n stdoutFile = \"-o {outFile}\".format(outFile=batchscriptoutfile)\n- full_line = \"{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\\n\".format(\n+ full_line = \"{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '/^Your job/{{print $3}}')\\n\".format(\n jobNm=jobname,\n outFileOption=stdoutFile,\n errFileOption=stderrFile,\n", "issue": "sgegraph only submits the next processing node\n### Summary\r\nthe `SGEGraph` plugin isn't submitting the entire workflow only the next required step.\r\n\r\n### Actual behavior\r\n\r\nWhen calling `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})` only the next processing node is submitted. subsequent calls are needed to iteratively processes subsequent nodes.\r\n\r\n`workflow.run(plugin='SGE)` works as expected. submitting one job waiting until completion then submitting the next.\r\n\r\n### Expected behavior\r\nI would expect that `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})` would submit all processing jobs at once with later jobs being held until the earlier required jobs are finished.\r\n\r\n### How to replicate the behavior\r\nI'm new to nipype so I'm not sure how to get sample/example data to provide a minimal example but I think that any of the workflows in the example folder would demonstrate this issue.\r\n\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n200207-13:43:08,275 nipype.utils INFO:\r\n\t No new version available.\r\n{'commit_hash': '792988f19',\r\n 'commit_source': 'installation',\r\n 'networkx_version': '2.4',\r\n 'nibabel_version': '3.0.1',\r\n 'nipype_version': '1.4.1',\r\n 'numpy_version': '1.18.1',\r\n 'pkg_path': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/lib/python3.8/site-packages/nipype',\r\n 'scipy_version': '1.3.2',\r\n 'sys_executable': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/bin/python',\r\n 'sys_platform': 'linux',\r\n 'sys_version': '3.8.1 (default, Jan 8 2020, 22:29:32) \\n[GCC 7.3.0]',\r\n 'traits_version': '5.2.0'}\r\n```\r\n\r\n### Execution environment\r\n\r\nChoose one\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Parallel workflow execution via SGE\n\"\"\"\nimport os\nimport sys\n\nfrom ...interfaces.base import CommandLine\nfrom .base import GraphPluginBase, logger\n\n\ndef node_completed_status(checknode):\n \"\"\"\n A function to determine if a node has previously completed it's work\n :param checknode: The node to check the run status\n :return: boolean value True indicates that the node does not need to be run.\n \"\"\"\n \"\"\" TODO: place this in the base.py file and refactor \"\"\"\n node_state_does_not_require_overwrite = checknode.overwrite is False or (\n checknode.overwrite is None and not checknode._interface.always_run\n )\n hash_exists = False\n try:\n hash_exists, _, _, _ = checknode.hash_exists()\n except 
Exception:\n hash_exists = False\n return hash_exists and node_state_does_not_require_overwrite\n\n\nclass SGEGraphPlugin(GraphPluginBase):\n \"\"\"Execute using SGE\n\n The plugin_args input to run can be used to control the SGE execution.\n Currently supported options are:\n\n - template : template to use for batch job submission\n - qsub_args : arguments to be prepended to the job execution script in the\n qsub call\n\n \"\"\"\n\n _template = \"\"\"\n#!/bin/bash\n#$ -V\n#$ -S /bin/bash\n\"\"\"\n\n def __init__(self, **kwargs):\n self._qsub_args = \"\"\n self._dont_resubmit_completed_jobs = False\n if \"plugin_args\" in kwargs and kwargs[\"plugin_args\"]:\n plugin_args = kwargs[\"plugin_args\"]\n if \"template\" in plugin_args:\n self._template = plugin_args[\"template\"]\n if os.path.isfile(self._template):\n self._template = open(self._template).read()\n if \"qsub_args\" in plugin_args:\n self._qsub_args = plugin_args[\"qsub_args\"]\n if \"dont_resubmit_completed_jobs\" in plugin_args:\n self._dont_resubmit_completed_jobs = plugin_args[\n \"dont_resubmit_completed_jobs\"\n ]\n super(SGEGraphPlugin, self).__init__(**kwargs)\n\n def _submit_graph(self, pyfiles, dependencies, nodes):\n def make_job_name(jobnumber, nodeslist):\n \"\"\"\n - jobnumber: The index number of the job to create\n - nodeslist: The name of the node being processed\n - return: A string representing this job to be displayed by SGE\n \"\"\"\n job_name = \"j{0}_{1}\".format(jobnumber, nodeslist[jobnumber]._id)\n # Condition job_name to be a valid bash identifier (i.e. - is invalid)\n job_name = job_name.replace(\"-\", \"_\").replace(\".\", \"_\").replace(\":\", \"_\")\n return job_name\n\n batch_dir, _ = os.path.split(pyfiles[0])\n submitjobsfile = os.path.join(batch_dir, \"submit_jobs.sh\")\n\n cache_doneness_per_node = dict()\n if (\n self._dont_resubmit_completed_jobs\n ): # A future parameter for controlling this behavior could be added here\n for idx, pyscript in enumerate(pyfiles):\n node = nodes[idx]\n node_status_done = node_completed_status(node)\n\n # if the node itself claims done, then check to ensure all\n # dependancies are also done\n if node_status_done and idx in dependencies:\n for child_idx in dependencies[idx]:\n if child_idx in cache_doneness_per_node:\n child_status_done = cache_doneness_per_node[child_idx]\n else:\n child_status_done = node_completed_status(nodes[child_idx])\n node_status_done = node_status_done and child_status_done\n\n cache_doneness_per_node[idx] = node_status_done\n\n with open(submitjobsfile, \"wt\") as fp:\n fp.writelines(\"#!/usr/bin/env bash\\n\")\n fp.writelines(\"# Condense format attempted\\n\")\n for idx, pyscript in enumerate(pyfiles):\n node = nodes[idx]\n if cache_doneness_per_node.get(idx, False):\n continue\n else:\n template, qsub_args = self._get_args(\n node, [\"template\", \"qsub_args\"]\n )\n\n batch_dir, name = os.path.split(pyscript)\n name = \".\".join(name.split(\".\")[:-1])\n batchscript = \"\\n\".join(\n (template, \"%s %s\" % (sys.executable, pyscript))\n )\n batchscriptfile = os.path.join(\n batch_dir, \"batchscript_%s.sh\" % name\n )\n\n batchscriptoutfile = batchscriptfile + \".o\"\n batchscripterrfile = batchscriptfile + \".e\"\n\n with open(batchscriptfile, \"wt\") as batchfp:\n batchfp.writelines(batchscript)\n batchfp.close()\n deps = \"\"\n if idx in dependencies:\n values = \" \"\n for jobid in dependencies[idx]:\n # Avoid dependancies of done jobs\n if (\n not self._dont_resubmit_completed_jobs\n or not cache_doneness_per_node[jobid]\n ):\n 
values += \"${{{0}}},\".format(\n make_job_name(jobid, nodes)\n )\n if (\n values != \" \"\n ): # i.e. if some jobs were added to dependency list\n values = values.rstrip(\",\")\n deps = \"-hold_jid%s\" % values\n jobname = make_job_name(idx, nodes)\n # Do not use default output locations if they are set in self._qsub_args\n stderrFile = \"\"\n if self._qsub_args.count(\"-e \") == 0:\n stderrFile = \"-e {errFile}\".format(errFile=batchscripterrfile)\n stdoutFile = \"\"\n if self._qsub_args.count(\"-o \") == 0:\n stdoutFile = \"-o {outFile}\".format(outFile=batchscriptoutfile)\n full_line = \"{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\\n\".format(\n jobNm=jobname,\n outFileOption=stdoutFile,\n errFileOption=stderrFile,\n extraQSubArgs=qsub_args,\n dependantIndex=deps,\n batchscript=batchscriptfile,\n )\n fp.writelines(full_line)\n cmd = CommandLine(\n \"bash\",\n environ=dict(os.environ),\n resource_monitor=False,\n terminal_output=\"allatonce\",\n )\n cmd.inputs.args = \"%s\" % submitjobsfile\n cmd.run()\n logger.info(\"submitted all jobs to queue\")\n", "path": "nipype/pipeline/plugins/sgegraph.py"}]} | 2,890 | 241 |
gh_patches_debug_36004 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5336 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
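A minimal illustration of the two styles (the values are illustrative, not taken from the codebase):
```
# Python 2 sketch: both prints yield 0.5 instead of the truncated 0.
from __future__ import division  # makes `/` true division for the whole module

x, y = 1, 2
print(x / y)       # 0.5 with the __future__ import (would be 0 without it)
print(1. * x / y)  # 0.5 via float coercion -- the style this issue asks to avoid
```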
</issue>
<code>
[start of great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py]
1 import copy
2 import logging
3 from typing import Any, Dict
4
5 import numpy as np
6 import pandas as pd
7
8 from great_expectations.core.util import (
9 convert_to_json_serializable,
10 get_sql_dialect_floating_point_infinity_value,
11 )
12 from great_expectations.execution_engine import (
13 PandasExecutionEngine,
14 SparkDFExecutionEngine,
15 SqlAlchemyExecutionEngine,
16 )
17 from great_expectations.execution_engine.execution_engine import MetricDomainTypes
18 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
19 ColumnAggregateMetricProvider,
20 )
21 from great_expectations.expectations.metrics.import_manager import Bucketizer, F, sa
22 from great_expectations.expectations.metrics.metric_provider import metric_value
23
24 logger = logging.getLogger(__name__)
25
26
27 class ColumnHistogram(ColumnAggregateMetricProvider):
28 metric_name = "column.histogram"
29 value_keys = ("bins",)
30
31 @metric_value(engine=PandasExecutionEngine)
32 def _pandas(
33 cls,
34 execution_engine: PandasExecutionEngine,
35 metric_domain_kwargs: Dict,
36 metric_value_kwargs: Dict,
37 metrics: Dict[str, Any],
38 runtime_configuration: Dict,
39 ):
40 df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
41 domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
42 )
43 column = accessor_domain_kwargs["column"]
44 bins = metric_value_kwargs["bins"]
45 column_series: pd.Series = df[column]
46 column_null_elements_cond: pd.Series = column_series.isnull()
47 column_nonnull_elements: pd.Series = column_series[~column_null_elements_cond]
48 hist, bin_edges = np.histogram(column_nonnull_elements, bins, density=False)
49 return list(hist)
50
51 @metric_value(engine=SqlAlchemyExecutionEngine)
52 def _sqlalchemy(
53 cls,
54 execution_engine: SqlAlchemyExecutionEngine,
55 metric_domain_kwargs: Dict,
56 metric_value_kwargs: Dict,
57 metrics: Dict[str, Any],
58 runtime_configuration: Dict,
59 ):
60 """return a list of counts corresponding to bins
61
62 Args:
63 column: the name of the column for which to get the histogram
64 bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
65 """
66 selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
67 domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
68 )
69 column = accessor_domain_kwargs["column"]
70 bins = metric_value_kwargs["bins"]
71
72 case_conditions = []
73 idx = 0
74 if isinstance(bins, np.ndarray):
75 bins = bins.tolist()
76 else:
77 bins = list(bins)
78
79 # If we have an infinite lower bound, don't express that in sql
80 if (
81 bins[0]
82 == get_sql_dialect_floating_point_infinity_value(
83 schema="api_np", negative=True
84 )
85 ) or (
86 bins[0]
87 == get_sql_dialect_floating_point_infinity_value(
88 schema="api_cast", negative=True
89 )
90 ):
91 case_conditions.append(
92 sa.func.sum(
93 sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)
94 ).label(f"bin_{str(idx)}")
95 )
96 idx += 1
97
98 for idx in range(idx, len(bins) - 2):
99 case_conditions.append(
100 sa.func.sum(
101 sa.case(
102 [
103 (
104 sa.and_(
105 bins[idx] <= sa.column(column),
106 sa.column(column) < bins[idx + 1],
107 ),
108 1,
109 )
110 ],
111 else_=0,
112 )
113 ).label(f"bin_{str(idx)}")
114 )
115
116 if (
117 bins[-1]
118 == get_sql_dialect_floating_point_infinity_value(
119 schema="api_np", negative=False
120 )
121 ) or (
122 bins[-1]
123 == get_sql_dialect_floating_point_infinity_value(
124 schema="api_cast", negative=False
125 )
126 ):
127 case_conditions.append(
128 sa.func.sum(
129 sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)
130 ).label(f"bin_{str(len(bins) - 1)}")
131 )
132 else:
133 case_conditions.append(
134 sa.func.sum(
135 sa.case(
136 [
137 (
138 sa.and_(
139 bins[-2] <= sa.column(column),
140 sa.column(column) <= bins[-1],
141 ),
142 1,
143 )
144 ],
145 else_=0,
146 )
147 ).label(f"bin_{str(len(bins) - 1)}")
148 )
149
150 query = (
151 sa.select(case_conditions)
152 .where(
153 sa.column(column) != None,
154 )
155 .select_from(selectable)
156 )
157
158 # Run the data through convert_to_json_serializable to ensure we do not have Decimal types
159 hist = convert_to_json_serializable(
160 list(execution_engine.engine.execute(query).fetchone())
161 )
162 return hist
163
164 @metric_value(engine=SparkDFExecutionEngine)
165 def _spark(
166 cls,
167 execution_engine: SparkDFExecutionEngine,
168 metric_domain_kwargs: Dict,
169 metric_value_kwargs: Dict,
170 metrics: Dict[str, Any],
171 runtime_configuration: Dict,
172 ):
173 df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
174 domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
175 )
176 bins = metric_value_kwargs["bins"]
177 column = metric_domain_kwargs["column"]
178
179 """return a list of counts corresponding to bins"""
180 bins = list(
181 copy.deepcopy(bins)
182 ) # take a copy since we are inserting and popping
183 if bins[0] == -np.inf or bins[0] == -float("inf"):
184 added_min = False
185 bins[0] = -float("inf")
186 else:
187 added_min = True
188 bins.insert(0, -float("inf"))
189
190 if bins[-1] == np.inf or bins[-1] == float("inf"):
191 added_max = False
192 bins[-1] = float("inf")
193 else:
194 added_max = True
195 bins.append(float("inf"))
196
197 temp_column = df.select(column).where(F.col(column).isNotNull())
198 bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol="buckets")
199 bucketed = bucketizer.setHandleInvalid("skip").transform(temp_column)
200
201 # This is painful to do, but: bucketizer cannot handle values outside of a range
202 # (hence adding -/+ infinity above)
203
204 # Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound
205 # for all but the last bin
206
207 # But, since the last bin in our case will often be +infinity, we need to
208 # find the number of values exactly equal to the upper bound to add those
209
210 # We'll try for an optimization by asking for it at the same time
211 if added_max:
212 upper_bound_count = (
213 temp_column.select(column).filter(F.col(column) == bins[-2]).count()
214 )
215 else:
216 upper_bound_count = 0
217
218 hist_rows = bucketed.groupBy("buckets").count().collect()
219 # Spark only returns buckets that have nonzero counts.
220 hist = [0] * (len(bins) - 1)
221 for row in hist_rows:
222 hist[int(row["buckets"])] = row["count"]
223
224 hist[-2] += upper_bound_count
225
226 if added_min:
227 below_bins = hist.pop(0)
228 bins.pop(0)
229 if below_bins > 0:
230 logger.warning("Discarding histogram values below lowest bin.")
231
232 if added_max:
233 above_bins = hist.pop(-1)
234 bins.pop(-1)
235 if above_bins > 0:
236 logger.warning("Discarding histogram values above highest bin.")
237
238 return hist
239
[end of great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py
@@ -95,15 +95,19 @@
)
idx += 1
+ negative_boundary: float
+ positive_boundary: float
for idx in range(idx, len(bins) - 2):
+ negative_boundary = float(bins[idx])
+ positive_boundary = float(bins[idx + 1])
case_conditions.append(
sa.func.sum(
sa.case(
[
(
sa.and_(
- bins[idx] <= sa.column(column),
- sa.column(column) < bins[idx + 1],
+ negative_boundary <= sa.column(column),
+ sa.column(column) < positive_boundary,
),
1,
)
@@ -124,20 +128,23 @@
schema="api_cast", negative=False
)
):
+ negative_boundary = float(bins[-2])
case_conditions.append(
sa.func.sum(
- sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)
+ sa.case([(negative_boundary <= sa.column(column), 1)], else_=0)
).label(f"bin_{str(len(bins) - 1)}")
)
else:
+ negative_boundary = float(bins[-2])
+ positive_boundary = float(bins[-1])
case_conditions.append(
sa.func.sum(
sa.case(
[
(
sa.and_(
- bins[-2] <= sa.column(column),
- sa.column(column) <= bins[-1],
+ negative_boundary <= sa.column(column),
+ sa.column(column) <= positive_boundary,
),
1,
)
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py\n@@ -95,15 +95,19 @@\n )\n idx += 1\n \n+ negative_boundary: float\n+ positive_boundary: float\n for idx in range(idx, len(bins) - 2):\n+ negative_boundary = float(bins[idx])\n+ positive_boundary = float(bins[idx + 1])\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n- bins[idx] <= sa.column(column),\n- sa.column(column) < bins[idx + 1],\n+ negative_boundary <= sa.column(column),\n+ sa.column(column) < positive_boundary,\n ),\n 1,\n )\n@@ -124,20 +128,23 @@\n schema=\"api_cast\", negative=False\n )\n ):\n+ negative_boundary = float(bins[-2])\n case_conditions.append(\n sa.func.sum(\n- sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)\n+ sa.case([(negative_boundary <= sa.column(column), 1)], else_=0)\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n else:\n+ negative_boundary = float(bins[-2])\n+ positive_boundary = float(bins[-1])\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n- bins[-2] <= sa.column(column),\n- sa.column(column) <= bins[-1],\n+ negative_boundary <= sa.column(column),\n+ sa.column(column) <= positive_boundary,\n ),\n 1,\n )\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import copy\nimport logging\nfrom typing import Any, Dict\n\nimport numpy as np\nimport pandas as pd\n\nfrom great_expectations.core.util import (\n convert_to_json_serializable,\n get_sql_dialect_floating_point_infinity_value,\n)\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.execution_engine.execution_engine import MetricDomainTypes\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n)\nfrom great_expectations.expectations.metrics.import_manager import Bucketizer, F, sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\n\nlogger = logging.getLogger(__name__)\n\n\nclass ColumnHistogram(ColumnAggregateMetricProvider):\n metric_name = \"column.histogram\"\n value_keys = (\"bins\",)\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n column = accessor_domain_kwargs[\"column\"]\n bins = metric_value_kwargs[\"bins\"]\n column_series: pd.Series = df[column]\n column_null_elements_cond: pd.Series = column_series.isnull()\n column_nonnull_elements: pd.Series = column_series[~column_null_elements_cond]\n hist, bin_edges = np.histogram(column_nonnull_elements, bins, density=False)\n return list(hist)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: 
Dict,\n ):\n \"\"\"return a list of counts corresponding to bins\n\n Args:\n column: the name of the column for which to get the histogram\n bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching\n \"\"\"\n selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n column = accessor_domain_kwargs[\"column\"]\n bins = metric_value_kwargs[\"bins\"]\n\n case_conditions = []\n idx = 0\n if isinstance(bins, np.ndarray):\n bins = bins.tolist()\n else:\n bins = list(bins)\n\n # If we have an infinite lower bound, don't express that in sql\n if (\n bins[0]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_np\", negative=True\n )\n ) or (\n bins[0]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_cast\", negative=True\n )\n ):\n case_conditions.append(\n sa.func.sum(\n sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)\n ).label(f\"bin_{str(idx)}\")\n )\n idx += 1\n\n for idx in range(idx, len(bins) - 2):\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n bins[idx] <= sa.column(column),\n sa.column(column) < bins[idx + 1],\n ),\n 1,\n )\n ],\n else_=0,\n )\n ).label(f\"bin_{str(idx)}\")\n )\n\n if (\n bins[-1]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_np\", negative=False\n )\n ) or (\n bins[-1]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_cast\", negative=False\n )\n ):\n case_conditions.append(\n sa.func.sum(\n sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n else:\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n bins[-2] <= sa.column(column),\n sa.column(column) <= bins[-1],\n ),\n 1,\n )\n ],\n else_=0,\n )\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n\n query = (\n sa.select(case_conditions)\n .where(\n sa.column(column) != None,\n )\n .select_from(selectable)\n )\n\n # Run the data through convert_to_json_serializable to ensure we do not have Decimal types\n hist = convert_to_json_serializable(\n list(execution_engine.engine.execute(query).fetchone())\n )\n return hist\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n bins = metric_value_kwargs[\"bins\"]\n column = metric_domain_kwargs[\"column\"]\n\n \"\"\"return a list of counts corresponding to bins\"\"\"\n bins = list(\n copy.deepcopy(bins)\n ) # take a copy since we are inserting and popping\n if bins[0] == -np.inf or bins[0] == -float(\"inf\"):\n added_min = False\n bins[0] = -float(\"inf\")\n else:\n added_min = True\n bins.insert(0, -float(\"inf\"))\n\n if bins[-1] == np.inf or bins[-1] == float(\"inf\"):\n added_max = False\n bins[-1] = float(\"inf\")\n else:\n added_max = True\n bins.append(float(\"inf\"))\n\n temp_column = df.select(column).where(F.col(column).isNotNull())\n bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol=\"buckets\")\n bucketed = bucketizer.setHandleInvalid(\"skip\").transform(temp_column)\n\n # This is painful to do, but: bucketizer cannot handle values outside of a range\n # (hence adding -/+ infinity above)\n\n # Further, it *always* follows 
the numpy convention of lower_bound <= bin < upper_bound\n # for all but the last bin\n\n # But, since the last bin in our case will often be +infinity, we need to\n # find the number of values exactly equal to the upper bound to add those\n\n # We'll try for an optimization by asking for it at the same time\n if added_max:\n upper_bound_count = (\n temp_column.select(column).filter(F.col(column) == bins[-2]).count()\n )\n else:\n upper_bound_count = 0\n\n hist_rows = bucketed.groupBy(\"buckets\").count().collect()\n # Spark only returns buckets that have nonzero counts.\n hist = [0] * (len(bins) - 1)\n for row in hist_rows:\n hist[int(row[\"buckets\"])] = row[\"count\"]\n\n hist[-2] += upper_bound_count\n\n if added_min:\n below_bins = hist.pop(0)\n bins.pop(0)\n if below_bins > 0:\n logger.warning(\"Discarding histogram values below lowest bin.\")\n\n if added_max:\n above_bins = hist.pop(-1)\n bins.pop(-1)\n if above_bins > 0:\n logger.warning(\"Discarding histogram values above highest bin.\")\n\n return hist\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py"}]} | 2,889 | 429 |
gh_patches_debug_33625 | rasdani/github-patches | git_diff | robocorp__rpaframework-971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RPA.Browser.Selenium fails in cloud workers if download=TRUE
2023-05-08 12:47:36: ==============================================================================
2023-05-08 12:47:36: Tasks
2023-05-08 12:47:36: ==============================================================================
2023-05-08 12:47:46: Minimal task | FAIL |
2023-05-08 12:47:46: BrowserNotFoundError: Failed to start a browser:
2023-05-08 12:47:46: - Chrome: Message: session not created: This version of ChromeDriver only supports Chrome version 113
2023-05-08 12:47:46: Current browser version is 105.0.5195.102 with binary path /usr/bin/chromium-browser
2023-05-08 12:47:46: Stacktrace:
2023-05-08 12:47:46: #0 0x55e4a398c133 <unknown>
2023-05-08 12:47:46: #1 0x55e4a36c0966 <unknown>
2023-05-08 12:47:46: #2 0x55e4a36ee5ec <unknown>
2023-05-08 12:47:46: #3 0x55e4a36e98da <unknown>
2023-05-08 12:47:46: #4 0x55e4a36e607b <unknown>
2023-05-08 12:47:46: #5 0x55e4a37259ad <unknown>
2023-05-08 12:47:46: #6 0x55e4a372518f <unknown>
2023-05-08 12:47:46: #7 0x55e4a371c9a3 <unknown>
2023-05-08 12:47:46: #8 0x55e4a36f146a <unknown>
2023-05-08 12:47:46: #9 0x55e4a36f255e <unknown>
2023-05-08 12:47:46: #10 0x55e4a394bcae <unknown>
2023-05-08 12:47:46: #11 0x55e4a394f8fe <unknown>
2023-05-08 12:47:46: #12 0x55e4a3958f20 <unknown>
2023-05-08 12:47:46: #13 0x55e4a3950923 <unknown>
2023-05-08 12:47:46: [ Message content over the limit has been removed. ]
2023-05-08 12:47:46: #0 0x55c657673273 <unknown>
2023-05-08 12:47:46: #1 0x55c65738e7a1 <unknown>
2023-05-08 12:47:46: #2 0x55c6573babeb <unknown>
2023-05-08 12:47:46: #3 0x55c6573b5fdc <unknown>
2023-05-08 12:47:46: #4 0x55c6573f7695 <unknown>
2023-05-08 12:47:46: #5 0x55c6573ee723 <unknown>
2023-05-08 12:47:46: #6 0x55c6573c17d1 <unknown>
2023-05-08 12:47:46: #7 0x55c6573c2a0e <unknown>
2023-05-08 12:47:46: #8 0x55c657641390 <unknown>
2023-05-08 12:47:46: #9 0x55c657643a9e <unknown>
2023-05-08 12:47:46: #10 0x55c6576434b9 <unknown>
2023-05-08 12:47:46: #11 0x55c6576441a5 <unknown>
2023-05-08 12:47:46: #12 0x55c65764ae0b <unknown>
2023-05-08 12:47:46: #13 0x55c65764456e <unknown>
2023-05-08 12:47:46: #14 0x55c657625373 <unknown>
2023-05-08 12:47:46: #15 0x55c65765fc58 <unknown>
2023-05-08 12:47:46: #16 0x55c65765fd94 <unknown>
2023-05-08 12:47:46: #17 0x55c65766d5c6 <unknown>
2023-05-08 12:47:46: #18 0x7f8de95b36db start_thread
2023-05-08 12:47:46: ------------------------------------------------------------------------------
2023-05-08 12:47:46: Tasks | FAIL |
2023-05-08 12:47:46: 1 task, 0 passed, 1 failed
2023-05-08 12:47:46: ==============================================================================
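One possible direction — sketched here with the `ChromeType` helpers that webdriver-manager ships, and not necessarily the final fix — is to request a Chromium-matched driver when only a Chromium binary (such as `/usr/bin/chromium-browser` above) is detected:
```
# Sketch only: prefer a Chromium-matched chromedriver when Google Chrome is absent.
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.core.utils import ChromeType, get_browser_version_from_os


def pick_chrome_type():
    # Returns CHROMIUM when no Google Chrome version is found but a Chromium one is.
    if not get_browser_version_from_os(ChromeType.GOOGLE) and get_browser_version_from_os(
        ChromeType.CHROMIUM
    ):
        return ChromeType.CHROMIUM
    return ChromeType.GOOGLE


driver_path = ChromeDriverManager(chrome_type=pick_chrome_type()).install()
```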
</issue>
<code>
[start of packages/core/src/RPA/core/webdriver.py]
1 import contextlib
2 import logging
3 import os
4 import platform
5 import stat
6 from pathlib import Path
7 from typing import List, Optional
8
9 import requests
10 from requests import Response
11 from selenium import webdriver
12 from selenium.webdriver.common.service import Service
13 from selenium.webdriver.remote.webdriver import WebDriver
14 from webdriver_manager.chrome import ChromeDriverManager
15 from webdriver_manager.core.download_manager import WDMDownloadManager
16 from webdriver_manager.core.http import WDMHttpClient
17 from webdriver_manager.core.manager import DriverManager
18 from webdriver_manager.core.utils import os_name as get_os_name
19 from webdriver_manager.firefox import GeckoDriverManager
20 from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager
21 from webdriver_manager.opera import OperaDriverManager
22
23 from RPA.core.robocorp import robocorp_home
24
25
26 LOGGER = logging.getLogger(__name__)
27
28 DRIVER_ROOT = robocorp_home() / "webdrivers"
29 AVAILABLE_DRIVERS = {
30 # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.
31 "chrome": ChromeDriverManager,
32 "firefox": GeckoDriverManager,
33 "gecko": GeckoDriverManager,
34 "mozilla": GeckoDriverManager,
35 # NOTE: Selenium 4 dropped support for Opera.
36 # (https://github.com/SeleniumHQ/selenium/issues/10835)
37 "opera": OperaDriverManager,
38 # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.
39 "edge": EdgeChromiumDriverManager,
40 "chromiumedge": EdgeChromiumDriverManager,
41 # NOTE: IE is discontinued and not supported/encouraged anymore.
42 "ie": IEDriverManager,
43 }
44 # Available `WebDriver` classes in Selenium.
45 SUPPORTED_BROWSERS = dict(
46 {name: name.capitalize() for name in AVAILABLE_DRIVERS},
47 **{"chromiumedge": "ChromiumEdge"},
48 )
49 _DRIVER_PREFERENCE = {
50 "Windows": ["Chrome", "Firefox", "Edge"],
51 "Linux": ["Chrome", "Firefox", "Edge"],
52 "Darwin": ["Chrome", "Firefox", "Edge", "Safari"],
53 "default": ["Chrome", "Firefox"],
54 }
55
56
57 def _get_browser_order_from_env() -> Optional[List[str]]:
58 browsers: str = os.getenv("RPA_SELENIUM_BROWSER_ORDER", "")
59 if browsers:
60 return [browser.strip() for browser in browsers.split(sep=",")]
61
62 return None # meaning there's no env var to control the order
63
64
65 def get_browser_order() -> List[str]:
66 """Get a list of preferred browsers based on the environment variable
67 `RPA_SELENIUM_BROWSER_ORDER` if set.
68
69 The OS dictates the order if no such env var is set.
70 """
71 browsers: Optional[List[str]] = _get_browser_order_from_env()
72 if browsers:
73 return browsers
74
75 return _DRIVER_PREFERENCE.get(platform.system(), _DRIVER_PREFERENCE["default"])
76
77
78 class Downloader(WDMHttpClient):
79
80 """Custom downloader which disables download progress reporting."""
81
82 def get(self, url, **kwargs) -> Response:
83 resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)
84 self.validate_response(resp)
85 return resp
86
87
88 @contextlib.contextmanager
89 def suppress_logging():
90 """Suppress webdriver-manager logging."""
91 wdm_log = "WDM_LOG"
92 original_value = os.getenv(wdm_log, "")
93 try:
94 os.environ[wdm_log] = str(logging.NOTSET)
95 yield
96 finally:
97 os.environ[wdm_log] = original_value
98
99
100 def start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:
101 """Start a webdriver with the given options."""
102 browser = browser.strip()
103 webdriver_factory = getattr(webdriver, browser, None)
104 if not webdriver_factory:
105 raise ValueError(f"Unsupported Selenium browser: {browser}")
106
107 # NOTE: It is recommended to pass a `service` rather than deprecated `options`.
108 driver = webdriver_factory(service=service, **options)
109 return driver
110
111
112 def _to_manager(browser: str, *, root: Path) -> DriverManager:
113 browser = browser.strip()
114 manager_factory = AVAILABLE_DRIVERS.get(browser.lower())
115 if not manager_factory:
116 raise ValueError(
117 f"Unsupported browser {browser!r} for webdriver download!"
118 f" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})"
119 )
120
121 downloader = Downloader()
122 download_manager = WDMDownloadManager(downloader)
123 manager = manager_factory(path=str(root), download_manager=download_manager)
124 return manager
125
126
127 def _set_executable(path: str) -> None:
128 st = os.stat(path)
129 os.chmod(
130 path,
131 st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,
132 )
133
134
135 def download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:
136 """Download a webdriver binary for the given browser and return the path to it."""
137 manager = _to_manager(browser, root=root)
138 driver = manager.driver
139 resolved_os = getattr(driver, "os_type", driver.get_os_type())
140 os_name = get_os_name()
141 if os_name.lower() not in resolved_os.lower():
142 LOGGER.warning(
143 "Attempting to download incompatible driver for OS %r on OS %r! Skip",
144 resolved_os,
145 os_name,
146 )
147 return None # incompatible driver download attempt
148
149 with suppress_logging():
150 path: str = manager.install()
151 if platform.system() != "Windows":
152 _set_executable(path)
153 LOGGER.info("Downloaded webdriver to: %s", path)
154 return path
155
[end of packages/core/src/RPA/core/webdriver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py
--- a/packages/core/src/RPA/core/webdriver.py
+++ b/packages/core/src/RPA/core/webdriver.py
@@ -1,4 +1,5 @@
import contextlib
+import functools
import logging
import os
import platform
@@ -15,6 +16,7 @@
from webdriver_manager.core.download_manager import WDMDownloadManager
from webdriver_manager.core.http import WDMHttpClient
from webdriver_manager.core.manager import DriverManager
+from webdriver_manager.core.utils import ChromeType, get_browser_version_from_os
from webdriver_manager.core.utils import os_name as get_os_name
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager
@@ -109,6 +111,14 @@
return driver
+def _is_chromium() -> bool:
+ """Detects if Chromium is used instead of Chrome no matter the platform."""
+ is_browser = lambda browser_type: bool( # noqa: E731
+ get_browser_version_from_os(browser_type)
+ )
+ return not is_browser(ChromeType.GOOGLE) and is_browser(ChromeType.CHROMIUM)
+
+
def _to_manager(browser: str, *, root: Path) -> DriverManager:
browser = browser.strip()
manager_factory = AVAILABLE_DRIVERS.get(browser.lower())
@@ -118,6 +128,10 @@
f" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})"
)
+ if manager_factory == ChromeDriverManager and _is_chromium():
+ manager_factory = functools.partial(
+ manager_factory, chrome_type=ChromeType.CHROMIUM
+ )
downloader = Downloader()
download_manager = WDMDownloadManager(downloader)
manager = manager_factory(path=str(root), download_manager=download_manager)
| {"golden_diff": "diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py\n--- a/packages/core/src/RPA/core/webdriver.py\n+++ b/packages/core/src/RPA/core/webdriver.py\n@@ -1,4 +1,5 @@\n import contextlib\n+import functools\n import logging\n import os\n import platform\n@@ -15,6 +16,7 @@\n from webdriver_manager.core.download_manager import WDMDownloadManager\n from webdriver_manager.core.http import WDMHttpClient\n from webdriver_manager.core.manager import DriverManager\n+from webdriver_manager.core.utils import ChromeType, get_browser_version_from_os\n from webdriver_manager.core.utils import os_name as get_os_name\n from webdriver_manager.firefox import GeckoDriverManager\n from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\n@@ -109,6 +111,14 @@\n return driver\n \n \n+def _is_chromium() -> bool:\n+ \"\"\"Detects if Chromium is used instead of Chrome no matter the platform.\"\"\"\n+ is_browser = lambda browser_type: bool( # noqa: E731\n+ get_browser_version_from_os(browser_type)\n+ )\n+ return not is_browser(ChromeType.GOOGLE) and is_browser(ChromeType.CHROMIUM)\n+\n+\n def _to_manager(browser: str, *, root: Path) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n@@ -118,6 +128,10 @@\n f\" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})\"\n )\n \n+ if manager_factory == ChromeDriverManager and _is_chromium():\n+ manager_factory = functools.partial(\n+ manager_factory, chrome_type=ChromeType.CHROMIUM\n+ )\n downloader = Downloader()\n download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n", "issue": "RPA.Browser.Selenium fails in cloud workers if download=TRUE\n2023-05-08 12:47:36: ==============================================================================\r\n2023-05-08 12:47:36: Tasks \r\n2023-05-08 12:47:36: ==============================================================================\r\n2023-05-08 12:47:46: Minimal task | FAIL |\r\n2023-05-08 12:47:46: BrowserNotFoundError: Failed to start a browser:\r\n2023-05-08 12:47:46: - Chrome: Message: session not created: This version of ChromeDriver only supports Chrome version 113\r\n2023-05-08 12:47:46: Current browser version is 105.0.5195.102 with binary path /usr/bin/chromium-browser\r\n2023-05-08 12:47:46: Stacktrace:\r\n2023-05-08 12:47:46: #0 0x55e4a398c133 <unknown>\r\n2023-05-08 12:47:46: #1 0x55e4a36c0966 <unknown>\r\n2023-05-08 12:47:46: #2 0x55e4a36ee5ec <unknown>\r\n2023-05-08 12:47:46: #3 0x55e4a36e98da <unknown>\r\n2023-05-08 12:47:46: #4 0x55e4a36e607b <unknown>\r\n2023-05-08 12:47:46: #5 0x55e4a37259ad <unknown>\r\n2023-05-08 12:47:46: #6 0x55e4a372518f <unknown>\r\n2023-05-08 12:47:46: #7 0x55e4a371c9a3 <unknown>\r\n2023-05-08 12:47:46: #8 0x55e4a36f146a <unknown>\r\n2023-05-08 12:47:46: #9 0x55e4a36f255e <unknown>\r\n2023-05-08 12:47:46: #10 0x55e4a394bcae <unknown>\r\n2023-05-08 12:47:46: #11 0x55e4a394f8fe <unknown>\r\n2023-05-08 12:47:46: #12 0x55e4a3958f20 <unknown>\r\n2023-05-08 12:47:46: #13 0x55e4a3950923 <unknown>\r\n2023-05-08 12:47:46: [ Message content over the limit has been removed. 
]\r\n2023-05-08 12:47:46: #0 0x55c657673273 <unknown>\r\n2023-05-08 12:47:46: #1 0x55c65738e7a1 <unknown>\r\n2023-05-08 12:47:46: #2 0x55c6573babeb <unknown>\r\n2023-05-08 12:47:46: #3 0x55c6573b5fdc <unknown>\r\n2023-05-08 12:47:46: #4 0x55c6573f7695 <unknown>\r\n2023-05-08 12:47:46: #5 0x55c6573ee723 <unknown>\r\n2023-05-08 12:47:46: #6 0x55c6573c17d1 <unknown>\r\n2023-05-08 12:47:46: #7 0x55c6573c2a0e <unknown>\r\n2023-05-08 12:47:46: #8 0x55c657641390 <unknown>\r\n2023-05-08 12:47:46: #9 0x55c657643a9e <unknown>\r\n2023-05-08 12:47:46: #10 0x55c6576434b9 <unknown>\r\n2023-05-08 12:47:46: #11 0x55c6576441a5 <unknown>\r\n2023-05-08 12:47:46: #12 0x55c65764ae0b <unknown>\r\n2023-05-08 12:47:46: #13 0x55c65764456e <unknown>\r\n2023-05-08 12:47:46: #14 0x55c657625373 <unknown>\r\n2023-05-08 12:47:46: #15 0x55c65765fc58 <unknown>\r\n2023-05-08 12:47:46: #16 0x55c65765fd94 <unknown>\r\n2023-05-08 12:47:46: #17 0x55c65766d5c6 <unknown>\r\n2023-05-08 12:47:46: #18 0x7f8de95b36db start_thread\r\n2023-05-08 12:47:46: ------------------------------------------------------------------------------\r\n2023-05-08 12:47:46: Tasks | FAIL |\r\n2023-05-08 12:47:46: 1 task, 0 passed, 1 failed\r\n2023-05-08 12:47:46: ==============================================================================\n", "before_files": [{"content": "import contextlib\nimport logging\nimport os\nimport platform\nimport stat\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport requests\nfrom requests import Response\nfrom selenium import webdriver\nfrom selenium.webdriver.common.service import Service\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.core.download_manager import WDMDownloadManager\nfrom webdriver_manager.core.http import WDMHttpClient\nfrom webdriver_manager.core.manager import DriverManager\nfrom webdriver_manager.core.utils import os_name as get_os_name\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\n\nfrom RPA.core.robocorp import robocorp_home\n\n\nLOGGER = logging.getLogger(__name__)\n\nDRIVER_ROOT = robocorp_home() / \"webdrivers\"\nAVAILABLE_DRIVERS = {\n # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.\n \"chrome\": ChromeDriverManager,\n \"firefox\": GeckoDriverManager,\n \"gecko\": GeckoDriverManager,\n \"mozilla\": GeckoDriverManager,\n # NOTE: Selenium 4 dropped support for Opera.\n # (https://github.com/SeleniumHQ/selenium/issues/10835)\n \"opera\": OperaDriverManager,\n # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.\n \"edge\": EdgeChromiumDriverManager,\n \"chromiumedge\": EdgeChromiumDriverManager,\n # NOTE: IE is discontinued and not supported/encouraged anymore.\n \"ie\": IEDriverManager,\n}\n# Available `WebDriver` classes in Selenium.\nSUPPORTED_BROWSERS = dict(\n {name: name.capitalize() for name in AVAILABLE_DRIVERS},\n **{\"chromiumedge\": \"ChromiumEdge\"},\n)\n_DRIVER_PREFERENCE = {\n \"Windows\": [\"Chrome\", \"Firefox\", \"Edge\"],\n \"Linux\": [\"Chrome\", \"Firefox\", \"Edge\"],\n \"Darwin\": [\"Chrome\", \"Firefox\", \"Edge\", \"Safari\"],\n \"default\": [\"Chrome\", \"Firefox\"],\n}\n\n\ndef _get_browser_order_from_env() -> Optional[List[str]]:\n browsers: str = os.getenv(\"RPA_SELENIUM_BROWSER_ORDER\", \"\")\n if browsers:\n return [browser.strip() for browser in 
browsers.split(sep=\",\")]\n\n return None # meaning there's no env var to control the order\n\n\ndef get_browser_order() -> List[str]:\n \"\"\"Get a list of preferred browsers based on the environment variable\n `RPA_SELENIUM_BROWSER_ORDER` if set.\n\n The OS dictates the order if no such env var is set.\n \"\"\"\n browsers: Optional[List[str]] = _get_browser_order_from_env()\n if browsers:\n return browsers\n\n return _DRIVER_PREFERENCE.get(platform.system(), _DRIVER_PREFERENCE[\"default\"])\n\n\nclass Downloader(WDMHttpClient):\n\n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n\n def get(self, url, **kwargs) -> Response:\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n\n\[email protected]\ndef suppress_logging():\n \"\"\"Suppress webdriver-manager logging.\"\"\"\n wdm_log = \"WDM_LOG\"\n original_value = os.getenv(wdm_log, \"\")\n try:\n os.environ[wdm_log] = str(logging.NOTSET)\n yield\n finally:\n os.environ[wdm_log] = original_value\n\n\ndef start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:\n \"\"\"Start a webdriver with the given options.\"\"\"\n browser = browser.strip()\n webdriver_factory = getattr(webdriver, browser, None)\n if not webdriver_factory:\n raise ValueError(f\"Unsupported Selenium browser: {browser}\")\n\n # NOTE: It is recommended to pass a `service` rather than deprecated `options`.\n driver = webdriver_factory(service=service, **options)\n return driver\n\n\ndef _to_manager(browser: str, *, root: Path) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n if not manager_factory:\n raise ValueError(\n f\"Unsupported browser {browser!r} for webdriver download!\"\n f\" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})\"\n )\n\n downloader = Downloader()\n download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n return manager\n\n\ndef _set_executable(path: str) -> None:\n st = os.stat(path)\n os.chmod(\n path,\n st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,\n )\n\n\ndef download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:\n \"\"\"Download a webdriver binary for the given browser and return the path to it.\"\"\"\n manager = _to_manager(browser, root=root)\n driver = manager.driver\n resolved_os = getattr(driver, \"os_type\", driver.get_os_type())\n os_name = get_os_name()\n if os_name.lower() not in resolved_os.lower():\n LOGGER.warning(\n \"Attempting to download incompatible driver for OS %r on OS %r! Skip\",\n resolved_os,\n os_name,\n )\n return None # incompatible driver download attempt\n\n with suppress_logging():\n path: str = manager.install()\n if platform.system() != \"Windows\":\n _set_executable(path)\n LOGGER.info(\"Downloaded webdriver to: %s\", path)\n return path\n", "path": "packages/core/src/RPA/core/webdriver.py"}]} | 3,863 | 420 |
gh_patches_debug_63280 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1113 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rust hook requires `--path` attribute
Cargo has changed how packages get installed and requires an extra `--path <destination>` attribute.
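A rough sketch of what that means for the command pre-commit assembles (the root directory below is illustrative):
```
# Sketch: older Cargo accepted an install run from the crate directory; newer releases
# require an explicit `--path .` pointing at the package to install.
directory = "/tmp/rustenv-default"  # illustrative --root value, not a real pre-commit path
legacy_cmd = ("cargo", "install", "--bins", "--root", directory)
current_cmd = ("cargo", "install", "--path", ".", "--bins", "--root", directory)
```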
Symptom:
```
[INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt.
[INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default')
Return code: 101
Expected return code: 0
Output: (none)
Errors:
error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package.
```
I guess the fix should be done somewhere here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87
Do we want to make pre-commit compatible with multiple versions of cargo or just the latest one?
/cc @asottile @chriskuehl
</issue>
<code>
[start of pre_commit/languages/rust.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os.path
5
6 import toml
7
8 import pre_commit.constants as C
9 from pre_commit.envcontext import envcontext
10 from pre_commit.envcontext import Var
11 from pre_commit.languages import helpers
12 from pre_commit.util import clean_path_on_failure
13 from pre_commit.util import cmd_output
14
15
16 ENVIRONMENT_DIR = 'rustenv'
17 get_default_version = helpers.basic_get_default_version
18 healthy = helpers.basic_healthy
19
20
21 def get_env_patch(target_dir):
22 return (
23 (
24 'PATH',
25 (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),
26 ),
27 )
28
29
30 @contextlib.contextmanager
31 def in_env(prefix):
32 target_dir = prefix.path(
33 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
34 )
35 with envcontext(get_env_patch(target_dir)):
36 yield
37
38
39 def _add_dependencies(cargo_toml_path, additional_dependencies):
40 with open(cargo_toml_path, 'r+') as f:
41 cargo_toml = toml.load(f)
42 cargo_toml.setdefault('dependencies', {})
43 for dep in additional_dependencies:
44 name, _, spec = dep.partition(':')
45 cargo_toml['dependencies'][name] = spec or '*'
46 f.seek(0)
47 toml.dump(cargo_toml, f)
48 f.truncate()
49
50
51 def install_environment(prefix, version, additional_dependencies):
52 helpers.assert_version_default('rust', version)
53 directory = prefix.path(
54 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
55 )
56
57 # There are two cases where we might want to specify more dependencies:
58 # as dependencies for the library being built, and as binary packages
59 # to be `cargo install`'d.
60 #
61 # Unlike e.g. Python, if we just `cargo install` a library, it won't be
62 # used for compilation. And if we add a crate providing a binary to the
63 # `Cargo.toml`, the binary won't be built.
64 #
65 # Because of this, we allow specifying "cli" dependencies by prefixing
66 # with 'cli:'.
67 cli_deps = {
68 dep for dep in additional_dependencies if dep.startswith('cli:')
69 }
70 lib_deps = set(additional_dependencies) - cli_deps
71
72 if len(lib_deps) > 0:
73 _add_dependencies(prefix.path('Cargo.toml'), lib_deps)
74
75 with clean_path_on_failure(directory):
76 packages_to_install = {()}
77 for cli_dep in cli_deps:
78 cli_dep = cli_dep[len('cli:'):]
79 package, _, version = cli_dep.partition(':')
80 if version != '':
81 packages_to_install.add((package, '--version', version))
82 else:
83 packages_to_install.add((package,))
84
85 for package in packages_to_install:
86 cmd_output(
87 'cargo', 'install', '--bins', '--root', directory, *package,
88 cwd=prefix.prefix_dir
89 )
90
91
92 def run_hook(hook, file_args):
93 with in_env(hook.prefix):
94 return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)
95
[end of pre_commit/languages/rust.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py
--- a/pre_commit/languages/rust.py
+++ b/pre_commit/languages/rust.py
@@ -73,7 +73,7 @@
_add_dependencies(prefix.path('Cargo.toml'), lib_deps)
with clean_path_on_failure(directory):
- packages_to_install = {()}
+ packages_to_install = {('--path', '.')}
for cli_dep in cli_deps:
cli_dep = cli_dep[len('cli:'):]
package, _, version = cli_dep.partition(':')
| {"golden_diff": "diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py\n--- a/pre_commit/languages/rust.py\n+++ b/pre_commit/languages/rust.py\n@@ -73,7 +73,7 @@\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n \n with clean_path_on_failure(directory):\n- packages_to_install = {()}\n+ packages_to_install = {('--path', '.')}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n", "issue": "rust hook requires `--path` attribute\nCargo has changed how packages get installed and requires an extra `--path <destination>` attribute.\r\n\r\nSymptom:\r\n```\r\n[INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default')\r\nReturn code: 101\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors: \r\n error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package.\r\n```\r\n\r\nI guess the fix should be done where here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87\r\n\r\nDo we want to make pre-commit compatible with multiple versions of cargo or just the latest one?\r\n\r\n/cc @asottile @chriskuehl \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. 
And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {()}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py"}]} | 1,767 | 125 |
gh_patches_debug_17363 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4009 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jinja2: Handle non-string template names as span resources
Jinja2 template names may not be strings, but need to be serialized as strings prior to being set as a span resource. Problem areas can be found in `ddtrace/contrib/jinja2/patch.py` on lines 63, 81 and 97 where the value from Jinja2 is trusted to be a string, but may not be.
https://github.com/DataDog/dd-trace-py/blob/fb8dfa2f33fff37d21df9728d8386c0260df9744/ddtrace/contrib/jinja2/patch.py#L51-L100
Here is an example of the exception we are seeing when `span.finish` is called:
```
File "/opt/venv/lib/python3.8/site-packages/ddtrace/span.py", line 237, in finish
cb(self)
File "/opt/venv/lib/python3.8/site-packages/ddtrace/tracer.py", line 712, in _on_span_finish
p.on_span_finish(span)
File "/opt/venv/lib/python3.8/site-packages/ddtrace/internal/processor/trace.py", line 208, in on_span_finish
self._writer.write(spans)
File "/opt/venv/lib/python3.8/site-packages/ddtrace/internal/writer.py", line 518, in write
self._encoder.put(spans)
File "ddtrace/internal/_encoding.pyx", line 456, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 460, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 483, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 464, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 451, in ddtrace.internal._encoding.MsgpackEncoderBase._pack_trace
File "ddtrace/internal/_encoding.pyx", line 600, in ddtrace.internal._encoding.MsgpackEncoderV03.pack_span
File "ddtrace/internal/_encoding.pyx", line 142, in ddtrace.internal._encoding.pack_text
TypeError: Unhandled text type: <class 'int'>
```
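A minimal, self-contained sketch of the kind of coercion that avoids this (the `<memory>` default is assumed to mirror the tracer's placeholder constant, not confirmed from the source):
```
# Sketch: coerce whatever jinja2 reports as the template name into text before it is
# stored on the span; the encoder in the traceback above only accepts string resources.
DEFAULT_TEMPLATE_NAME = "<memory>"  # assumed placeholder for nameless/in-memory templates


def resource_name(raw_name):
    # raw_name may be an int (as in the TypeError above) or None for inline templates.
    if raw_name is None:
        return DEFAULT_TEMPLATE_NAME
    return raw_name if isinstance(raw_name, str) else str(raw_name)


assert resource_name(42) == "42"
assert resource_name(None) == "<memory>"
```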
Relevant pip dependencies:
datadogpy == 0.44.0
dd-trace-py == 1.2.3
jinja2 == 3.0.3
</issue>
<code>
[start of ddtrace/contrib/jinja2/patch.py]
1 import os
2
3 import jinja2
4
5 from ddtrace import config
6 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
7
8 from ...constants import SPAN_MEASURED_KEY
9 from ...ext import SpanTypes
10 from ...internal.utils import ArgumentError
11 from ...internal.utils import get_argument_value
12 from ...pin import Pin
13 from ..trace_utils import unwrap as _u
14 from .constants import DEFAULT_TEMPLATE_NAME
15
16
17 # default settings
18 config._add(
19 "jinja2",
20 {
21 "service_name": os.getenv("DD_JINJA2_SERVICE_NAME"),
22 },
23 )
24
25
26 def patch():
27 if getattr(jinja2, "__datadog_patch", False):
28 # already patched
29 return
30 setattr(jinja2, "__datadog_patch", True)
31 Pin(
32 service=config.jinja2["service_name"],
33 _config=config.jinja2,
34 ).onto(jinja2.environment.Environment)
35 _w(jinja2, "environment.Template.render", _wrap_render)
36 _w(jinja2, "environment.Template.generate", _wrap_render)
37 _w(jinja2, "environment.Environment.compile", _wrap_compile)
38 _w(jinja2, "environment.Environment._load_template", _wrap_load_template)
39
40
41 def unpatch():
42 if not getattr(jinja2, "__datadog_patch", False):
43 return
44 setattr(jinja2, "__datadog_patch", False)
45 _u(jinja2.Template, "render")
46 _u(jinja2.Template, "generate")
47 _u(jinja2.Environment, "compile")
48 _u(jinja2.Environment, "_load_template")
49
50
51 def _wrap_render(wrapped, instance, args, kwargs):
52 """Wrap `Template.render()` or `Template.generate()`"""
53 pin = Pin.get_from(instance.environment)
54 if not pin or not pin.enabled():
55 return wrapped(*args, **kwargs)
56
57 template_name = instance.name or DEFAULT_TEMPLATE_NAME
58 with pin.tracer.trace("jinja2.render", pin.service, span_type=SpanTypes.TEMPLATE) as span:
59 span.set_tag(SPAN_MEASURED_KEY)
60 try:
61 return wrapped(*args, **kwargs)
62 finally:
63 span.resource = template_name
64 span.set_tag("jinja2.template_name", template_name)
65
66
67 def _wrap_compile(wrapped, instance, args, kwargs):
68 pin = Pin.get_from(instance)
69 if not pin or not pin.enabled():
70 return wrapped(*args, **kwargs)
71
72 try:
73 template_name = get_argument_value(args, kwargs, 1, "name")
74 except ArgumentError:
75 template_name = DEFAULT_TEMPLATE_NAME
76
77 with pin.tracer.trace("jinja2.compile", pin.service, span_type=SpanTypes.TEMPLATE) as span:
78 try:
79 return wrapped(*args, **kwargs)
80 finally:
81 span.resource = template_name
82 span.set_tag("jinja2.template_name", template_name)
83
84
85 def _wrap_load_template(wrapped, instance, args, kwargs):
86 pin = Pin.get_from(instance)
87 if not pin or not pin.enabled():
88 return wrapped(*args, **kwargs)
89
90 template_name = get_argument_value(args, kwargs, 0, "name")
91 with pin.tracer.trace("jinja2.load", pin.service, span_type=SpanTypes.TEMPLATE) as span:
92 template = None
93 try:
94 template = wrapped(*args, **kwargs)
95 return template
96 finally:
97 span.resource = template_name
98 span.set_tag("jinja2.template_name", template_name)
99 if template:
100 span.set_tag("jinja2.template_path", template.filename)
101
[end of ddtrace/contrib/jinja2/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/jinja2/patch.py b/ddtrace/contrib/jinja2/patch.py
--- a/ddtrace/contrib/jinja2/patch.py
+++ b/ddtrace/contrib/jinja2/patch.py
@@ -7,6 +7,7 @@
from ...constants import SPAN_MEASURED_KEY
from ...ext import SpanTypes
+from ...internal.compat import stringify
from ...internal.utils import ArgumentError
from ...internal.utils import get_argument_value
from ...pin import Pin
@@ -54,7 +55,7 @@
if not pin or not pin.enabled():
return wrapped(*args, **kwargs)
- template_name = instance.name or DEFAULT_TEMPLATE_NAME
+ template_name = stringify(instance.name or DEFAULT_TEMPLATE_NAME)
with pin.tracer.trace("jinja2.render", pin.service, span_type=SpanTypes.TEMPLATE) as span:
span.set_tag(SPAN_MEASURED_KEY)
try:
| {"golden_diff": "diff --git a/ddtrace/contrib/jinja2/patch.py b/ddtrace/contrib/jinja2/patch.py\n--- a/ddtrace/contrib/jinja2/patch.py\n+++ b/ddtrace/contrib/jinja2/patch.py\n@@ -7,6 +7,7 @@\n \n from ...constants import SPAN_MEASURED_KEY\n from ...ext import SpanTypes\n+from ...internal.compat import stringify\n from ...internal.utils import ArgumentError\n from ...internal.utils import get_argument_value\n from ...pin import Pin\n@@ -54,7 +55,7 @@\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n \n- template_name = instance.name or DEFAULT_TEMPLATE_NAME\n+ template_name = stringify(instance.name or DEFAULT_TEMPLATE_NAME)\n with pin.tracer.trace(\"jinja2.render\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n try:\n", "issue": "Jinja2: Handle non-string template names as span resources\nJinja2 template names may not be strings, but need to be serialized as strings prior to being set as a span resource. Problem areas can be found in `ddtrace/contrib/jinja2/patch.py` on lines 63, 81 and 97 where the value from Jinja2 is trusted to be a string, but may not be.\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/fb8dfa2f33fff37d21df9728d8386c0260df9744/ddtrace/contrib/jinja2/patch.py#L51-L100\r\n\r\n\r\nHere is an example of the exception we are seeing when `span.finish` is called:\r\n\r\n```\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/span.py\", line 237, in finish\r\n cb(self)\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/tracer.py\", line 712, in _on_span_finish\r\n p.on_span_finish(span)\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/internal/processor/trace.py\", line 208, in on_span_finish\r\n self._writer.write(spans)\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/internal/writer.py\", line 518, in write\r\n self._encoder.put(spans)\r\n File \"ddtrace/internal/_encoding.pyx\", line 456, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 460, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 483, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 464, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 451, in ddtrace.internal._encoding.MsgpackEncoderBase._pack_trace\r\n File \"ddtrace/internal/_encoding.pyx\", line 600, in ddtrace.internal._encoding.MsgpackEncoderV03.pack_span\r\n File \"ddtrace/internal/_encoding.pyx\", line 142, in ddtrace.internal._encoding.pack_text\r\nTypeError: Unhandled text type: <class 'int'>\r\n```\r\n\r\nRelevant pip dependencies:\r\ndatadogpy == 0.44.0\r\ndd-trace-py == 1.2.3\r\njinja2 == 3.0.3\r\n\n", "before_files": [{"content": "import os\n\nimport jinja2\n\nfrom ddtrace import config\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...internal.utils import ArgumentError\nfrom ...internal.utils import get_argument_value\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap as _u\nfrom .constants import DEFAULT_TEMPLATE_NAME\n\n\n# default settings\nconfig._add(\n \"jinja2\",\n {\n \"service_name\": os.getenv(\"DD_JINJA2_SERVICE_NAME\"),\n },\n)\n\n\ndef patch():\n if getattr(jinja2, \"__datadog_patch\", False):\n # already patched\n return\n setattr(jinja2, \"__datadog_patch\", True)\n Pin(\n service=config.jinja2[\"service_name\"],\n 
_config=config.jinja2,\n ).onto(jinja2.environment.Environment)\n _w(jinja2, \"environment.Template.render\", _wrap_render)\n _w(jinja2, \"environment.Template.generate\", _wrap_render)\n _w(jinja2, \"environment.Environment.compile\", _wrap_compile)\n _w(jinja2, \"environment.Environment._load_template\", _wrap_load_template)\n\n\ndef unpatch():\n if not getattr(jinja2, \"__datadog_patch\", False):\n return\n setattr(jinja2, \"__datadog_patch\", False)\n _u(jinja2.Template, \"render\")\n _u(jinja2.Template, \"generate\")\n _u(jinja2.Environment, \"compile\")\n _u(jinja2.Environment, \"_load_template\")\n\n\ndef _wrap_render(wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\"\"\"\n pin = Pin.get_from(instance.environment)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n with pin.tracer.trace(\"jinja2.render\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n try:\n return wrapped(*args, **kwargs)\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n\n\ndef _wrap_compile(wrapped, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n try:\n template_name = get_argument_value(args, kwargs, 1, \"name\")\n except ArgumentError:\n template_name = DEFAULT_TEMPLATE_NAME\n\n with pin.tracer.trace(\"jinja2.compile\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n try:\n return wrapped(*args, **kwargs)\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n\n\ndef _wrap_load_template(wrapped, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n template_name = get_argument_value(args, kwargs, 0, \"name\")\n with pin.tracer.trace(\"jinja2.load\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n if template:\n span.set_tag(\"jinja2.template_path\", template.filename)\n", "path": "ddtrace/contrib/jinja2/patch.py"}]} | 2,103 | 207 |
gh_patches_debug_3321 | rasdani/github-patches | git_diff | chainer__chainer-7167 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Wrong initialization of the Linear link weights when called with n_batch_axes>1
# **Overview**
When creating the _Linear_ link without specifying the __input_size__ in the constructor and then calling the link with the parameter __n_batch_axes__ > 1, the weight matrix is initialized with a wrong shape and causes an error.
# **Conditions**
Platform: Windows-7-6.1.7601-SP1
Chainer: 5.4.0
NumPy: 1.15.4
CuPy: Not Available
iDeep: Not Available
# Code to reproduce
```
import numpy as np
import chainer
batch_size, seq_len, num_of_features, out_features = (1, 4, 3, 8)
linear_layer_chainer = chainer.links.Linear(out_features)
data = chainer.Variable(np.ones((batch_size, seq_len, num_of_features), dtype=np.float32))
results = linear_layer_chainer(data, n_batch_axes=2)
```
# **Error messages, stack traces, or logs**
```
Traceback (most recent call last):
File "C:\Users\ntt\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3267, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-9-9df2914c2f5a>", line 1, in <module>
results = linear_layer_chainer_2_axes(data, n_batch_axes=2)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\link.py", line 242, in __call__
out = forward(*args, **kwargs)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\links\connection\linear.py", line 138, in forward
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\functions\connection\linear.py", line 288, in linear
y, = LinearFunction().apply(args)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\function_node.py", line 245, in apply
self._check_data_type_forward(in_data)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\function_node.py", line 330, in _check_data_type_forward
self.check_type_forward(in_type)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\functions\connection\linear.py", line 27, in check_type_forward
x_type.shape[1] == w_type.shape[1],
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\utils\type_check.py", line 546, in expect
expr.expect()
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\utils\type_check.py", line 483, in expect
'{0} {1} {2}'.format(left, self.inv, right))
chainer.utils.type_check.InvalidType:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 3 != 12
```
# Others
If you create the _Linear_ link with __in_size__ set explicitly to the correct value, it works:
```
working_linear = chainer.links.Linear(num_of_features, out_features)
working_results = working_linear(data, n_batch_axes=2)
working_results.shape
```
Output : `(1, 4, 8)`
# Solution
I believe the culprit lies in the _Linear_ link's _forward_ method:
```
def forward(self, x, n_batch_axes=1):
"""Applies the linear layer.
Args:
x (~chainer.Variable): Batch of input vectors.
n_batch_axes (int): The number of batch axes. The default is 1. The
input variable is reshaped into
(:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional tensor.
This should be greater than 0.
Returns:
~chainer.Variable: Output of the linear layer.
"""
if self.W.array is None:
in_size = functools.reduce(operator.mul, x.shape[1:], 1)
self._initialize_params(in_size)
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
```
If I interpret this part correctly, it does not take into account the _n_batch_axes_ parameter when initializing the weight matrix.
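As a quick, self-contained illustration of that interpretation (the helper name `infer_in_size` below is made up for this sketch and is not part of Chainer), reducing over `x.shape[n_batch_axes:]` instead of `x.shape[1:]` gives the weight width the traceback expects:
```
import functools
import operator

import numpy as np


def infer_in_size(x, n_batch_axes=1):
    # Multiply all non-batch dimensions together, starting at n_batch_axes
    # instead of the hard-coded axis 1 that Linear.forward currently uses.
    return functools.reduce(operator.mul, x.shape[n_batch_axes:], 1)


x = np.ones((1, 4, 3), dtype=np.float32)
print(infer_in_size(x, n_batch_axes=1))  # 12 -> current behaviour, W gets shape (8, 12)
print(infer_in_size(x, n_batch_axes=2))  # 3  -> matches the last axis, so W gets shape (8, 3)
```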
</issue>
<code>
[start of chainer/links/connection/linear.py]
1 import typing as tp # NOQA
2
3 from chainer.functions.connection import linear
4 from chainer import initializers
5 from chainer import link
6 from chainer import types # NOQA
7 from chainer import utils
8 from chainer import variable
9
10
11 class Linear(link.Link):
12
13 """Linear layer (a.k.a.\\ fully-connected layer).
14
15 This is a link that wraps the :func:`~chainer.functions.linear` function,
16 and holds a weight matrix ``W`` and optionally a bias vector ``b`` as
17 parameters.
18
19 If ``initialW`` is left to the default value of ``None``, the weight matrix
20 ``W`` is initialized with i.i.d. Gaussian samples, each of which has zero
21 mean and deviation :math:`\\sqrt{1/\\text{in_size}}`. The bias vector ``b``
22 is of size ``out_size``. If the ``initial_bias`` is to left the default
23 value of ``None``, each element is initialized as zero. If the ``nobias``
24 argument is set to ``True``, then this link does not hold a bias vector.
25
26 Args:
27 in_size (int or None): Dimension of input vectors. If unspecified or
28 ``None``, parameter initialization will be deferred until the
29 first forward data pass at which time the size will be determined.
30 out_size (int): Dimension of output vectors. If only one value is
31 passed for ``in_size`` and ``out_size``, that value will be used
32 for the ``out_size`` dimension.
33 nobias (bool): If ``True``, then this function does not use the bias.
34 initialW (:ref:`initializer <initializer>`): Initializer to initialize
35 the weight. When it is :class:`numpy.ndarray`,
36 its ``ndim`` should be 2. If ``initialW`` is ``None``, then the
37 weights are initialized with i.i.d. Gaussian samples, each of which
38 has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`.
39 initial_bias (:ref:`initializer <initializer>`): Initializer to
40 initialize the bias. If ``None``, the bias will be initialized to
41 zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
42 .. seealso:: :func:`~chainer.functions.linear`
43
44 Attributes:
45 W (~chainer.Variable): Weight parameter.
46 b (~chainer.Variable): Bias parameter.
47
48 .. admonition:: Example
49
50 There are several ways to make a Linear link.
51
52 Define an input vector ``x`` as:
53
54 >>> x = np.array([[0, 1, 2, 3, 4]], np.float32)
55
56 1. Give the first two arguments explicitly:
57
58 Those numbers are considered as the input size and the output size.
59
60 >>> l = L.Linear(5, 10)
61 >>> y = l(x)
62 >>> y.shape
63 (1, 10)
64
65 2. Omit ``in_size`` (give the output size only as the first argument)
66 or fill it with ``None``:
67
68 In this case, the size of second axis of ``x`` is used as the
69 input size. So the below two cases are the same.
70
71 >>> l = L.Linear(10)
72 >>> y = l(x)
73 >>> y.shape
74 (1, 10)
75
76 >>> l = L.Linear(None, 10)
77 >>> y = l(x)
78 >>> y.shape
79 (1, 10)
80
81 When you omit the first argument, you need to specify the other
82 subsequent arguments from ``nobias`` as keyword arguments. So the
83 below two cases are the same.
84
85 >>> l = L.Linear(None, 10, False, None, 0)
86 >>> y = l(x)
87 >>> y.shape
88 (1, 10)
89
90 >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)
91 >>> y = l(x)
92 >>> y.shape
93 (1, 10)
94
95 """
96
97 def __init__(self, in_size, out_size=None, nobias=False,
98 initialW=None, initial_bias=None):
99 # type: (tp.Optional[int], tp.Optional[int], bool, tp.Optional[types.InitializerSpec], tp.Optional[types.InitializerSpec]) -> None # NOQA
100
101 super(Linear, self).__init__()
102
103 if out_size is None:
104 in_size, out_size = None, in_size
105 self.in_size = in_size
106 self.out_size = out_size
107
108 with self.init_scope():
109 W_initializer = initializers._get_initializer(initialW)
110 self.W = variable.Parameter(W_initializer) # type: variable.Variable # NOQA
111 if in_size is not None:
112 self._initialize_params(in_size)
113
114 if nobias:
115 self.b = None # type: tp.Optional[variable.Variable]
116 else:
117 if initial_bias is None:
118 initial_bias = 0
119 bias_initializer = initializers._get_initializer(initial_bias)
120 self.b = variable.Parameter(bias_initializer, out_size)
121
122 def _initialize_params(self, in_size):
123 # type: (int) -> None
124
125 self.W.initialize((self.out_size, in_size)) # type: ignore
126
127 @property
128 def printable_specs(self):
129 specs = [
130 ('in_size', self.in_size),
131 ('out_size', self.out_size),
132 ('nobias', self.b is None),
133 ]
134 for spec in specs:
135 yield spec
136
137 def forward(self, x, n_batch_axes=1):
138 # type: (variable.Variable, int) -> variable.Variable
139 """Applies the linear layer.
140
141 Args:
142 x (~chainer.Variable): Batch of input vectors.
143 n_batch_axes (int): The number of batch axes. The default is 1. The
144 input variable is reshaped into
145 (:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional tensor.
146 This should be greater than 0.
147
148 Returns:
149 ~chainer.Variable: Output of the linear layer.
150
151 """
152 if self.W.array is None:
153 in_size = utils.size_of_shape(x.shape[1:])
154 self._initialize_params(in_size)
155 return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
156
[end of chainer/links/connection/linear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py
--- a/chainer/links/connection/linear.py
+++ b/chainer/links/connection/linear.py
@@ -150,6 +150,6 @@
"""
if self.W.array is None:
- in_size = utils.size_of_shape(x.shape[1:])
+ in_size = utils.size_of_shape(x.shape[n_batch_axes:])
self._initialize_params(in_size)
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
| {"golden_diff": "diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py\n--- a/chainer/links/connection/linear.py\n+++ b/chainer/links/connection/linear.py\n@@ -150,6 +150,6 @@\n \n \"\"\"\n if self.W.array is None:\n- in_size = utils.size_of_shape(x.shape[1:])\n+ in_size = utils.size_of_shape(x.shape[n_batch_axes:])\n self._initialize_params(in_size)\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\n", "issue": "[BUG] Wrong initialization of the Linear link weights when called with n_batch_axes>1\n# **Overview**\r\nWhen creating the _Linear_ link without specifying the __input_size__ in the constructor and then calling the link with the parameter __n_batch_axes__ > 1, the weight matrix is initialized with a wrong shape and causes an error.\r\n\r\n# **Conditions**\r\nPlatform: Windows-7-6.1.7601-SP1\r\nChainer: 5.4.0\r\nNumPy: 1.15.4\r\nCuPy: Not Available\r\niDeep: Not Available\r\n\r\n# Code to reproduce\r\n```\r\nimport numpy as np\r\nimport chainer\r\nbatch_size, seq_len, num_of_features, out_features = (1, 4, 3, 8)\r\nlinear_layer_chainer = chainer.links.Linear(out_features)\r\ndata = chainer.Variable(np.ones((batch_size, seq_len, num_of_features), dtype=np.float32))\r\nresults = linear_layer_chainer (data, n_batch_axes=2)\r\n```\r\n\r\n\r\n# **Error messages, stack traces, or logs**\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3267, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-9-9df2914c2f5a>\", line 1, in <module>\r\n results = linear_layer_chainer_2_axes(data, n_batch_axes=2)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\link.py\", line 242, in __call__\r\n out = forward(*args, **kwargs)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\links\\connection\\linear.py\", line 138, in forward\r\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\functions\\connection\\linear.py\", line 288, in linear\r\n y, = LinearFunction().apply(args)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\function_node.py\", line 245, in apply\r\n self._check_data_type_forward(in_data)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\function_node.py\", line 330, in _check_data_type_forward\r\n self.check_type_forward(in_type)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\functions\\connection\\linear.py\", line 27, in check_type_forward\r\n x_type.shape[1] == w_type.shape[1],\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\utils\\type_check.py\", line 546, in expect\r\n expr.expect()\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\utils\\type_check.py\", line 483, in expect\r\n '{0} {1} {2}'.format(left, self.inv, right))\r\nchainer.utils.type_check.InvalidType: \r\nInvalid operation is performed in: LinearFunction (Forward)\r\nExpect: x.shape[1] == W.shape[1]\r\nActual: 3 != 12\r\n```\r\n\r\n# Others \r\nIf you create the _Linear_ layer link with __in_size__ passed explicitly to the correct shape, it works:\r\n\r\n```\r\nworking_linear = chainer.links.Linear(num_of_features, out_features)\r\nworking_results = working_linear(data, n_batch_axes=2)\r\nworking_results.shape\r\n```\r\nOutput : `(1, 4, 8)`\r\n\r\n# Solution \r\nI believe the culprit lays in the _Linear_ link _forward_ 
method:\r\n```\r\n def forward(self, x, n_batch_axes=1):\r\n \"\"\"Applies the linear layer.\r\n\r\n Args:\r\n x (~chainer.Variable): Batch of input vectors.\r\n n_batch_axes (int): The number of batch axes. The default is 1. The\r\n input variable is reshaped into\r\n (:math:`{\\\\rm n\\\\_batch\\\\_axes} + 1`)-dimensional tensor.\r\n This should be greater than 0.\r\n\r\n Returns:\r\n ~chainer.Variable: Output of the linear layer.\r\n\r\n \"\"\"\r\n if self.W.array is None:\r\n in_size = functools.reduce(operator.mul, x.shape[1:], 1)\r\n self._initialize_params(in_size)\r\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\r\n```\r\nIf I interpret this part correctly, it does not take into account the _n_batch_axes_ parameter when initializing the weight matrix.\r\n\r\n\n", "before_files": [{"content": "import typing as tp # NOQA\n\nfrom chainer.functions.connection import linear\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer import types # NOQA\nfrom chainer import utils\nfrom chainer import variable\n\n\nclass Linear(link.Link):\n\n \"\"\"Linear layer (a.k.a.\\\\ fully-connected layer).\n\n This is a link that wraps the :func:`~chainer.functions.linear` function,\n and holds a weight matrix ``W`` and optionally a bias vector ``b`` as\n parameters.\n\n If ``initialW`` is left to the default value of ``None``, the weight matrix\n ``W`` is initialized with i.i.d. Gaussian samples, each of which has zero\n mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`. The bias vector ``b``\n is of size ``out_size``. If the ``initial_bias`` is to left the default\n value of ``None``, each element is initialized as zero. If the ``nobias``\n argument is set to ``True``, then this link does not hold a bias vector.\n\n Args:\n in_size (int or None): Dimension of input vectors. If unspecified or\n ``None``, parameter initialization will be deferred until the\n first forward data pass at which time the size will be determined.\n out_size (int): Dimension of output vectors. If only one value is\n passed for ``in_size`` and ``out_size``, that value will be used\n for the ``out_size`` dimension.\n nobias (bool): If ``True``, then this function does not use the bias.\n initialW (:ref:`initializer <initializer>`): Initializer to initialize\n the weight. When it is :class:`numpy.ndarray`,\n its ``ndim`` should be 2. If ``initialW`` is ``None``, then the\n weights are initialized with i.i.d. Gaussian samples, each of which\n has zero mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`.\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n .. seealso:: :func:`~chainer.functions.linear`\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter.\n\n .. admonition:: Example\n\n There are several ways to make a Linear link.\n\n Define an input vector ``x`` as:\n\n >>> x = np.array([[0, 1, 2, 3, 4]], np.float32)\n\n 1. Give the first two arguments explicitly:\n\n Those numbers are considered as the input size and the output size.\n\n >>> l = L.Linear(5, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n 2. Omit ``in_size`` (give the output size only as the first argument)\n or fill it with ``None``:\n\n In this case, the size of second axis of ``x`` is used as the\n input size. 
So the below two cases are the same.\n\n >>> l = L.Linear(10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(None, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``nobias`` as keyword arguments. So the\n below two cases are the same.\n\n >>> l = L.Linear(None, 10, False, None, 0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n \"\"\"\n\n def __init__(self, in_size, out_size=None, nobias=False,\n initialW=None, initial_bias=None):\n # type: (tp.Optional[int], tp.Optional[int], bool, tp.Optional[types.InitializerSpec], tp.Optional[types.InitializerSpec]) -> None # NOQA\n\n super(Linear, self).__init__()\n\n if out_size is None:\n in_size, out_size = None, in_size\n self.in_size = in_size\n self.out_size = out_size\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer) # type: variable.Variable # NOQA\n if in_size is not None:\n self._initialize_params(in_size)\n\n if nobias:\n self.b = None # type: tp.Optional[variable.Variable]\n else:\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_size)\n\n def _initialize_params(self, in_size):\n # type: (int) -> None\n\n self.W.initialize((self.out_size, in_size)) # type: ignore\n\n @property\n def printable_specs(self):\n specs = [\n ('in_size', self.in_size),\n ('out_size', self.out_size),\n ('nobias', self.b is None),\n ]\n for spec in specs:\n yield spec\n\n def forward(self, x, n_batch_axes=1):\n # type: (variable.Variable, int) -> variable.Variable\n \"\"\"Applies the linear layer.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n n_batch_axes (int): The number of batch axes. The default is 1. The\n input variable is reshaped into\n (:math:`{\\\\rm n\\\\_batch\\\\_axes} + 1`)-dimensional tensor.\n This should be greater than 0.\n\n Returns:\n ~chainer.Variable: Output of the linear layer.\n\n \"\"\"\n if self.W.array is None:\n in_size = utils.size_of_shape(x.shape[1:])\n self._initialize_params(in_size)\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\n", "path": "chainer/links/connection/linear.py"}]} | 3,380 | 125 |
gh_patches_debug_22154 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New connection creation flow does not handle schema creation failure scenarios
## Description
1. Create a database manually, say `test_db`, and create a schema called `Library Management` in it.
1. In Mathesar, create a new connection to the database.
1. While creating the connection, in the "New PostgreSQL Database Connection" modal, check the "Library Management" schema in the "Schemas to install" field.

1. Click on 'Add Connection'.
1. An error is thrown:
- `"(psycopg2.errors.DuplicateSchema) schema \"Library Management\" already exists\n\n[SQL: CREATE SCHEMA \"Library Management\";]\n(Background on this error at: https://sqlalche.me/e/14/f405)"`.
- This is expected.
1. The modal remains open. Uncheck the "Library Management" schema, and try clicking on 'Add Connection' again.
1. Another error is thrown:
- `duplicate key value violates unique constraint "mathesar_database_name_key" DETAIL: Key (name)=(pavish_install_test_1) already exists.`
     - From the user perspective, there's no clear next step for them to take when this happens.
1. Close the modal and refresh the page, notice that the connection has been created already.
1. Open the connection, and notice that it shows `Schemas (0)`.
1. Forcing a manual reflection using the "Sync external changes" button shows the public schema and existing tables.
### Note:
* I performed the above steps 1-3 only as a means to force an error in the schema creation flow. While testing, I encountered an error that occurred for an unknown reason.
* This issue primarily focuses on steps 4-10.
## Expected behavior
* When an error occurs, there should not be an inconsistent middle state in the DB vs the UI.
* When schema creation fails, we should try to revert to the original state, or we should make it extra clear in the UI that it's a multi-step process.
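
One detail worth noting for whoever picks this up: the intent to tolerate this case already exists in `_load_sample_data`, but the bare `except DuplicateSchema:` there never fires, because SQLAlchemy wraps DBAPI exceptions in `sqlalchemy.exc.ProgrammingError` (the psycopg2 error ends up on `e.orig`, as the sqlalche.me link in the error above suggests). A minimal sketch of a check that would actually match only the duplicate-schema case (the wrapper function name is illustrative, not code from the repo):
```
from psycopg2.errors import DuplicateSchema
from sqlalchemy.exc import ProgrammingError


def load_dataset_tolerating_existing_schema(loader, engine):
    """Run a sample-data loader, swallowing only the 'schema already exists' failure."""
    try:
        loader(engine, safe_mode=True)
    except ProgrammingError as e:
        # SQLAlchemy wraps the psycopg2 error; the original exception is on e.orig.
        if not isinstance(e.orig, DuplicateSchema):
            raise
```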
</issue>
<code>
[start of mathesar/utils/connections.py]
1 """Utilities to help with creating and managing connections in Mathesar."""
2 from psycopg2.errors import DuplicateSchema
3 from sqlalchemy.exc import OperationalError
4 from mathesar.models.base import Database
5 from db import install, connection as dbconn
6 from mathesar.state import reset_reflection
7 from demo.install.library_dataset import load_library_dataset
8 from demo.install.movies_dataset import load_movies_dataset
9
10
11 class BadInstallationTarget(Exception):
12 """Raise when an attempt is made to install on a disallowed target"""
13 pass
14
15
16 def copy_connection_from_preexisting(
17 connection, nickname, db_name, create_db, sample_data
18 ):
19 if connection['connection_type'] == 'internal_database':
20 db_model = Database.create_from_settings_key('default')
21 elif connection['connection_type'] == 'user_database':
22 db_model = Database.current_objects.get(id=connection['id'])
23 db_model.id = None
24 else:
25 raise KeyError("connection_type")
26 root_db = db_model.db_name
27 return _save_and_install(
28 db_model, db_name, root_db, nickname, create_db, sample_data
29 )
30
31
32 def create_connection_from_scratch(
33 user, password, host, port, nickname, db_name, sample_data
34 ):
35 db_model = Database(username=user, password=password, host=host, port=port)
36 root_db = db_name
37 return _save_and_install(
38 db_model, db_name, root_db, nickname, False, sample_data
39 )
40
41
42 def create_connection_with_new_user(
43 connection, user, password, nickname, db_name, create_db, sample_data
44 ):
45 db_model = copy_connection_from_preexisting(
46 connection, nickname, db_name, create_db, []
47 )
48 engine = db_model._sa_engine
49 db_model.username = user
50 db_model.password = password
51 db_model.save()
52 dbconn.execute_msar_func_with_engine(
53 engine,
54 'create_basic_mathesar_user',
55 db_model.username,
56 db_model.password
57 )
58 _load_sample_data(db_model._sa_engine, sample_data)
59 return db_model
60
61
62 def _save_and_install(
63 db_model, db_name, root_db, nickname, create_db, sample_data
64 ):
65 db_model.name = nickname
66 db_model.db_name = db_name
67 _validate_db_model(db_model)
68 db_model.save()
69 try:
70 install.install_mathesar(
71 database_name=db_model.db_name,
72 username=db_model.username,
73 password=db_model.password,
74 hostname=db_model.host,
75 port=db_model.port,
76 skip_confirm=True,
77 create_db=create_db,
78 root_db=root_db,
79 )
80 except OperationalError as e:
81 db_model.delete()
82 raise e
83 _load_sample_data(db_model._sa_engine, sample_data)
84 return db_model
85
86
87 def _load_sample_data(engine, sample_data):
88 DATASET_MAP = {
89 'library_management': load_library_dataset,
90 'movie_collection': load_movies_dataset,
91 }
92 for key in sample_data:
93 try:
94 DATASET_MAP[key](engine, safe_mode=True)
95 except DuplicateSchema:
96 # We swallow this error, since otherwise we'll raise an error on the
97 # front end even though installation generally succeeded.
98 continue
99 reset_reflection()
100
101
102 def _validate_db_model(db_model):
103 internal_db_model = Database.create_from_settings_key('default')
104 if (
105 internal_db_model is not None
106 and db_model.host == internal_db_model.host
107 and db_model.port == internal_db_model.port
108 and db_model.db_name == internal_db_model.db_name
109 ):
110 raise BadInstallationTarget(
111 "Mathesar can't be installed in the internal DB namespace"
112 )
113
[end of mathesar/utils/connections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/utils/connections.py b/mathesar/utils/connections.py
--- a/mathesar/utils/connections.py
+++ b/mathesar/utils/connections.py
@@ -1,6 +1,6 @@
"""Utilities to help with creating and managing connections in Mathesar."""
from psycopg2.errors import DuplicateSchema
-from sqlalchemy.exc import OperationalError
+from sqlalchemy.exc import OperationalError, ProgrammingError
from mathesar.models.base import Database
from db import install, connection as dbconn
from mathesar.state import reset_reflection
@@ -92,10 +92,11 @@
for key in sample_data:
try:
DATASET_MAP[key](engine, safe_mode=True)
- except DuplicateSchema:
- # We swallow this error, since otherwise we'll raise an error on the
- # front end even though installation generally succeeded.
- continue
+ except ProgrammingError as e:
+ if isinstance(e.orig, DuplicateSchema):
+ # We swallow this error, since otherwise we'll raise an error on the
+ # front end even though installation generally succeeded.
+ continue
reset_reflection()
| {"golden_diff": "diff --git a/mathesar/utils/connections.py b/mathesar/utils/connections.py\n--- a/mathesar/utils/connections.py\n+++ b/mathesar/utils/connections.py\n@@ -1,6 +1,6 @@\n \"\"\"Utilities to help with creating and managing connections in Mathesar.\"\"\"\n from psycopg2.errors import DuplicateSchema\n-from sqlalchemy.exc import OperationalError\n+from sqlalchemy.exc import OperationalError, ProgrammingError\n from mathesar.models.base import Database\n from db import install, connection as dbconn\n from mathesar.state import reset_reflection\n@@ -92,10 +92,11 @@\n for key in sample_data:\n try:\n DATASET_MAP[key](engine, safe_mode=True)\n- except DuplicateSchema:\n- # We swallow this error, since otherwise we'll raise an error on the\n- # front end even though installation generally succeeded.\n- continue\n+ except ProgrammingError as e:\n+ if isinstance(e.orig, DuplicateSchema):\n+ # We swallow this error, since otherwise we'll raise an error on the\n+ # front end even though installation generally succeeded.\n+ continue\n reset_reflection()\n", "issue": "New connection creation flow does not handle schema creation failure scenarios\n## Description\r\n1. Create a database manually, say `test_db`, and create a schema called `Library Management` in it.\r\n1. In Mathesar, create a new connection to the database.\r\n1. While creating the connection, in the \"New PostgreSQL Database Connection\" modal, check the \"Library Management\" schema in the \"Schemas to install\" field.\r\n \r\n1. Click on 'Add Connection'.\r\n1. An error is thrown:\r\n - `\"(psycopg2.errors.DuplicateSchema) schema \\\"Library Management\\\" already exists\\n\\n[SQL: CREATE SCHEMA \\\"Library Management\\\";]\\n(Background on this error at: https://sqlalche.me/e/14/f405)\"`.\r\n - This is expected.\r\n1. The modal remains open. Uncheck the \"Library Management\" schema, and try clicking on 'Add Connection' again.\r\n1. Another error is thrown:\r\n - `duplicate key value violates unique constraint \"mathesar_database_name_key\" DETAIL: Key (name)=(pavish_install_test_1) already exists.`\r\n - From the user perpective, there's no clear next step for them to take when this happens.\r\n1. Close the modal and refresh the page, notice that the connection has been created already.\r\n1. Open the connection, and notice that it shows `Schemas (0)`.\r\n1. Forcing a manual reflection using the \"Sync external changes\" button shows the public schema and existing tables.\r\n\r\n### Note:\r\n* I performed the above steps 1-3 only as a means to force an error in the schema creation flow. While testing, I encountered an error which occurred due to an unknown reason. 
\r\n* This issue primarily focuses on steps 4-10.\r\n\r\n## Expected behavior\r\n* When an error occurs, there should not be an inconsistent middle state in the DB vs the UI.\r\n* When schema creation fails, we should try to revert back to the original state, or we should make it extra clear on the UI that it's a multi-step process.\n", "before_files": [{"content": "\"\"\"Utilities to help with creating and managing connections in Mathesar.\"\"\"\nfrom psycopg2.errors import DuplicateSchema\nfrom sqlalchemy.exc import OperationalError\nfrom mathesar.models.base import Database\nfrom db import install, connection as dbconn\nfrom mathesar.state import reset_reflection\nfrom demo.install.library_dataset import load_library_dataset\nfrom demo.install.movies_dataset import load_movies_dataset\n\n\nclass BadInstallationTarget(Exception):\n \"\"\"Raise when an attempt is made to install on a disallowed target\"\"\"\n pass\n\n\ndef copy_connection_from_preexisting(\n connection, nickname, db_name, create_db, sample_data\n):\n if connection['connection_type'] == 'internal_database':\n db_model = Database.create_from_settings_key('default')\n elif connection['connection_type'] == 'user_database':\n db_model = Database.current_objects.get(id=connection['id'])\n db_model.id = None\n else:\n raise KeyError(\"connection_type\")\n root_db = db_model.db_name\n return _save_and_install(\n db_model, db_name, root_db, nickname, create_db, sample_data\n )\n\n\ndef create_connection_from_scratch(\n user, password, host, port, nickname, db_name, sample_data\n):\n db_model = Database(username=user, password=password, host=host, port=port)\n root_db = db_name\n return _save_and_install(\n db_model, db_name, root_db, nickname, False, sample_data\n )\n\n\ndef create_connection_with_new_user(\n connection, user, password, nickname, db_name, create_db, sample_data\n):\n db_model = copy_connection_from_preexisting(\n connection, nickname, db_name, create_db, []\n )\n engine = db_model._sa_engine\n db_model.username = user\n db_model.password = password\n db_model.save()\n dbconn.execute_msar_func_with_engine(\n engine,\n 'create_basic_mathesar_user',\n db_model.username,\n db_model.password\n )\n _load_sample_data(db_model._sa_engine, sample_data)\n return db_model\n\n\ndef _save_and_install(\n db_model, db_name, root_db, nickname, create_db, sample_data\n):\n db_model.name = nickname\n db_model.db_name = db_name\n _validate_db_model(db_model)\n db_model.save()\n try:\n install.install_mathesar(\n database_name=db_model.db_name,\n username=db_model.username,\n password=db_model.password,\n hostname=db_model.host,\n port=db_model.port,\n skip_confirm=True,\n create_db=create_db,\n root_db=root_db,\n )\n except OperationalError as e:\n db_model.delete()\n raise e\n _load_sample_data(db_model._sa_engine, sample_data)\n return db_model\n\n\ndef _load_sample_data(engine, sample_data):\n DATASET_MAP = {\n 'library_management': load_library_dataset,\n 'movie_collection': load_movies_dataset,\n }\n for key in sample_data:\n try:\n DATASET_MAP[key](engine, safe_mode=True)\n except DuplicateSchema:\n # We swallow this error, since otherwise we'll raise an error on the\n # front end even though installation generally succeeded.\n continue\n reset_reflection()\n\n\ndef _validate_db_model(db_model):\n internal_db_model = Database.create_from_settings_key('default')\n if (\n internal_db_model is not None\n and db_model.host == internal_db_model.host\n and db_model.port == internal_db_model.port\n and db_model.db_name == 
internal_db_model.db_name\n ):\n raise BadInstallationTarget(\n \"Mathesar can't be installed in the internal DB namespace\"\n )\n", "path": "mathesar/utils/connections.py"}]} | 2,065 | 243 |
gh_patches_debug_43089 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1898 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Socket in use error when sending epp messages
### Current Behavior
Occasionally, we are unable to send messages via epp because of the following error:
"InfoDomain failed to execute due to an unknown error. Error: This socket is already used by another greenlet: <bound method Waiter.switch of <gevent._gevent_c_waiter.Waiter object at 0x7f8e1e5f6750>>"
### Expected Behavior
No error should occur.
### Steps to Reproduce
Reproduction steps are unknown; this happens irregularly and we haven't narrowed down a specific set of easily followed steps that can always guarantee it occurs. See additional context.
### Environment
_No response_
### Additional Context
This bug started after we added gunicorn with gevent workers instead of just the default sync option. While we still want to be asynchronous, this creates an issue with how we are using our sockets on EPP calls. We were able to see this more often after the removal of the connection pool.
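
As a hedged sketch of the direction this points in (the class and names below are illustrative, not code from this repo), one way to stop concurrent greenlets from sharing the EPP transport is to serialize `send` calls behind a gevent semaphore:
```
from gevent.lock import BoundedSemaphore


class SerializedEPPSender:
    """Allow only one greenlet at a time to use the underlying EPP client/socket."""

    def __init__(self, client):
        self._client = client
        self._lock = BoundedSemaphore(1)  # one active conversation on the socket at a time

    def send(self, command):
        self._lock.acquire()
        try:
            return self._client.send(command)
        finally:
            self._lock.release()
```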
### Issue Links
_No response_
</issue>
<code>
[start of src/epplibwrapper/client.py]
1 """Provide a wrapper around epplib to handle authentication and errors."""
2
3 import logging
4
5 try:
6 from epplib.client import Client
7 from epplib import commands
8 from epplib.exceptions import TransportError, ParsingError
9 from epplib.transport import SocketTransport
10 except ImportError:
11 pass
12
13 from django.conf import settings
14
15 from .cert import Cert, Key
16 from .errors import ErrorCode, LoginError, RegistryError
17
18 logger = logging.getLogger(__name__)
19
20 try:
21 # Write cert and key to disk
22 CERT = Cert()
23 KEY = Key()
24 except Exception:
25 CERT = None # type: ignore
26 KEY = None # type: ignore
27 logger.warning(
28 "Problem with client certificate. Registrar cannot contact registry.",
29 exc_info=True,
30 )
31
32
33 class EPPLibWrapper:
34 """
35 A wrapper over epplib's client.
36
37 ATTN: This should not be used directly. Use `Domain` from domain.py.
38 """
39
40 def __init__(self) -> None:
41 """Initialize settings which will be used for all connections."""
42 # set _client to None initially. In the event that the __init__ fails
43 # before _client initializes, app should still start and be in a state
44 # that it can attempt _client initialization on send attempts
45 self._client = None # type: ignore
46 # prepare (but do not send) a Login command
47 self._login = commands.Login(
48 cl_id=settings.SECRET_REGISTRY_CL_ID,
49 password=settings.SECRET_REGISTRY_PASSWORD,
50 obj_uris=[
51 "urn:ietf:params:xml:ns:domain-1.0",
52 "urn:ietf:params:xml:ns:contact-1.0",
53 ],
54 )
55 try:
56 self._initialize_client()
57 except Exception:
58 logger.warning("Unable to configure epplib. Registrar cannot contact registry.")
59
60 def _initialize_client(self) -> None:
61 """Initialize a client, assuming _login defined. Sets _client to initialized
62 client. Raises errors if initialization fails.
63 This method will be called at app initialization, and also during retries."""
64 # establish a client object with a TCP socket transport
65 # note that type: ignore added in several places because linter complains
66 # about _client initially being set to None, and None type doesn't match code
67 self._client = Client( # type: ignore
68 SocketTransport(
69 settings.SECRET_REGISTRY_HOSTNAME,
70 cert_file=CERT.filename,
71 key_file=KEY.filename,
72 password=settings.SECRET_REGISTRY_KEY_PASSPHRASE,
73 )
74 )
75 try:
76 # use the _client object to connect
77 self._client.connect() # type: ignore
78 response = self._client.send(self._login) # type: ignore
79 if response.code >= 2000: # type: ignore
80 self._client.close() # type: ignore
81 raise LoginError(response.msg) # type: ignore
82 except TransportError as err:
83 message = "_initialize_client failed to execute due to a connection error."
84 logger.error(f"{message} Error: {err}")
85 raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err
86 except LoginError as err:
87 raise err
88 except Exception as err:
89 message = "_initialize_client failed to execute due to an unknown error."
90 logger.error(f"{message} Error: {err}")
91 raise RegistryError(message) from err
92
93 def _disconnect(self) -> None:
94 """Close the connection."""
95 try:
96 self._client.send(commands.Logout()) # type: ignore
97 self._client.close() # type: ignore
98 except Exception:
99 logger.warning("Connection to registry was not cleanly closed.")
100
101 def _send(self, command):
102 """Helper function used by `send`."""
103 cmd_type = command.__class__.__name__
104
105 try:
106 # check for the condition that the _client was not initialized properly
107 # at app initialization
108 if self._client is None:
109 self._initialize_client()
110 response = self._client.send(command)
111 except (ValueError, ParsingError) as err:
112 message = f"{cmd_type} failed to execute due to some syntax error."
113 logger.error(f"{message} Error: {err}")
114 raise RegistryError(message) from err
115 except TransportError as err:
116 message = f"{cmd_type} failed to execute due to a connection error."
117 logger.error(f"{message} Error: {err}")
118 raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err
119 except LoginError as err:
120 # For linter due to it not liking this line length
121 text = "failed to execute due to a registry login error."
122 message = f"{cmd_type} {text}"
123 logger.error(f"{message} Error: {err}")
124 raise RegistryError(message) from err
125 except Exception as err:
126 message = f"{cmd_type} failed to execute due to an unknown error."
127 logger.error(f"{message} Error: {err}")
128 raise RegistryError(message) from err
129 else:
130 if response.code >= 2000:
131 raise RegistryError(response.msg, code=response.code)
132 else:
133 return response
134
135 def _retry(self, command):
136 """Retry sending a command through EPP by re-initializing the client
137 and then sending the command."""
138 # re-initialize by disconnecting and initial
139 self._disconnect()
140 self._initialize_client()
141 return self._send(command)
142
143 def send(self, command, *, cleaned=False):
144 """Login, the send the command. Retry once if an error is found"""
145 # try to prevent use of this method without appropriate safeguards
146 cmd_type = command.__class__.__name__
147 if not cleaned:
148 raise ValueError("Please sanitize user input before sending it.")
149 try:
150 return self._send(command)
151 except RegistryError as err:
152 if (
153 err.is_transport_error()
154 or err.is_connection_error()
155 or err.is_session_error()
156 or err.is_server_error()
157 or err.should_retry()
158 ):
159 message = f"{cmd_type} failed and will be retried"
160 logger.info(f"{message} Error: {err}")
161 return self._retry(command)
162 else:
163 raise err
164
165
166 try:
167 # Initialize epplib
168 CLIENT = EPPLibWrapper()
169 logger.info("registry client initialized")
170 except Exception:
171 logger.warning("Unable to configure epplib. Registrar cannot contact registry.")
172
[end of src/epplibwrapper/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/epplibwrapper/client.py b/src/epplibwrapper/client.py
--- a/src/epplibwrapper/client.py
+++ b/src/epplibwrapper/client.py
@@ -1,6 +1,7 @@
"""Provide a wrapper around epplib to handle authentication and errors."""
import logging
+from gevent.lock import BoundedSemaphore
try:
from epplib.client import Client
@@ -52,10 +53,16 @@
"urn:ietf:params:xml:ns:contact-1.0",
],
)
+ # We should only ever have one active connection at a time
+ self.connection_lock = BoundedSemaphore(1)
+
+ self.connection_lock.acquire()
try:
self._initialize_client()
except Exception:
- logger.warning("Unable to configure epplib. Registrar cannot contact registry.")
+ logger.warning("Unable to configure the connection to the registry.")
+ finally:
+ self.connection_lock.release()
def _initialize_client(self) -> None:
"""Initialize a client, assuming _login defined. Sets _client to initialized
@@ -74,11 +81,7 @@
)
try:
# use the _client object to connect
- self._client.connect() # type: ignore
- response = self._client.send(self._login) # type: ignore
- if response.code >= 2000: # type: ignore
- self._client.close() # type: ignore
- raise LoginError(response.msg) # type: ignore
+ self._connect()
except TransportError as err:
message = "_initialize_client failed to execute due to a connection error."
logger.error(f"{message} Error: {err}")
@@ -90,13 +93,33 @@
logger.error(f"{message} Error: {err}")
raise RegistryError(message) from err
+ def _connect(self) -> None:
+ """Connects to EPP. Sends a login command. If an invalid response is returned,
+ the client will be closed and a LoginError raised."""
+ self._client.connect() # type: ignore
+ response = self._client.send(self._login) # type: ignore
+ if response.code >= 2000: # type: ignore
+ self._client.close() # type: ignore
+ raise LoginError(response.msg) # type: ignore
+
def _disconnect(self) -> None:
- """Close the connection."""
+ """Close the connection. Sends a logout command and closes the connection."""
+ self._send_logout_command()
+ self._close_client()
+
+ def _send_logout_command(self):
+ """Sends a logout command to epp"""
try:
self._client.send(commands.Logout()) # type: ignore
- self._client.close() # type: ignore
- except Exception:
- logger.warning("Connection to registry was not cleanly closed.")
+ except Exception as err:
+ logger.warning(f"Logout command not sent successfully: {err}")
+
+ def _close_client(self):
+ """Closes an active client connection"""
+ try:
+ self._client.close()
+ except Exception as err:
+ logger.warning(f"Connection to registry was not cleanly closed: {err}")
def _send(self, command):
"""Helper function used by `send`."""
@@ -146,6 +169,8 @@
cmd_type = command.__class__.__name__
if not cleaned:
raise ValueError("Please sanitize user input before sending it.")
+
+ self.connection_lock.acquire()
try:
return self._send(command)
except RegistryError as err:
@@ -161,6 +186,8 @@
return self._retry(command)
else:
raise err
+ finally:
+ self.connection_lock.release()
try:
| {"golden_diff": "diff --git a/src/epplibwrapper/client.py b/src/epplibwrapper/client.py\n--- a/src/epplibwrapper/client.py\n+++ b/src/epplibwrapper/client.py\n@@ -1,6 +1,7 @@\n \"\"\"Provide a wrapper around epplib to handle authentication and errors.\"\"\"\n \n import logging\n+from gevent.lock import BoundedSemaphore\n \n try:\n from epplib.client import Client\n@@ -52,10 +53,16 @@\n \"urn:ietf:params:xml:ns:contact-1.0\",\n ],\n )\n+ # We should only ever have one active connection at a time\n+ self.connection_lock = BoundedSemaphore(1)\n+\n+ self.connection_lock.acquire()\n try:\n self._initialize_client()\n except Exception:\n- logger.warning(\"Unable to configure epplib. Registrar cannot contact registry.\")\n+ logger.warning(\"Unable to configure the connection to the registry.\")\n+ finally:\n+ self.connection_lock.release()\n \n def _initialize_client(self) -> None:\n \"\"\"Initialize a client, assuming _login defined. Sets _client to initialized\n@@ -74,11 +81,7 @@\n )\n try:\n # use the _client object to connect\n- self._client.connect() # type: ignore\n- response = self._client.send(self._login) # type: ignore\n- if response.code >= 2000: # type: ignore\n- self._client.close() # type: ignore\n- raise LoginError(response.msg) # type: ignore\n+ self._connect()\n except TransportError as err:\n message = \"_initialize_client failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n@@ -90,13 +93,33 @@\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n \n+ def _connect(self) -> None:\n+ \"\"\"Connects to EPP. Sends a login command. If an invalid response is returned,\n+ the client will be closed and a LoginError raised.\"\"\"\n+ self._client.connect() # type: ignore\n+ response = self._client.send(self._login) # type: ignore\n+ if response.code >= 2000: # type: ignore\n+ self._client.close() # type: ignore\n+ raise LoginError(response.msg) # type: ignore\n+\n def _disconnect(self) -> None:\n- \"\"\"Close the connection.\"\"\"\n+ \"\"\"Close the connection. Sends a logout command and closes the connection.\"\"\"\n+ self._send_logout_command()\n+ self._close_client()\n+\n+ def _send_logout_command(self):\n+ \"\"\"Sends a logout command to epp\"\"\"\n try:\n self._client.send(commands.Logout()) # type: ignore\n- self._client.close() # type: ignore\n- except Exception:\n- logger.warning(\"Connection to registry was not cleanly closed.\")\n+ except Exception as err:\n+ logger.warning(f\"Logout command not sent successfully: {err}\")\n+\n+ def _close_client(self):\n+ \"\"\"Closes an active client connection\"\"\"\n+ try:\n+ self._client.close()\n+ except Exception as err:\n+ logger.warning(f\"Connection to registry was not cleanly closed: {err}\")\n \n def _send(self, command):\n \"\"\"Helper function used by `send`.\"\"\"\n@@ -146,6 +169,8 @@\n cmd_type = command.__class__.__name__\n if not cleaned:\n raise ValueError(\"Please sanitize user input before sending it.\")\n+\n+ self.connection_lock.acquire()\n try:\n return self._send(command)\n except RegistryError as err:\n@@ -161,6 +186,8 @@\n return self._retry(command)\n else:\n raise err\n+ finally:\n+ self.connection_lock.release()\n \n \n try:\n", "issue": "Socket in use error when sending epp messages\n### Current Behavior\n\nOccasionally, we are unable to send messages via epp because of the following error:\r\n\"InfoDomain failed to execute due to an unknown error. 
Error: This socket is already used by another greenlet: <bound method Waiter.switch of <gevent._gevent_c_waiter.Waiter object at 0x7f8e1e5f6750>>\"\n\n### Expected Behavior\n\nNo error should occur.\n\n### Steps to Reproduce\n\nreproduction steps unknown, this happens irregularly and we haven't narrowed down a specif set of easily followed steps that can always guarantee this occurs. see additional context\r\n\n\n### Environment\n\n_No response_\n\n### Additional Context\n\nThis bug started after we added gunicorn using gevent instead of just the default sync option. While we still should be asynchronious this creates an issue with how we are using our sockets on epp calls. We were able to see this more with the removal of the connection pool. \n\n### Issue Links\n\n_No response_\n", "before_files": [{"content": "\"\"\"Provide a wrapper around epplib to handle authentication and errors.\"\"\"\n\nimport logging\n\ntry:\n from epplib.client import Client\n from epplib import commands\n from epplib.exceptions import TransportError, ParsingError\n from epplib.transport import SocketTransport\nexcept ImportError:\n pass\n\nfrom django.conf import settings\n\nfrom .cert import Cert, Key\nfrom .errors import ErrorCode, LoginError, RegistryError\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Write cert and key to disk\n CERT = Cert()\n KEY = Key()\nexcept Exception:\n CERT = None # type: ignore\n KEY = None # type: ignore\n logger.warning(\n \"Problem with client certificate. Registrar cannot contact registry.\",\n exc_info=True,\n )\n\n\nclass EPPLibWrapper:\n \"\"\"\n A wrapper over epplib's client.\n\n ATTN: This should not be used directly. Use `Domain` from domain.py.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize settings which will be used for all connections.\"\"\"\n # set _client to None initially. In the event that the __init__ fails\n # before _client initializes, app should still start and be in a state\n # that it can attempt _client initialization on send attempts\n self._client = None # type: ignore\n # prepare (but do not send) a Login command\n self._login = commands.Login(\n cl_id=settings.SECRET_REGISTRY_CL_ID,\n password=settings.SECRET_REGISTRY_PASSWORD,\n obj_uris=[\n \"urn:ietf:params:xml:ns:domain-1.0\",\n \"urn:ietf:params:xml:ns:contact-1.0\",\n ],\n )\n try:\n self._initialize_client()\n except Exception:\n logger.warning(\"Unable to configure epplib. Registrar cannot contact registry.\")\n\n def _initialize_client(self) -> None:\n \"\"\"Initialize a client, assuming _login defined. Sets _client to initialized\n client. 
Raises errors if initialization fails.\n This method will be called at app initialization, and also during retries.\"\"\"\n # establish a client object with a TCP socket transport\n # note that type: ignore added in several places because linter complains\n # about _client initially being set to None, and None type doesn't match code\n self._client = Client( # type: ignore\n SocketTransport(\n settings.SECRET_REGISTRY_HOSTNAME,\n cert_file=CERT.filename,\n key_file=KEY.filename,\n password=settings.SECRET_REGISTRY_KEY_PASSPHRASE,\n )\n )\n try:\n # use the _client object to connect\n self._client.connect() # type: ignore\n response = self._client.send(self._login) # type: ignore\n if response.code >= 2000: # type: ignore\n self._client.close() # type: ignore\n raise LoginError(response.msg) # type: ignore\n except TransportError as err:\n message = \"_initialize_client failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err\n except LoginError as err:\n raise err\n except Exception as err:\n message = \"_initialize_client failed to execute due to an unknown error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n\n def _disconnect(self) -> None:\n \"\"\"Close the connection.\"\"\"\n try:\n self._client.send(commands.Logout()) # type: ignore\n self._client.close() # type: ignore\n except Exception:\n logger.warning(\"Connection to registry was not cleanly closed.\")\n\n def _send(self, command):\n \"\"\"Helper function used by `send`.\"\"\"\n cmd_type = command.__class__.__name__\n\n try:\n # check for the condition that the _client was not initialized properly\n # at app initialization\n if self._client is None:\n self._initialize_client()\n response = self._client.send(command)\n except (ValueError, ParsingError) as err:\n message = f\"{cmd_type} failed to execute due to some syntax error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n except TransportError as err:\n message = f\"{cmd_type} failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err\n except LoginError as err:\n # For linter due to it not liking this line length\n text = \"failed to execute due to a registry login error.\"\n message = f\"{cmd_type} {text}\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n except Exception as err:\n message = f\"{cmd_type} failed to execute due to an unknown error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n else:\n if response.code >= 2000:\n raise RegistryError(response.msg, code=response.code)\n else:\n return response\n\n def _retry(self, command):\n \"\"\"Retry sending a command through EPP by re-initializing the client\n and then sending the command.\"\"\"\n # re-initialize by disconnecting and initial\n self._disconnect()\n self._initialize_client()\n return self._send(command)\n\n def send(self, command, *, cleaned=False):\n \"\"\"Login, the send the command. 
Retry once if an error is found\"\"\"\n # try to prevent use of this method without appropriate safeguards\n cmd_type = command.__class__.__name__\n if not cleaned:\n raise ValueError(\"Please sanitize user input before sending it.\")\n try:\n return self._send(command)\n except RegistryError as err:\n if (\n err.is_transport_error()\n or err.is_connection_error()\n or err.is_session_error()\n or err.is_server_error()\n or err.should_retry()\n ):\n message = f\"{cmd_type} failed and will be retried\"\n logger.info(f\"{message} Error: {err}\")\n return self._retry(command)\n else:\n raise err\n\n\ntry:\n # Initialize epplib\n CLIENT = EPPLibWrapper()\n logger.info(\"registry client initialized\")\nexcept Exception:\n logger.warning(\"Unable to configure epplib. Registrar cannot contact registry.\")\n", "path": "src/epplibwrapper/client.py"}]} | 2,566 | 860 |
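The record above pairs the greenlet/socket contention report with a fix that serializes registry access behind a gevent `BoundedSemaphore`. The snippet below is a minimal, hypothetical sketch of that locking pattern only; it is not code from the repository, and the `SerializedSender` class and its `send` signature are invented for illustration.

```python
from gevent.lock import BoundedSemaphore


class SerializedSender:
    """Let only one greenlet at a time use a shared EPP client/socket."""

    def __init__(self, client):
        self._client = client
        # A single permit: concurrent greenlets queue up instead of sharing the socket.
        self._lock = BoundedSemaphore(1)

    def send(self, command):
        self._lock.acquire()
        try:
            return self._client.send(command)
        finally:
            # Release even if send() raises, so later greenlets are not blocked forever.
            self._lock.release()
```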
gh_patches_debug_36070 | rasdani/github-patches | git_diff | getnikola__nikola-2178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The `IPython.nbconvert` package has been deprecated.
IPython 4.0 was just released (http://blog.jupyter.org/2015/08/12/first-release-of-jupyter/). The `IPython.nbconvert` package has been deprecated. The [`nbconvert`-based ipynb compiler](https://github.com/getnikola/nikola/blob/15217bc93f0af0d70ffe33e0ea067d81ddf32403/nikola/plugins/compile/ipynb.py) should be updated where necessary to reflect any API changes in the 4.0 release.
</issue>
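For orientation, here is a minimal sketch of the kind of import fallback such an update typically needs: try the split-out Jupyter packages first, then fall back to the legacy `IPython` namespace that pre-4.0 installs still provide. The `IPYNB_MODERN` flag name is an assumption of this sketch, not something the issue prescribes.

```python
# Prefer the standalone Jupyter packages; fall back to the old IPython layout.
try:
    from nbconvert.exporters import HTMLExporter
    import nbformat
    from jupyter_client import kernelspec
    IPYNB_MODERN = True
except ImportError:
    from IPython.nbconvert.exporters import HTMLExporter
    import IPython.nbformat.current as nbformat
    kernelspec = None
    IPYNB_MODERN = False
```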
<code>
[start of nikola/plugins/compile/ipynb.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2013-2015 Damián Avila, Chris Warrick and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Implementation of compile_html based on nbconvert."""
28
29 from __future__ import unicode_literals, print_function
30 import io
31 import os
32 import sys
33
34 try:
35 import IPython
36 from IPython.nbconvert.exporters import HTMLExporter
37 if IPython.version_info[0] >= 3: # API changed with 3.0.0
38 from IPython import nbformat
39 current_nbformat = nbformat.current_nbformat
40 from IPython.kernel import kernelspec
41 else:
42 import IPython.nbformat.current as nbformat
43 current_nbformat = 'json'
44 kernelspec = None
45
46 from IPython.config import Config
47 flag = True
48 except ImportError:
49 flag = None
50
51 from nikola.plugin_categories import PageCompiler
52 from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
53
54
55 class CompileIPynb(PageCompiler):
56 """Compile IPynb into HTML."""
57
58 name = "ipynb"
59 friendly_name = "Jupyter/IPython Notebook"
60 demote_headers = True
61 default_kernel = 'python2' if sys.version_info[0] == 2 else 'python3'
62
63 def set_site(self, site):
64 """Set Nikola site."""
65 self.logger = get_logger('compile_ipynb', STDERR_HANDLER)
66 super(CompileIPynb, self).set_site(site)
67
68 def compile_html_string(self, source, is_two_file=True):
69 """Export notebooks as HTML strings."""
70 if flag is None:
71 req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
72 HTMLExporter.default_template = 'basic'
73 c = Config(self.site.config['IPYNB_CONFIG'])
74 exportHtml = HTMLExporter(config=c)
75 with io.open(source, "r", encoding="utf8") as in_file:
76 nb_json = nbformat.read(in_file, current_nbformat)
77 (body, resources) = exportHtml.from_notebook_node(nb_json)
78 return body
79
80 def compile_html(self, source, dest, is_two_file=True):
81 """Compile source file into HTML and save as dest."""
82 makedirs(os.path.dirname(dest))
83 with io.open(dest, "w+", encoding="utf8") as out_file:
84 out_file.write(self.compile_html_string(source, is_two_file))
85
86 def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
87 """Read metadata directly from ipynb file.
88
89 As ipynb file support arbitrary metadata as json, the metadata used by Nikola
90 will be assume to be in the 'nikola' subfield.
91 """
92 if flag is None:
93 req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
94 source = post.source_path
95 with io.open(source, "r", encoding="utf8") as in_file:
96 nb_json = nbformat.read(in_file, current_nbformat)
97 # Metadata might not exist in two-file posts or in hand-crafted
98 # .ipynb files.
99 return nb_json.get('metadata', {}).get('nikola', {})
100
101 def create_post(self, path, **kw):
102 """Create a new post."""
103 if flag is None:
104 req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
105 content = kw.pop('content', None)
106 onefile = kw.pop('onefile', False)
107 kernel = kw.pop('ipython_kernel', None)
108 # is_page is not needed to create the file
109 kw.pop('is_page', False)
110
111 metadata = {}
112 metadata.update(self.default_metadata)
113 metadata.update(kw)
114
115 makedirs(os.path.dirname(path))
116
117 if content.startswith("{"):
118 # imported .ipynb file, guaranteed to start with "{" because it’s JSON.
119 nb = nbformat.reads(content, current_nbformat)
120 else:
121 if IPython.version_info[0] >= 3:
122 nb = nbformat.v4.new_notebook()
123 nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
124 else:
125 nb = nbformat.new_notebook()
126 nb["worksheets"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]
127
128 if kernelspec is not None:
129 if kernel is None:
130 kernel = self.default_kernel
131 self.logger.notice('No kernel specified, assuming "{0}".'.format(kernel))
132
133 IPYNB_KERNELS = {}
134 ksm = kernelspec.KernelSpecManager()
135 for k in ksm.find_kernel_specs():
136 IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
137 IPYNB_KERNELS[k]['name'] = k
138 del IPYNB_KERNELS[k]['argv']
139
140 if kernel not in IPYNB_KERNELS:
141 self.logger.error('Unknown kernel "{0}". Maybe you mispelled it?'.format(kernel))
142 self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
143 raise Exception('Unknown kernel "{0}"'.format(kernel))
144
145 nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
146 else:
147 # Older IPython versions don’t need kernelspecs.
148 pass
149
150 if onefile:
151 nb["metadata"]["nikola"] = metadata
152
153 with io.open(path, "w+", encoding="utf8") as fd:
154 if IPython.version_info[0] >= 3:
155 nbformat.write(nb, fd, 4)
156 else:
157 nbformat.write(nb, fd, 'ipynb')
158
[end of nikola/plugins/compile/ipynb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/compile/ipynb.py b/nikola/plugins/compile/ipynb.py
--- a/nikola/plugins/compile/ipynb.py
+++ b/nikola/plugins/compile/ipynb.py
@@ -32,21 +32,33 @@
import sys
try:
- import IPython
- from IPython.nbconvert.exporters import HTMLExporter
- if IPython.version_info[0] >= 3: # API changed with 3.0.0
- from IPython import nbformat
- current_nbformat = nbformat.current_nbformat
- from IPython.kernel import kernelspec
- else:
- import IPython.nbformat.current as nbformat
- current_nbformat = 'json'
- kernelspec = None
-
- from IPython.config import Config
+ from nbconvert.exporters import HTMLExporter
+ import nbformat
+ current_nbformat = nbformat.current_nbformat
+ from jupyter_client import kernelspec
+ from traitlets.config import Config
flag = True
+ ipy_modern = True
except ImportError:
- flag = None
+ try:
+ import IPython
+ from IPython.nbconvert.exporters import HTMLExporter
+ if IPython.version_info[0] >= 3: # API changed with 3.0.0
+ from IPython import nbformat
+ current_nbformat = nbformat.current_nbformat
+ from IPython.kernel import kernelspec
+ ipy_modern = True
+ else:
+ import IPython.nbformat.current as nbformat
+ current_nbformat = 'json'
+ kernelspec = None
+ ipy_modern = False
+
+ from IPython.config import Config
+ flag = True
+ except ImportError:
+ flag = None
+ ipy_modern = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
@@ -118,7 +130,7 @@
# imported .ipynb file, guaranteed to start with "{" because it’s JSON.
nb = nbformat.reads(content, current_nbformat)
else:
- if IPython.version_info[0] >= 3:
+ if ipy_modern:
nb = nbformat.v4.new_notebook()
nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
else:
@@ -151,7 +163,7 @@
nb["metadata"]["nikola"] = metadata
with io.open(path, "w+", encoding="utf8") as fd:
- if IPython.version_info[0] >= 3:
+ if ipy_modern:
nbformat.write(nb, fd, 4)
else:
nbformat.write(nb, fd, 'ipynb')
| {"golden_diff": "diff --git a/nikola/plugins/compile/ipynb.py b/nikola/plugins/compile/ipynb.py\n--- a/nikola/plugins/compile/ipynb.py\n+++ b/nikola/plugins/compile/ipynb.py\n@@ -32,21 +32,33 @@\n import sys\n \n try:\n- import IPython\n- from IPython.nbconvert.exporters import HTMLExporter\n- if IPython.version_info[0] >= 3: # API changed with 3.0.0\n- from IPython import nbformat\n- current_nbformat = nbformat.current_nbformat\n- from IPython.kernel import kernelspec\n- else:\n- import IPython.nbformat.current as nbformat\n- current_nbformat = 'json'\n- kernelspec = None\n-\n- from IPython.config import Config\n+ from nbconvert.exporters import HTMLExporter\n+ import nbformat\n+ current_nbformat = nbformat.current_nbformat\n+ from jupyter_client import kernelspec\n+ from traitlets.config import Config\n flag = True\n+ ipy_modern = True\n except ImportError:\n- flag = None\n+ try:\n+ import IPython\n+ from IPython.nbconvert.exporters import HTMLExporter\n+ if IPython.version_info[0] >= 3: # API changed with 3.0.0\n+ from IPython import nbformat\n+ current_nbformat = nbformat.current_nbformat\n+ from IPython.kernel import kernelspec\n+ ipy_modern = True\n+ else:\n+ import IPython.nbformat.current as nbformat\n+ current_nbformat = 'json'\n+ kernelspec = None\n+ ipy_modern = False\n+\n+ from IPython.config import Config\n+ flag = True\n+ except ImportError:\n+ flag = None\n+ ipy_modern = None\n \n from nikola.plugin_categories import PageCompiler\n from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER\n@@ -118,7 +130,7 @@\n # imported .ipynb file, guaranteed to start with \"{\" because it\u2019s JSON.\n nb = nbformat.reads(content, current_nbformat)\n else:\n- if IPython.version_info[0] >= 3:\n+ if ipy_modern:\n nb = nbformat.v4.new_notebook()\n nb[\"cells\"] = [nbformat.v4.new_markdown_cell(content)]\n else:\n@@ -151,7 +163,7 @@\n nb[\"metadata\"][\"nikola\"] = metadata\n \n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n- if IPython.version_info[0] >= 3:\n+ if ipy_modern:\n nbformat.write(nb, fd, 4)\n else:\n nbformat.write(nb, fd, 'ipynb')\n", "issue": " The `IPython.nbconvert` package has been deprecated.\nIPython 4.0 was just released (http://blog.jupyter.org/2015/08/12/first-release-of-jupyter/). The `IPython.nbconvert` package has been deprecated. The [`nbconvert`-based ipynb compiler](https://github.com/getnikola/nikola/blob/15217bc93f0af0d70ffe33e0ea067d81ddf32403/nikola/plugins/compile/ipynb.py) should be updated where necessary to reflect any API changes in the 4.0 release.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2013-2015 Dami\u00e1n Avila, Chris Warrick and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Implementation of compile_html based on nbconvert.\"\"\"\n\nfrom __future__ import unicode_literals, print_function\nimport io\nimport os\nimport sys\n\ntry:\n import IPython\n from IPython.nbconvert.exporters import HTMLExporter\n if IPython.version_info[0] >= 3: # API changed with 3.0.0\n from IPython import nbformat\n current_nbformat = nbformat.current_nbformat\n from IPython.kernel import kernelspec\n else:\n import IPython.nbformat.current as nbformat\n current_nbformat = 'json'\n kernelspec = None\n\n from IPython.config import Config\n flag = True\nexcept ImportError:\n flag = None\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER\n\n\nclass CompileIPynb(PageCompiler):\n \"\"\"Compile IPynb into HTML.\"\"\"\n\n name = \"ipynb\"\n friendly_name = \"Jupyter/IPython Notebook\"\n demote_headers = True\n default_kernel = 'python2' if sys.version_info[0] == 2 else 'python3'\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.logger = get_logger('compile_ipynb', STDERR_HANDLER)\n super(CompileIPynb, self).set_site(site)\n\n def compile_html_string(self, source, is_two_file=True):\n \"\"\"Export notebooks as HTML strings.\"\"\"\n if flag is None:\n req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')\n HTMLExporter.default_template = 'basic'\n c = Config(self.site.config['IPYNB_CONFIG'])\n exportHtml = HTMLExporter(config=c)\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n nb_json = nbformat.read(in_file, current_nbformat)\n (body, resources) = exportHtml.from_notebook_node(nb_json)\n return body\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile source file into HTML and save as dest.\"\"\"\n makedirs(os.path.dirname(dest))\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n out_file.write(self.compile_html_string(source, is_two_file))\n\n def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):\n \"\"\"Read metadata directly from ipynb file.\n\n As ipynb file support arbitrary metadata as json, the metadata used by Nikola\n will be assume to be in the 'nikola' subfield.\n \"\"\"\n if flag is None:\n req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')\n source = post.source_path\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n nb_json = nbformat.read(in_file, current_nbformat)\n # Metadata might not exist in two-file posts or in hand-crafted\n # .ipynb files.\n return nb_json.get('metadata', {}).get('nikola', {})\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n if flag is None:\n req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n kernel = kw.pop('ipython_kernel', None)\n # is_page is not needed to create the file\n kw.pop('is_page', False)\n\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n\n makedirs(os.path.dirname(path))\n\n if content.startswith(\"{\"):\n # imported .ipynb file, guaranteed to start with \"{\" because it\u2019s JSON.\n nb = nbformat.reads(content, current_nbformat)\n else:\n if IPython.version_info[0] 
>= 3:\n nb = nbformat.v4.new_notebook()\n nb[\"cells\"] = [nbformat.v4.new_markdown_cell(content)]\n else:\n nb = nbformat.new_notebook()\n nb[\"worksheets\"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]\n\n if kernelspec is not None:\n if kernel is None:\n kernel = self.default_kernel\n self.logger.notice('No kernel specified, assuming \"{0}\".'.format(kernel))\n\n IPYNB_KERNELS = {}\n ksm = kernelspec.KernelSpecManager()\n for k in ksm.find_kernel_specs():\n IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()\n IPYNB_KERNELS[k]['name'] = k\n del IPYNB_KERNELS[k]['argv']\n\n if kernel not in IPYNB_KERNELS:\n self.logger.error('Unknown kernel \"{0}\". Maybe you mispelled it?'.format(kernel))\n self.logger.info(\"Available kernels: {0}\".format(\", \".join(sorted(IPYNB_KERNELS))))\n raise Exception('Unknown kernel \"{0}\"'.format(kernel))\n\n nb[\"metadata\"][\"kernelspec\"] = IPYNB_KERNELS[kernel]\n else:\n # Older IPython versions don\u2019t need kernelspecs.\n pass\n\n if onefile:\n nb[\"metadata\"][\"nikola\"] = metadata\n\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if IPython.version_info[0] >= 3:\n nbformat.write(nb, fd, 4)\n else:\n nbformat.write(nb, fd, 'ipynb')\n", "path": "nikola/plugins/compile/ipynb.py"}]} | 2,551 | 646 |
gh_patches_debug_33032 | rasdani/github-patches | git_diff | projectmesa__mesa-1262 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Sigma.js backend for network visualization
Reading #388, I conclude that Sigma.js is less featureful than D3.js. We should just stick to polishing the D3.js version.
</issue>
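Purely as a sketch of what call sites could look like once the Sigma.js path is gone, assuming the `NetworkModule` constructor simply loses its `library` argument (the issue does not spell this out), server code would reduce to:

```python
from mesa.visualization.modules import NetworkModule


def network_portrayal(G):
    # Minimal portrayal: small grey nodes, black edges.
    return {
        "nodes": [{"id": n, "size": 3, "color": "#808080"} for n in G.nodes],
        "edges": [{"source": s, "target": t, "color": "#000000"} for s, t in G.edges],
    }


# No more library="sigma"/"d3" switch; D3.js would be the only backend.
network = NetworkModule(network_portrayal, 500, 500)
```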
<code>
[start of examples/virus_on_network/virus_on_network/server.py]
1 import math
2
3 from mesa.visualization.ModularVisualization import ModularServer
4 from mesa.visualization.UserParam import UserSettableParameter
5 from mesa.visualization.modules import ChartModule
6 from mesa.visualization.modules import NetworkModule
7 from mesa.visualization.modules import TextElement
8 from .model import VirusOnNetwork, State, number_infected
9
10
11 def network_portrayal(G):
12 # The model ensures there is always 1 agent per node
13
14 def node_color(agent):
15 return {State.INFECTED: "#FF0000", State.SUSCEPTIBLE: "#008000"}.get(
16 agent.state, "#808080"
17 )
18
19 def edge_color(agent1, agent2):
20 if State.RESISTANT in (agent1.state, agent2.state):
21 return "#000000"
22 return "#e8e8e8"
23
24 def edge_width(agent1, agent2):
25 if State.RESISTANT in (agent1.state, agent2.state):
26 return 3
27 return 2
28
29 def get_agents(source, target):
30 return G.nodes[source]["agent"][0], G.nodes[target]["agent"][0]
31
32 portrayal = dict()
33 portrayal["nodes"] = [
34 {
35 "size": 6,
36 "color": node_color(agents[0]),
37 "tooltip": f"id: {agents[0].unique_id}<br>state: {agents[0].state.name}",
38 }
39 for (_, agents) in G.nodes.data("agent")
40 ]
41
42 portrayal["edges"] = [
43 {
44 "source": source,
45 "target": target,
46 "color": edge_color(*get_agents(source, target)),
47 "width": edge_width(*get_agents(source, target)),
48 }
49 for (source, target) in G.edges
50 ]
51
52 return portrayal
53
54
55 network = NetworkModule(network_portrayal, 500, 500, library="d3")
56 chart = ChartModule(
57 [
58 {"Label": "Infected", "Color": "#FF0000"},
59 {"Label": "Susceptible", "Color": "#008000"},
60 {"Label": "Resistant", "Color": "#808080"},
61 ]
62 )
63
64
65 class MyTextElement(TextElement):
66 def render(self, model):
67 ratio = model.resistant_susceptible_ratio()
68 ratio_text = "∞" if ratio is math.inf else f"{ratio:.2f}"
69 infected_text = str(number_infected(model))
70
71 return "Resistant/Susceptible Ratio: {}<br>Infected Remaining: {}".format(
72 ratio_text, infected_text
73 )
74
75
76 model_params = {
77 "num_nodes": UserSettableParameter(
78 "slider",
79 "Number of agents",
80 10,
81 10,
82 100,
83 1,
84 description="Choose how many agents to include in the model",
85 ),
86 "avg_node_degree": UserSettableParameter(
87 "slider", "Avg Node Degree", 3, 3, 8, 1, description="Avg Node Degree"
88 ),
89 "initial_outbreak_size": UserSettableParameter(
90 "slider",
91 "Initial Outbreak Size",
92 1,
93 1,
94 10,
95 1,
96 description="Initial Outbreak Size",
97 ),
98 "virus_spread_chance": UserSettableParameter(
99 "slider",
100 "Virus Spread Chance",
101 0.4,
102 0.0,
103 1.0,
104 0.1,
105 description="Probability that susceptible neighbor will be infected",
106 ),
107 "virus_check_frequency": UserSettableParameter(
108 "slider",
109 "Virus Check Frequency",
110 0.4,
111 0.0,
112 1.0,
113 0.1,
114 description="Frequency the nodes check whether they are infected by " "a virus",
115 ),
116 "recovery_chance": UserSettableParameter(
117 "slider",
118 "Recovery Chance",
119 0.3,
120 0.0,
121 1.0,
122 0.1,
123 description="Probability that the virus will be removed",
124 ),
125 "gain_resistance_chance": UserSettableParameter(
126 "slider",
127 "Gain Resistance Chance",
128 0.5,
129 0.0,
130 1.0,
131 0.1,
132 description="Probability that a recovered agent will become "
133 "resistant to this virus in the future",
134 ),
135 }
136
137 server = ModularServer(
138 VirusOnNetwork, [network, MyTextElement(), chart], "Virus Model", model_params
139 )
140 server.port = 8521
141
[end of examples/virus_on_network/virus_on_network/server.py]
[start of examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py]
1 from mesa.visualization.ModularVisualization import ModularServer
2 from mesa.visualization.UserParam import UserSettableParameter
3 from mesa.visualization.modules import ChartModule
4 from mesa.visualization.modules import NetworkModule
5 from .model import BoltzmannWealthModelNetwork
6
7
8 def network_portrayal(G):
9 # The model ensures there is 0 or 1 agent per node
10
11 portrayal = dict()
12 portrayal["nodes"] = [
13 {
14 "id": node_id,
15 "size": 3 if agents else 1,
16 "color": "#CC0000" if not agents or agents[0].wealth == 0 else "#007959",
17 "label": None
18 if not agents
19 else f"Agent:{agents[0].unique_id} Wealth:{agents[0].wealth}",
20 }
21 for (node_id, agents) in G.nodes.data("agent")
22 ]
23
24 portrayal["edges"] = [
25 {"id": edge_id, "source": source, "target": target, "color": "#000000"}
26 for edge_id, (source, target) in enumerate(G.edges)
27 ]
28
29 return portrayal
30
31
32 grid = NetworkModule(network_portrayal, 500, 500, library="sigma")
33 chart = ChartModule(
34 [{"Label": "Gini", "Color": "Black"}], data_collector_name="datacollector"
35 )
36
37 model_params = {
38 "num_agents": UserSettableParameter(
39 "slider",
40 "Number of agents",
41 7,
42 2,
43 10,
44 1,
45 description="Choose how many agents to include in the model",
46 ),
47 "num_nodes": UserSettableParameter(
48 "slider",
49 "Number of nodes",
50 10,
51 3,
52 12,
53 1,
54 description="Choose how many nodes to include in the model, with at "
55 "least the same number of agents",
56 ),
57 }
58
59 server = ModularServer(
60 BoltzmannWealthModelNetwork, [grid, chart], "Money Model", model_params
61 )
62 server.port = 8521
63
[end of examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py]
[start of mesa/visualization/modules/NetworkVisualization.py]
1 """
2 Network Visualization Module
3 ============
4
5 Module for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.
6
7 """
8 from mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE
9
10
11 class NetworkModule(VisualizationElement):
12 package_includes = []
13
14 def __init__(
15 self, portrayal_method, canvas_height=500, canvas_width=500, library="sigma"
16 ):
17 library_types = ["sigma", "d3"]
18 if library not in library_types:
19 raise ValueError(
20 f"Invalid javascript library type. Expected one of: {library_types}"
21 )
22
23 NetworkModule.package_includes = (
24 ["NetworkModule_sigma.js", "sigma.min.js"]
25 if library == "sigma"
26 else ["NetworkModule_d3.js", D3_JS_FILE]
27 )
28
29 self.portrayal_method = portrayal_method
30 self.canvas_height = canvas_height
31 self.canvas_width = canvas_width
32 new_element = f"new NetworkModule({self.canvas_width}, {self.canvas_height})"
33 self.js_code = "elements.push(" + new_element + ");"
34
35 def render(self, model):
36 return self.portrayal_method(model.G)
37
[end of mesa/visualization/modules/NetworkVisualization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py
--- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py
+++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py
@@ -29,7 +29,7 @@
return portrayal
-grid = NetworkModule(network_portrayal, 500, 500, library="sigma")
+grid = NetworkModule(network_portrayal, 500, 500)
chart = ChartModule(
[{"Label": "Gini", "Color": "Black"}], data_collector_name="datacollector"
)
diff --git a/examples/virus_on_network/virus_on_network/server.py b/examples/virus_on_network/virus_on_network/server.py
--- a/examples/virus_on_network/virus_on_network/server.py
+++ b/examples/virus_on_network/virus_on_network/server.py
@@ -52,7 +52,7 @@
return portrayal
-network = NetworkModule(network_portrayal, 500, 500, library="d3")
+network = NetworkModule(network_portrayal, 500, 500)
chart = ChartModule(
[
{"Label": "Infected", "Color": "#FF0000"},
diff --git a/mesa/visualization/modules/NetworkVisualization.py b/mesa/visualization/modules/NetworkVisualization.py
--- a/mesa/visualization/modules/NetworkVisualization.py
+++ b/mesa/visualization/modules/NetworkVisualization.py
@@ -2,7 +2,7 @@
Network Visualization Module
============
-Module for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.
+Module for rendering the network, using [d3.js](https://d3js.org/) framework.
"""
from mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE
@@ -12,19 +12,12 @@
package_includes = []
def __init__(
- self, portrayal_method, canvas_height=500, canvas_width=500, library="sigma"
+ self,
+ portrayal_method,
+ canvas_height=500,
+ canvas_width=500,
):
- library_types = ["sigma", "d3"]
- if library not in library_types:
- raise ValueError(
- f"Invalid javascript library type. Expected one of: {library_types}"
- )
-
- NetworkModule.package_includes = (
- ["NetworkModule_sigma.js", "sigma.min.js"]
- if library == "sigma"
- else ["NetworkModule_d3.js", D3_JS_FILE]
- )
+ NetworkModule.package_includes = ["NetworkModule_d3.js", D3_JS_FILE]
self.portrayal_method = portrayal_method
self.canvas_height = canvas_height
| {"golden_diff": "diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py\n--- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py\n+++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py\n@@ -29,7 +29,7 @@\n return portrayal\n \n \n-grid = NetworkModule(network_portrayal, 500, 500, library=\"sigma\")\n+grid = NetworkModule(network_portrayal, 500, 500)\n chart = ChartModule(\n [{\"Label\": \"Gini\", \"Color\": \"Black\"}], data_collector_name=\"datacollector\"\n )\ndiff --git a/examples/virus_on_network/virus_on_network/server.py b/examples/virus_on_network/virus_on_network/server.py\n--- a/examples/virus_on_network/virus_on_network/server.py\n+++ b/examples/virus_on_network/virus_on_network/server.py\n@@ -52,7 +52,7 @@\n return portrayal\n \n \n-network = NetworkModule(network_portrayal, 500, 500, library=\"d3\")\n+network = NetworkModule(network_portrayal, 500, 500)\n chart = ChartModule(\n [\n {\"Label\": \"Infected\", \"Color\": \"#FF0000\"},\ndiff --git a/mesa/visualization/modules/NetworkVisualization.py b/mesa/visualization/modules/NetworkVisualization.py\n--- a/mesa/visualization/modules/NetworkVisualization.py\n+++ b/mesa/visualization/modules/NetworkVisualization.py\n@@ -2,7 +2,7 @@\n Network Visualization Module\n ============\n \n-Module for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.\n+Module for rendering the network, using [d3.js](https://d3js.org/) framework.\n \n \"\"\"\n from mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE\n@@ -12,19 +12,12 @@\n package_includes = []\n \n def __init__(\n- self, portrayal_method, canvas_height=500, canvas_width=500, library=\"sigma\"\n+ self,\n+ portrayal_method,\n+ canvas_height=500,\n+ canvas_width=500,\n ):\n- library_types = [\"sigma\", \"d3\"]\n- if library not in library_types:\n- raise ValueError(\n- f\"Invalid javascript library type. Expected one of: {library_types}\"\n- )\n-\n- NetworkModule.package_includes = (\n- [\"NetworkModule_sigma.js\", \"sigma.min.js\"]\n- if library == \"sigma\"\n- else [\"NetworkModule_d3.js\", D3_JS_FILE]\n- )\n+ NetworkModule.package_includes = [\"NetworkModule_d3.js\", D3_JS_FILE]\n \n self.portrayal_method = portrayal_method\n self.canvas_height = canvas_height\n", "issue": "Remove Sigma.js backend for network visualization\nReading #388, I conclude that Sigma.js is less featureful than D3.js. 
We should just stick to polishing the D3.js version.\n", "before_files": [{"content": "import math\n\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.visualization.modules import NetworkModule\nfrom mesa.visualization.modules import TextElement\nfrom .model import VirusOnNetwork, State, number_infected\n\n\ndef network_portrayal(G):\n # The model ensures there is always 1 agent per node\n\n def node_color(agent):\n return {State.INFECTED: \"#FF0000\", State.SUSCEPTIBLE: \"#008000\"}.get(\n agent.state, \"#808080\"\n )\n\n def edge_color(agent1, agent2):\n if State.RESISTANT in (agent1.state, agent2.state):\n return \"#000000\"\n return \"#e8e8e8\"\n\n def edge_width(agent1, agent2):\n if State.RESISTANT in (agent1.state, agent2.state):\n return 3\n return 2\n\n def get_agents(source, target):\n return G.nodes[source][\"agent\"][0], G.nodes[target][\"agent\"][0]\n\n portrayal = dict()\n portrayal[\"nodes\"] = [\n {\n \"size\": 6,\n \"color\": node_color(agents[0]),\n \"tooltip\": f\"id: {agents[0].unique_id}<br>state: {agents[0].state.name}\",\n }\n for (_, agents) in G.nodes.data(\"agent\")\n ]\n\n portrayal[\"edges\"] = [\n {\n \"source\": source,\n \"target\": target,\n \"color\": edge_color(*get_agents(source, target)),\n \"width\": edge_width(*get_agents(source, target)),\n }\n for (source, target) in G.edges\n ]\n\n return portrayal\n\n\nnetwork = NetworkModule(network_portrayal, 500, 500, library=\"d3\")\nchart = ChartModule(\n [\n {\"Label\": \"Infected\", \"Color\": \"#FF0000\"},\n {\"Label\": \"Susceptible\", \"Color\": \"#008000\"},\n {\"Label\": \"Resistant\", \"Color\": \"#808080\"},\n ]\n)\n\n\nclass MyTextElement(TextElement):\n def render(self, model):\n ratio = model.resistant_susceptible_ratio()\n ratio_text = \"∞\" if ratio is math.inf else f\"{ratio:.2f}\"\n infected_text = str(number_infected(model))\n\n return \"Resistant/Susceptible Ratio: {}<br>Infected Remaining: {}\".format(\n ratio_text, infected_text\n )\n\n\nmodel_params = {\n \"num_nodes\": UserSettableParameter(\n \"slider\",\n \"Number of agents\",\n 10,\n 10,\n 100,\n 1,\n description=\"Choose how many agents to include in the model\",\n ),\n \"avg_node_degree\": UserSettableParameter(\n \"slider\", \"Avg Node Degree\", 3, 3, 8, 1, description=\"Avg Node Degree\"\n ),\n \"initial_outbreak_size\": UserSettableParameter(\n \"slider\",\n \"Initial Outbreak Size\",\n 1,\n 1,\n 10,\n 1,\n description=\"Initial Outbreak Size\",\n ),\n \"virus_spread_chance\": UserSettableParameter(\n \"slider\",\n \"Virus Spread Chance\",\n 0.4,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that susceptible neighbor will be infected\",\n ),\n \"virus_check_frequency\": UserSettableParameter(\n \"slider\",\n \"Virus Check Frequency\",\n 0.4,\n 0.0,\n 1.0,\n 0.1,\n description=\"Frequency the nodes check whether they are infected by \" \"a virus\",\n ),\n \"recovery_chance\": UserSettableParameter(\n \"slider\",\n \"Recovery Chance\",\n 0.3,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that the virus will be removed\",\n ),\n \"gain_resistance_chance\": UserSettableParameter(\n \"slider\",\n \"Gain Resistance Chance\",\n 0.5,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that a recovered agent will become \"\n \"resistant to this virus in the future\",\n ),\n}\n\nserver = ModularServer(\n VirusOnNetwork, [network, MyTextElement(), chart], \"Virus Model\", 
model_params\n)\nserver.port = 8521\n", "path": "examples/virus_on_network/virus_on_network/server.py"}, {"content": "from mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.visualization.modules import NetworkModule\nfrom .model import BoltzmannWealthModelNetwork\n\n\ndef network_portrayal(G):\n # The model ensures there is 0 or 1 agent per node\n\n portrayal = dict()\n portrayal[\"nodes\"] = [\n {\n \"id\": node_id,\n \"size\": 3 if agents else 1,\n \"color\": \"#CC0000\" if not agents or agents[0].wealth == 0 else \"#007959\",\n \"label\": None\n if not agents\n else f\"Agent:{agents[0].unique_id} Wealth:{agents[0].wealth}\",\n }\n for (node_id, agents) in G.nodes.data(\"agent\")\n ]\n\n portrayal[\"edges\"] = [\n {\"id\": edge_id, \"source\": source, \"target\": target, \"color\": \"#000000\"}\n for edge_id, (source, target) in enumerate(G.edges)\n ]\n\n return portrayal\n\n\ngrid = NetworkModule(network_portrayal, 500, 500, library=\"sigma\")\nchart = ChartModule(\n [{\"Label\": \"Gini\", \"Color\": \"Black\"}], data_collector_name=\"datacollector\"\n)\n\nmodel_params = {\n \"num_agents\": UserSettableParameter(\n \"slider\",\n \"Number of agents\",\n 7,\n 2,\n 10,\n 1,\n description=\"Choose how many agents to include in the model\",\n ),\n \"num_nodes\": UserSettableParameter(\n \"slider\",\n \"Number of nodes\",\n 10,\n 3,\n 12,\n 1,\n description=\"Choose how many nodes to include in the model, with at \"\n \"least the same number of agents\",\n ),\n}\n\nserver = ModularServer(\n BoltzmannWealthModelNetwork, [grid, chart], \"Money Model\", model_params\n)\nserver.port = 8521\n", "path": "examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py"}, {"content": "\"\"\"\nNetwork Visualization Module\n============\n\nModule for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.\n\n\"\"\"\nfrom mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE\n\n\nclass NetworkModule(VisualizationElement):\n package_includes = []\n\n def __init__(\n self, portrayal_method, canvas_height=500, canvas_width=500, library=\"sigma\"\n ):\n library_types = [\"sigma\", \"d3\"]\n if library not in library_types:\n raise ValueError(\n f\"Invalid javascript library type. Expected one of: {library_types}\"\n )\n\n NetworkModule.package_includes = (\n [\"NetworkModule_sigma.js\", \"sigma.min.js\"]\n if library == \"sigma\"\n else [\"NetworkModule_d3.js\", D3_JS_FILE]\n )\n\n self.portrayal_method = portrayal_method\n self.canvas_height = canvas_height\n self.canvas_width = canvas_width\n new_element = f\"new NetworkModule({self.canvas_width}, {self.canvas_height})\"\n self.js_code = \"elements.push(\" + new_element + \");\"\n\n def render(self, model):\n return self.portrayal_method(model.G)\n", "path": "mesa/visualization/modules/NetworkVisualization.py"}]} | 2,908 | 671 |
gh_patches_debug_25965 | rasdani/github-patches | git_diff | facebookresearch__fairseq-4808 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`
This is a no-op in eager and in ONNX export, but it's better for other
tracers if this is preserved as shapes directly instead of converted to
a tensor.
There is a little annoying code duplication with
`torch.jit.is_scripting()`, which is unfortunately necessary because we
didn't implement compile-time short circuiting correctly in TorchScript
lol.
</issue>
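As a standalone illustration of the guard described above, here is a sketch that uses only public `torch` entry points; `guarded_shape` is an invented helper name, not anything from fairseq. The checks are kept as separate branches because of the TorchScript short-circuiting caveat the issue mentions.

```python
import torch
import torch.onnx.operators
from torch import Tensor


def guarded_shape(input: Tensor):
    # Duplicate branches instead of `or`: TorchScript does not short-circuit
    # these checks at compile time, as the issue notes.
    if torch.jit.is_scripting():
        return torch.onnx.operators.shape_as_tensor(input)
    elif torch.onnx.is_in_onnx_export():
        return torch.onnx.operators.shape_as_tensor(input)
    else:
        # Eager mode and other tracers keep real integer shapes.
        return input.size()
```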
<code>
[start of fairseq/modules/sinusoidal_positional_embedding.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import math
7 from typing import Any, Optional
8
9 import torch
10 import torch.onnx.operators
11 from fairseq import utils
12 from torch import Tensor, nn
13
14
15 class SinusoidalPositionalEmbedding(nn.Module):
16 """This module produces sinusoidal positional embeddings of any length.
17
18 Padding symbols are ignored.
19 """
20
21 def __init__(self, embedding_dim, padding_idx, init_size=1024):
22 super().__init__()
23 self.embedding_dim = embedding_dim
24 self.padding_idx = padding_idx if padding_idx is not None else 0
25 self.weights = SinusoidalPositionalEmbedding.get_embedding(
26 init_size, embedding_dim, padding_idx
27 )
28 self.onnx_trace = False
29 self.register_buffer("_float_tensor", torch.FloatTensor(1))
30 self.max_positions = int(1e5)
31
32 def prepare_for_onnx_export_(self):
33 self.onnx_trace = True
34
35 @staticmethod
36 def get_embedding(
37 num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
38 ):
39 """Build sinusoidal embeddings.
40
41 This matches the implementation in tensor2tensor, but differs slightly
42 from the description in Section 3.5 of "Attention Is All You Need".
43 """
44 half_dim = embedding_dim // 2
45 emb = math.log(10000) / (half_dim - 1)
46 emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
47 emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
48 1
49 ) * emb.unsqueeze(0)
50 emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
51 num_embeddings, -1
52 )
53 if embedding_dim % 2 == 1:
54 # zero pad
55 emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
56 if padding_idx is not None:
57 emb[padding_idx, :] = 0
58 return emb
59
60 def forward(
61 self,
62 input,
63 incremental_state: Optional[Any] = None,
64 timestep: Optional[Tensor] = None,
65 positions: Optional[Any] = None,
66 ):
67 """Input is expected to be of size [bsz x seqlen]."""
68 if torch.jit.is_scripting():
69 bspair = torch.onnx.operators.shape_as_tensor(input)
70 elif torch.onnx.is_in_onnx_export():
71 bspair = torch.onnx.operators.shape_as_tensor(input)
72 else:
73 bspair = input.size()
74 bsz, seq_len = bspair[0], bspair[1]
75 max_pos = self.padding_idx + 1 + seq_len
76 if self.weights is None or max_pos > self.weights.size(0):
77 # recompute/expand embeddings if needed
78 self.weights = SinusoidalPositionalEmbedding.get_embedding(
79 max_pos, self.embedding_dim, self.padding_idx
80 )
81 self.weights = self.weights.to(self._float_tensor)
82
83 if incremental_state is not None:
84 # positions is the same for every token when decoding a single step
85 pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
86 if self.onnx_trace:
87 return (
88 self.weights.index_select(index=self.padding_idx + pos, dim=0)
89 .unsqueeze(1)
90 .repeat(bsz, 1, 1)
91 )
92 return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
93
94 positions = utils.make_positions(
95 input, self.padding_idx, onnx_trace=self.onnx_trace
96 )
97 if self.onnx_trace:
98 flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
99 embedding_shape = torch.cat(
100 (bsz, seq_len, torch.tensor([-1], dtype=torch.long))
101 )
102 embeddings = torch.onnx.operators.reshape_from_tensor_shape(
103 flat_embeddings, embedding_shape
104 )
105 return embeddings
106 return (
107 self.weights.index_select(0, positions.view(-1))
108 .view(bsz, seq_len, -1)
109 .detach()
110 )
111
[end of fairseq/modules/sinusoidal_positional_embedding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py
--- a/fairseq/modules/sinusoidal_positional_embedding.py
+++ b/fairseq/modules/sinusoidal_positional_embedding.py
@@ -65,12 +65,7 @@
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
- if torch.jit.is_scripting():
- bspair = torch.onnx.operators.shape_as_tensor(input)
- elif torch.onnx.is_in_onnx_export():
- bspair = torch.onnx.operators.shape_as_tensor(input)
- else:
- bspair = input.size()
+ bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
@@ -97,7 +92,7 @@
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
- (bsz, seq_len, torch.tensor([-1], dtype=torch.long))
+ (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
| {"golden_diff": "diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py\n--- a/fairseq/modules/sinusoidal_positional_embedding.py\n+++ b/fairseq/modules/sinusoidal_positional_embedding.py\n@@ -65,12 +65,7 @@\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n- if torch.jit.is_scripting():\n- bspair = torch.onnx.operators.shape_as_tensor(input)\n- elif torch.onnx.is_in_onnx_export():\n- bspair = torch.onnx.operators.shape_as_tensor(input)\n- else:\n- bspair = input.size()\n+ bspair = torch.onnx.operators.shape_as_tensor(input)\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n@@ -97,7 +92,7 @@\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n- (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n+ (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n", "issue": "[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`\nThis is a no-op in eager and in ONNX export, but it's better for other\ntracers if this is preserved as shapes directly instead of converted to\na tensor.\n\nThere is a little annoying code duplication with\n`torch.jit.is_scripting()`, which is unforunately necessary because we\ndidn't implement compile-time short circuiting correctly in TorchScript\nlol.\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.onnx.operators\nfrom fairseq import utils\nfrom torch import Tensor, nn\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx if padding_idx is not None else 0\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size, embedding_dim, padding_idx\n )\n self.onnx_trace = False\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n self.max_positions = int(1e5)\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(\n num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None\n ):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(\n 1\n ) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(\n num_embeddings, -1\n )\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(\n self,\n input,\n incremental_state: Optional[Any] = None,\n timestep: 
Optional[Tensor] = None,\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n if torch.jit.is_scripting():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n elif torch.onnx.is_in_onnx_export():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n else:\n bspair = input.size()\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n max_pos, self.embedding_dim, self.padding_idx\n )\n self.weights = self.weights.to(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n if self.onnx_trace:\n return (\n self.weights.index_select(index=self.padding_idx + pos, dim=0)\n .unsqueeze(1)\n .repeat(bsz, 1, 1)\n )\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(\n input, self.padding_idx, onnx_trace=self.onnx_trace\n )\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n )\n return embeddings\n return (\n self.weights.index_select(0, positions.view(-1))\n .view(bsz, seq_len, -1)\n .detach()\n )\n", "path": "fairseq/modules/sinusoidal_positional_embedding.py"}]} | 1,818 | 338 |
gh_patches_debug_14196 | rasdani/github-patches | git_diff | mozilla__pontoon-2826 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pretranslation issues with multiline strings
Pretranslation struggles to translate Fluent strings where line breaks are used to limit the line width, as it treats each line as a separate sentence.
```
rec-pw-1-2 =
Make this password unique and different from any others you use.
A good strategy to follow is to combine two or more unrelated
words to create an entire pass phrase, and include numbers and symbols.
```
I don't think we can completely ignore line breaks, because there are some cases where these have an effect on display ([example](https://searchfox.org/mozilla-central/rev/169bf38e150667afac81ab73ef8b5ace8f1dfa8d/browser/locales/en-US/browser/downloads.ftl#244-248)). But maybe we can strip line breaks (i.e., replace them with whitespace) when sending the translation to the machine translation engine, hoping that translators will catch edge cases when reviewing?
Cc @eemeli because that's a topic that we discussed at some point for the Fluent editor.
</issue>
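To make the proposal above concrete, here is a minimal sketch of stripping formatting-only line breaks before the text reaches the MT engine. The helper name and the exact whitespace rule are assumptions, not something the issue or the codebase defines.

```python
import re


def collapse_line_breaks(text: str) -> str:
    """Replace newlines (and surrounding indentation) with a single space."""
    return re.sub(r"\s*\n\s*", " ", text).strip()


source = (
    "Make this password unique and different from any others you use.\n"
    "A good strategy to follow is to combine two or more unrelated\n"
    "words to create an entire pass phrase, and include numbers and symbols."
)

# The MT engine now receives one continuous sentence instead of three fragments.
print(collapse_line_breaks(source))
```

Strings where line breaks genuinely affect display would still rely on reviewers catching them, as the issue itself suggests.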
<code>
[start of pontoon/pretranslation/pretranslate.py]
1 import logging
2 import operator
3 import re
4
5 from django.db.models import CharField, Value as V
6 from django.db.models.functions import Concat
7
8 from fluent.syntax import FluentParser, FluentSerializer
9 from functools import reduce
10
11 from pontoon.base.models import User, TranslatedResource
12 from pontoon.base.fluent import FlatTransformer, create_locale_plural_variants
13 from pontoon.machinery.utils import (
14 get_google_translate_data,
15 get_translation_memory_data,
16 )
17
18
19 log = logging.getLogger(__name__)
20
21 parser = FluentParser()
22 serializer = FluentSerializer()
23
24
25 class PretranslationTransformer(FlatTransformer):
26 def __init__(self, locale):
27 self.services = []
28 self.locale = locale
29
30 def visit_SelectExpression(self, node):
31 create_locale_plural_variants(node, self.locale)
32 return self.generic_visit(node)
33
34 def visit_TextElement(self, node):
35 pretranslation, service = get_pretranslated_data(node.value, self.locale)
36
37 if pretranslation is None:
38 raise ValueError(
39 f"Pretranslation for `{node.value}` to {self.locale.code} not available."
40 )
41
42 node.value = pretranslation
43 self.services.append(service)
44 return node
45
46
47 def get_pretranslations(entity, locale):
48 """
49 Get pretranslations for the entity-locale pair using internal translation memory and
50 Google's machine translation.
51
52 For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements
53 and then only pretranslate TextElements. Set the most frequent TextElement
54 pretranslation author as the author of the entire pretranslation.
55
56 :arg Entity entity: the Entity object
57 :arg Locale locale: the Locale object
58
59 :returns: a list of tuples, consisting of:
60 - a pretranslation of the entity
61 - a plural form
62 - a user (representing TM or GT service)
63 """
64 source = entity.string
65 services = {
66 "tm": User.objects.get(email="[email protected]"),
67 "gt": User.objects.get(email="[email protected]"),
68 }
69
70 if entity.resource.format == "ftl":
71 source_ast = parser.parse_entry(source)
72 pt_transformer = PretranslationTransformer(locale)
73
74 try:
75 pretranslated_ast = pt_transformer.visit(source_ast)
76 except ValueError as e:
77 log.info(f"Fluent pretranslation error: {e}")
78 return []
79
80 pretranslation = serializer.serialize_entry(pretranslated_ast)
81
82 authors = [services[service] for service in pt_transformer.services]
83 author = max(set(authors), key=authors.count) if authors else services["tm"]
84
85 return [(pretranslation, None, author)]
86
87 else:
88 pretranslation, service = get_pretranslated_data(source, locale)
89
90 if pretranslation is None:
91 return []
92
93 author = services[service]
94 if entity.string_plural == "":
95 return [(pretranslation, None, author)]
96 else:
97 plural_forms = range(0, locale.nplurals or 1)
98 return [
99 (pretranslation, plural_form, author) for plural_form in plural_forms
100 ]
101
102
103 def get_pretranslated_data(source, locale):
104 # Empty strings do not need translation
105 if re.search("^\\s*$", source):
106 return source, "tm"
107
108 # Try to get matches from Translation Memory
109 tm_response = get_translation_memory_data(text=source, locale=locale)
110 tm_perfect = [t for t in tm_response if int(t["quality"]) == 100]
111 if tm_perfect:
112 return tm_perfect[0]["target"], "tm"
113
114 # Fetch from Google Translate
115 elif locale.google_translate_code:
116 gt_response = get_google_translate_data(text=source, locale=locale)
117 if gt_response["status"]:
118 return gt_response["translation"], "gt"
119
120 return None, None
121
122
123 def update_changed_instances(tr_filter, tr_dict, translations):
124 """
125 Update the latest activity and stats for changed Locales, ProjectLocales
126 & TranslatedResources
127 """
128 tr_filter = tuple(tr_filter)
129 # Combine all generated filters with an OK operator.
130 # `operator.ior` is the '|' Python operator, which turns into a logical OR
131 # when used between django ORM query objects.
132 tr_query = reduce(operator.ior, tr_filter)
133
134 translatedresources = TranslatedResource.objects.filter(tr_query).annotate(
135 locale_resource=Concat(
136 "locale_id", V("-"), "resource_id", output_field=CharField()
137 )
138 )
139
140 translatedresources.update_stats()
141
142 for tr in translatedresources:
143 index = tr_dict[tr.locale_resource]
144 translation = translations[index]
145 translation.update_latest_translation()
146
[end of pontoon/pretranslation/pretranslate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py
--- a/pontoon/pretranslation/pretranslate.py
+++ b/pontoon/pretranslation/pretranslate.py
@@ -32,11 +32,15 @@
return self.generic_visit(node)
def visit_TextElement(self, node):
- pretranslation, service = get_pretranslated_data(node.value, self.locale)
+ # Machine translation treats each line as separate sentence,
+ # hence we replace newline characters with spaces.
+ source = node.value.replace("\n", " ")
+
+ pretranslation, service = get_pretranslated_data(source, self.locale)
if pretranslation is None:
raise ValueError(
- f"Pretranslation for `{node.value}` to {self.locale.code} not available."
+ f"Pretranslation for `{source}` to {self.locale.code} not available."
)
node.value = pretranslation
| {"golden_diff": "diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py\n--- a/pontoon/pretranslation/pretranslate.py\n+++ b/pontoon/pretranslation/pretranslate.py\n@@ -32,11 +32,15 @@\n return self.generic_visit(node)\n \n def visit_TextElement(self, node):\n- pretranslation, service = get_pretranslated_data(node.value, self.locale)\n+ # Machine translation treats each line as separate sentence,\n+ # hence we replace newline characters with spaces.\n+ source = node.value.replace(\"\\n\", \" \")\n+\n+ pretranslation, service = get_pretranslated_data(source, self.locale)\n \n if pretranslation is None:\n raise ValueError(\n- f\"Pretranslation for `{node.value}` to {self.locale.code} not available.\"\n+ f\"Pretranslation for `{source}` to {self.locale.code} not available.\"\n )\n \n node.value = pretranslation\n", "issue": "Pretranslation issues with multiline strings\nPretranslation struggles to translate Fluent strings where line breaks are used to limit the line width, as it treats each line as a separate sentence.\r\n\r\n```\r\nrec-pw-1-2 =\r\n Make this password unique and different from any others you use.\r\n A good strategy to follow is to combine two or more unrelated\r\n words to create an entire pass phrase, and include numbers and symbols.\r\n```\r\n\r\nI don't think we can completely ignore line breaks, because there are some cases where these have an effect on display ([example](https://searchfox.org/mozilla-central/rev/169bf38e150667afac81ab73ef8b5ace8f1dfa8d/browser/locales/en-US/browser/downloads.ftl#244-248)). But maybe we can strip line breaks (i.e. replace them with a whitespace) when sending the translation to the machine translation engine, hoping that translators will catch edge cases when reviewing?\r\n\r\nCc @eemeli because that's a topic that we discussed at some point for the Fluent editor.\n", "before_files": [{"content": "import logging\nimport operator\nimport re\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom fluent.syntax import FluentParser, FluentSerializer\nfrom functools import reduce\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.base.fluent import FlatTransformer, create_locale_plural_variants\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\n\n\nlog = logging.getLogger(__name__)\n\nparser = FluentParser()\nserializer = FluentSerializer()\n\n\nclass PretranslationTransformer(FlatTransformer):\n def __init__(self, locale):\n self.services = []\n self.locale = locale\n\n def visit_SelectExpression(self, node):\n create_locale_plural_variants(node, self.locale)\n return self.generic_visit(node)\n\n def visit_TextElement(self, node):\n pretranslation, service = get_pretranslated_data(node.value, self.locale)\n\n if pretranslation is None:\n raise ValueError(\n f\"Pretranslation for `{node.value}` to {self.locale.code} not available.\"\n )\n\n node.value = pretranslation\n self.services.append(service)\n return node\n\n\ndef get_pretranslations(entity, locale):\n \"\"\"\n Get pretranslations for the entity-locale pair using internal translation memory and\n Google's machine translation.\n\n For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements\n and then only pretranslate TextElements. 
Set the most frequent TextElement\n pretranslation author as the author of the entire pretranslation.\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n\n :returns: a list of tuples, consisting of:\n - a pretranslation of the entity\n - a plural form\n - a user (representing TM or GT service)\n \"\"\"\n source = entity.string\n services = {\n \"tm\": User.objects.get(email=\"[email protected]\"),\n \"gt\": User.objects.get(email=\"[email protected]\"),\n }\n\n if entity.resource.format == \"ftl\":\n source_ast = parser.parse_entry(source)\n pt_transformer = PretranslationTransformer(locale)\n\n try:\n pretranslated_ast = pt_transformer.visit(source_ast)\n except ValueError as e:\n log.info(f\"Fluent pretranslation error: {e}\")\n return []\n\n pretranslation = serializer.serialize_entry(pretranslated_ast)\n\n authors = [services[service] for service in pt_transformer.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n\n return [(pretranslation, None, author)]\n\n else:\n pretranslation, service = get_pretranslated_data(source, locale)\n\n if pretranslation is None:\n return []\n\n author = services[service]\n if entity.string_plural == \"\":\n return [(pretranslation, None, author)]\n else:\n plural_forms = range(0, locale.nplurals or 1)\n return [\n (pretranslation, plural_form, author) for plural_form in plural_forms\n ]\n\n\ndef get_pretranslated_data(source, locale):\n # Empty strings do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n\n # Try to get matches from Translation Memory\n tm_response = get_translation_memory_data(text=source, locale=locale)\n tm_perfect = [t for t in tm_response if int(t[\"quality\"]) == 100]\n if tm_perfect:\n return tm_perfect[0][\"target\"], \"tm\"\n\n # Fetch from Google Translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(text=source, locale=locale)\n if gt_response[\"status\"]:\n return gt_response[\"translation\"], \"gt\"\n\n return None, None\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}]} | 2,111 | 205 |
gh_patches_debug_7431 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3501 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow TinyMCE to be used in inline-mode
Inline-mode of TinyMCE (https://www.tiny.cloud/docs/demo/inline) is nice, among other reasons, because the edited text looks exactly like the saved result.
To enable it you could simply set `{"inline": "true"}` as the value in the field "Other settings" of the TinyMCE controlpanel. That is enough to trigger it.
But that fails with `Could not initialize inline editor on invalid inline target element`, since the HTML tag that renders TinyMCE is a `<textarea>`. See https://www.tiny.cloud/blog/tinymce-inline-option-not-working for details about that constraint.
The `<textarea>` is defined in `plone.app.z3cform.widget.RichTextWidget`, which has `TextareaWidget` as `_base`. In `render_input_mode` of the widget it is possible to conditionally render a different HTML element, such as a `div`, around the text. But then the fallback to the textarea no longer works, saving also fails, and so on.
So it seems that using inline-mode is not straightforward. The question is: do we like that option enough to do the required work?
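For the simpler half of the problem, getting the control-panel value through to the pattern options, a rough sketch follows. It assumes the registry-backed TinyMCE settings record exposes an `inline` field; the class and function names are placeholders, not Plone APIs:

```python
# Sketch only: forward an "inline" flag from a (stand-in) settings object into
# the JSON configuration handed to the TinyMCE pattern.
import json


class FakeTinyMCESettings:
    """Placeholder for the registry-backed TinyMCE settings record."""

    inline = True


def build_pattern_config(settings) -> str:
    configuration = {
        "inline": settings.inline,   # ends up in TinyMCE's init options
        "tiny": {"menubar": False},  # placeholder for the real tiny config
    }
    return json.dumps(configuration)


if __name__ == "__main__":
    print(build_pattern_config(FakeTinyMCESettings()))
```

The harder part, rendering something other than a `<textarea>` in the widget, is not covered by this sketch.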
</issue>
<code>
[start of Products/CMFPlone/patterns/settings.py]
1 from Acquisition import aq_inner
2 from Acquisition import aq_parent
3 from borg.localrole.interfaces import IFactoryTempFolder
4 from plone.app.content.browser.interfaces import IFolderContentsView
5 from plone.app.widgets.utils import get_relateditems_options
6 from plone.app.z3cform.utils import call_callables
7 from plone.registry.interfaces import IRegistry
8 from plone.uuid.interfaces import IUUID
9 from Products.CMFCore.interfaces._content import IFolderish
10 from plone.base.interfaces import ILinkSchema
11 from plone.base.interfaces import IPatternsSettings
12 from plone.base.interfaces import IPloneSiteRoot
13 from Products.CMFPlone.patterns.tinymce import TinyMCESettingsGenerator
14 from Products.CMFPlone.utils import get_portal
15 from zope.component import getUtility
16 from zope.i18n import translate
17 from zope.interface import implementer
18 from zope.schema.interfaces import IVocabularyFactory
19
20 import json
21
22
23 @implementer(IPatternsSettings)
24 class PatternSettingsAdapter:
25 """
26 Provides default plone settings relevant for patterns.
27 """
28
29 def __init__(self, context, request, field):
30 self.request = request
31 self.context = context
32 self.field = field
33
34 def __call__(self):
35 data = {}
36 data.update(self.mark_special_links())
37 data.update(self.structure_updater())
38 return data
39
40 def structure_updater(self):
41 """Generate the options for the structure updater pattern.
42 If we're not in folder contents view, do not expose these options.
43 """
44 data = {}
45 view = self.request.get("PUBLISHED", None)
46 if IFolderContentsView.providedBy(view):
47 data = {
48 "data-pat-structureupdater": json.dumps(
49 {
50 "titleSelector": ".documentFirstHeading",
51 "descriptionSelector": ".documentDescription",
52 }
53 )
54 }
55 return data
56
57 def mark_special_links(self):
58 result = {}
59
60 registry = getUtility(IRegistry)
61 settings = registry.forInterface(ILinkSchema, prefix="plone", check=False)
62
63 msl = settings.mark_special_links
64 elonw = settings.external_links_open_new_window
65 if msl or elonw:
66 result = {
67 "data-pat-markspeciallinks": json.dumps(
68 {"external_links_open_new_window": elonw, "mark_special_links": msl}
69 )
70 }
71 return result
72
73 @property
74 def image_scales(self):
75 factory = getUtility(IVocabularyFactory, "plone.app.vocabularies.ImagesScales")
76 vocabulary = factory(self.context)
77 ret = [{"title": translate(it.title), "value": it.value} for it in vocabulary]
78 ret = sorted(ret, key=lambda it: it["title"])
79 return json.dumps(ret)
80
81 def tinymce(self):
82 """
83 data-pat-tinymce : JSON.stringify({
84 relatedItems: {
85 vocabularyUrl: config.portal_url +
86 '/@@getVocabulary?name=plone.app.vocabularies.Catalog'
87 },
88 tiny: config,
89 prependToUrl: 'resolveuid/',
90 linkAttribute: 'UID',
91 prependToScalePart: '/@@images/image/'
92 })
93 """
94
95 generator = TinyMCESettingsGenerator(self.context, self.request)
96 settings = generator.settings
97 folder = aq_inner(self.context)
98
99 # Test if we are currently creating an Archetype object
100 if IFactoryTempFolder.providedBy(aq_parent(folder)):
101 folder = aq_parent(aq_parent(aq_parent(folder)))
102 if not IFolderish.providedBy(folder):
103 folder = aq_parent(folder)
104
105 if IPloneSiteRoot.providedBy(folder):
106 initial = None
107 else:
108 initial = IUUID(folder, None)
109
110 portal = get_portal()
111 portal_url = portal.absolute_url()
112 current_path = folder.absolute_url()[len(portal_url) :]
113
114 image_types = settings.image_objects or []
115
116 server_url = self.request.get("SERVER_URL", "")
117 site_path = portal_url[len(server_url) :]
118
119 related_items_config = get_relateditems_options(
120 context=self.context,
121 value=None,
122 separator=";",
123 vocabulary_name="plone.app.vocabularies.Catalog",
124 vocabulary_view="@@getVocabulary",
125 field_name=None,
126 )
127 related_items_config = call_callables(related_items_config, self.context)
128
129 configuration = {
130 "base_url": self.context.absolute_url(),
131 "imageTypes": image_types,
132 "imageScales": self.image_scales,
133 "linkAttribute": "UID",
134 # This is for loading the languages on tinymce
135 "loadingBaseUrl": "{}/++plone++static/components/tinymce-builded/"
136 "js/tinymce".format(portal_url),
137 "relatedItems": related_items_config,
138 "prependToScalePart": "/@@images/image/",
139 "prependToUrl": "{}/resolveuid/".format(site_path.rstrip("/")),
140 "tiny": generator.get_tiny_config(),
141 "upload": {
142 "baseUrl": portal_url,
143 "currentPath": current_path,
144 "initialFolder": initial,
145 "maxFiles": 1,
146 "relativePath": "@@fileUpload",
147 "showTitle": False,
148 "uploadMultiple": False,
149 },
150 }
151 return {"data-pat-tinymce": json.dumps(configuration)}
152
[end of Products/CMFPlone/patterns/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Products/CMFPlone/patterns/settings.py b/Products/CMFPlone/patterns/settings.py
--- a/Products/CMFPlone/patterns/settings.py
+++ b/Products/CMFPlone/patterns/settings.py
@@ -137,6 +137,7 @@
"relatedItems": related_items_config,
"prependToScalePart": "/@@images/image/",
"prependToUrl": "{}/resolveuid/".format(site_path.rstrip("/")),
+ "inline": settings.inline,
"tiny": generator.get_tiny_config(),
"upload": {
"baseUrl": portal_url,
| {"golden_diff": "diff --git a/Products/CMFPlone/patterns/settings.py b/Products/CMFPlone/patterns/settings.py\n--- a/Products/CMFPlone/patterns/settings.py\n+++ b/Products/CMFPlone/patterns/settings.py\n@@ -137,6 +137,7 @@\n \"relatedItems\": related_items_config,\n \"prependToScalePart\": \"/@@images/image/\",\n \"prependToUrl\": \"{}/resolveuid/\".format(site_path.rstrip(\"/\")),\n+ \"inline\": settings.inline,\n \"tiny\": generator.get_tiny_config(),\n \"upload\": {\n \"baseUrl\": portal_url,\n", "issue": "Allow TinyMCE to be used in inline-mode\nInline-mode of TinyMCE (https://www.tiny.cloud/docs/demo/inline) is nice among other reasons because the edited text looks exactly the way as the saved result. \r\n\r\nTo enable it you could simply set `{\"inline\": \"true\"}` as the value in the field \"Other settings\" of the TinyMCE controlpanel. That is enough to trigger it. \r\n\r\nBut that fails with `Could not initialize inline editor on invalid inline target element` since the html-tag that renders the TinyMCE is a `<textarea>`. See https://www.tiny.cloud/blog/tinymce-inline-option-not-working for details about that constraint.\r\n\r\nThe `<textarea>` thing is defined in `plone.app.z3cform.widget.RichTextWidget` which has `TextareaWidget` as `_base`. In `render_input_mode` of the widget it is possible to conditionally render a different html element like `div` around the text. But then fallback to textarea no longer works and saving also fails and and and... \r\n\r\nSo it seems that using inline-mode in not straightforward. The question is do we like that option enough to do the required work?\r\n\n", "before_files": [{"content": "from Acquisition import aq_inner\nfrom Acquisition import aq_parent\nfrom borg.localrole.interfaces import IFactoryTempFolder\nfrom plone.app.content.browser.interfaces import IFolderContentsView\nfrom plone.app.widgets.utils import get_relateditems_options\nfrom plone.app.z3cform.utils import call_callables\nfrom plone.registry.interfaces import IRegistry\nfrom plone.uuid.interfaces import IUUID\nfrom Products.CMFCore.interfaces._content import IFolderish\nfrom plone.base.interfaces import ILinkSchema\nfrom plone.base.interfaces import IPatternsSettings\nfrom plone.base.interfaces import IPloneSiteRoot\nfrom Products.CMFPlone.patterns.tinymce import TinyMCESettingsGenerator\nfrom Products.CMFPlone.utils import get_portal\nfrom zope.component import getUtility\nfrom zope.i18n import translate\nfrom zope.interface import implementer\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport json\n\n\n@implementer(IPatternsSettings)\nclass PatternSettingsAdapter:\n \"\"\"\n Provides default plone settings relevant for patterns.\n \"\"\"\n\n def __init__(self, context, request, field):\n self.request = request\n self.context = context\n self.field = field\n\n def __call__(self):\n data = {}\n data.update(self.mark_special_links())\n data.update(self.structure_updater())\n return data\n\n def structure_updater(self):\n \"\"\"Generate the options for the structure updater pattern.\n If we're not in folder contents view, do not expose these options.\n \"\"\"\n data = {}\n view = self.request.get(\"PUBLISHED\", None)\n if IFolderContentsView.providedBy(view):\n data = {\n \"data-pat-structureupdater\": json.dumps(\n {\n \"titleSelector\": \".documentFirstHeading\",\n \"descriptionSelector\": \".documentDescription\",\n }\n )\n }\n return data\n\n def mark_special_links(self):\n result = {}\n\n registry = getUtility(IRegistry)\n settings = 
registry.forInterface(ILinkSchema, prefix=\"plone\", check=False)\n\n msl = settings.mark_special_links\n elonw = settings.external_links_open_new_window\n if msl or elonw:\n result = {\n \"data-pat-markspeciallinks\": json.dumps(\n {\"external_links_open_new_window\": elonw, \"mark_special_links\": msl}\n )\n }\n return result\n\n @property\n def image_scales(self):\n factory = getUtility(IVocabularyFactory, \"plone.app.vocabularies.ImagesScales\")\n vocabulary = factory(self.context)\n ret = [{\"title\": translate(it.title), \"value\": it.value} for it in vocabulary]\n ret = sorted(ret, key=lambda it: it[\"title\"])\n return json.dumps(ret)\n\n def tinymce(self):\n \"\"\"\n data-pat-tinymce : JSON.stringify({\n relatedItems: {\n vocabularyUrl: config.portal_url +\n '/@@getVocabulary?name=plone.app.vocabularies.Catalog'\n },\n tiny: config,\n prependToUrl: 'resolveuid/',\n linkAttribute: 'UID',\n prependToScalePart: '/@@images/image/'\n })\n \"\"\"\n\n generator = TinyMCESettingsGenerator(self.context, self.request)\n settings = generator.settings\n folder = aq_inner(self.context)\n\n # Test if we are currently creating an Archetype object\n if IFactoryTempFolder.providedBy(aq_parent(folder)):\n folder = aq_parent(aq_parent(aq_parent(folder)))\n if not IFolderish.providedBy(folder):\n folder = aq_parent(folder)\n\n if IPloneSiteRoot.providedBy(folder):\n initial = None\n else:\n initial = IUUID(folder, None)\n\n portal = get_portal()\n portal_url = portal.absolute_url()\n current_path = folder.absolute_url()[len(portal_url) :]\n\n image_types = settings.image_objects or []\n\n server_url = self.request.get(\"SERVER_URL\", \"\")\n site_path = portal_url[len(server_url) :]\n\n related_items_config = get_relateditems_options(\n context=self.context,\n value=None,\n separator=\";\",\n vocabulary_name=\"plone.app.vocabularies.Catalog\",\n vocabulary_view=\"@@getVocabulary\",\n field_name=None,\n )\n related_items_config = call_callables(related_items_config, self.context)\n\n configuration = {\n \"base_url\": self.context.absolute_url(),\n \"imageTypes\": image_types,\n \"imageScales\": self.image_scales,\n \"linkAttribute\": \"UID\",\n # This is for loading the languages on tinymce\n \"loadingBaseUrl\": \"{}/++plone++static/components/tinymce-builded/\"\n \"js/tinymce\".format(portal_url),\n \"relatedItems\": related_items_config,\n \"prependToScalePart\": \"/@@images/image/\",\n \"prependToUrl\": \"{}/resolveuid/\".format(site_path.rstrip(\"/\")),\n \"tiny\": generator.get_tiny_config(),\n \"upload\": {\n \"baseUrl\": portal_url,\n \"currentPath\": current_path,\n \"initialFolder\": initial,\n \"maxFiles\": 1,\n \"relativePath\": \"@@fileUpload\",\n \"showTitle\": False,\n \"uploadMultiple\": False,\n },\n }\n return {\"data-pat-tinymce\": json.dumps(configuration)}\n", "path": "Products/CMFPlone/patterns/settings.py"}]} | 2,295 | 143 |
gh_patches_debug_19528 | rasdani/github-patches | git_diff | akvo__akvo-rsr-4450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Indicator target and disaggregation targets bugs
</issue>
<code>
[start of akvo/rest/serializers/indicator.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from akvo.rest.serializers.indicator_period import (
8 IndicatorPeriodFrameworkSerializer, IndicatorPeriodFrameworkLiteSerializer,
9 IndicatorPeriodFrameworkNotSoLiteSerializer, create_or_update_disaggregation_targets)
10 from akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer
11 from akvo.rest.serializers.indicator_custom_field import IndicatorCustomValueSerializer
12 from akvo.rest.serializers.indicator_reference import IndicatorReferenceSerializer
13 from akvo.rest.serializers.rsr_serializer import BaseRSRSerializer
14 from akvo.rsr.models import (
15 Indicator, IndicatorDimensionName, IndicatorLabel, IndicatorDisaggregationTarget)
16
17 from rest_framework import serializers
18
19
20 def serialize_disaggregation_targets(indicator):
21 return [
22 {
23 'id': t.id,
24 'value': t.value,
25 'dimension_value': t.dimension_value_id,
26 'indicator': indicator.id,
27 }
28 for t in indicator.disaggregation_targets.all()
29 ]
30
31
32 class IndicatorDisaggregationTargetNestedSerializer(BaseRSRSerializer):
33 id = serializers.IntegerField()
34
35 class Meta:
36 model = IndicatorDisaggregationTarget
37 fields = ('id', 'value', 'dimension_value', 'indicator')
38 read_only_fields = ('id', 'indicator')
39
40 def to_internal_value(self, data):
41 if 'value' in data:
42 data['value'] = str(data['value']).replace(',', '.')
43 return super().to_internal_value(data)
44
45
46 class LabelListingField(serializers.RelatedField):
47
48 def to_representation(self, labels):
49 if isinstance(labels, IndicatorLabel):
50 value = labels.label_id
51 else:
52 value = list(labels.values_list('label_id', flat=True))
53 return value
54
55 def to_internal_value(self, org_label_ids):
56 indicator = self.root.instance
57 existing_labels = set(indicator.labels.values_list('label_id', flat=True))
58 new_labels = set(org_label_ids) - existing_labels
59 deleted_labels = existing_labels - set(org_label_ids)
60 labels = [IndicatorLabel(indicator=indicator, label_id=org_label_id) for org_label_id in new_labels]
61 IndicatorLabel.objects.bulk_create(labels)
62 if deleted_labels:
63 IndicatorLabel.objects.filter(label_id__in=deleted_labels).delete()
64
65 return indicator.labels.all()
66
67
68 class IndicatorSerializer(BaseRSRSerializer):
69
70 result_unicode = serializers.ReadOnlyField(source='result.__str__')
71 measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')
72 children_aggregate_percentage = serializers.ReadOnlyField()
73 dimension_names = serializers.PrimaryKeyRelatedField(
74 many=True, queryset=IndicatorDimensionName.objects.all())
75 disaggregation_targets = serializers.SerializerMethodField()
76
77 def get_disaggregation_targets(self, obj):
78 return serialize_disaggregation_targets(obj)
79
80 class Meta:
81 model = Indicator
82 exclude = ['enumerators']
83
84 # TODO: add validation for parent_indicator
85
86
87 class IndicatorFrameworkSerializer(BaseRSRSerializer):
88
89 periods = IndicatorPeriodFrameworkSerializer(many=True, required=False, read_only=True)
90 parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')
91 children_aggregate_percentage = serializers.ReadOnlyField()
92 dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)
93 labels = LabelListingField(queryset=IndicatorLabel.objects.all(), required=False)
94 disaggregation_targets = IndicatorDisaggregationTargetNestedSerializer(many=True, required=False)
95
96 class Meta:
97 model = Indicator
98 exclude = ['enumerators']
99
100 def update(self, instance, validated_data):
101 disaggregation_targets = validated_data.pop('disaggregation_targets', [])
102 instance = super().update(instance, validated_data)
103 create_or_update_disaggregation_targets(instance, disaggregation_targets)
104 return instance
105
106 def validate_disaggregation_targets(self, data):
107 for target in data:
108 if 'value' not in target:
109 raise serializers.ValidationError('Disaggregation targets should have a value')
110 if 'dimension_value' not in target:
111 raise serializers.ValidationError(
112 'Disaggregation targets should have "dimension_value"')
113 return data
114
115 def to_internal_value(self, data):
116 if 'target_value' in data:
117 data['target_value'] = str(data['target_value']).replace(',', '.')
118 return super().to_internal_value(data)
119
120
121 class IndicatorFrameworkLiteSerializer(BaseRSRSerializer):
122
123 periods = IndicatorPeriodFrameworkLiteSerializer(many=True, required=False, read_only=True)
124 references = IndicatorReferenceSerializer(many=True, required=False, read_only=True)
125 parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')
126 children_aggregate_percentage = serializers.ReadOnlyField()
127 dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)
128 labels = LabelListingField(read_only=True)
129 disaggregation_targets = serializers.SerializerMethodField()
130 custom_values = IndicatorCustomValueSerializer(many=True, required=False)
131
132 def get_disaggregation_targets(self, obj):
133 return serialize_disaggregation_targets(obj)
134
135 class Meta:
136 model = Indicator
137 exclude = ['enumerators']
138
139
140 class IndicatorFrameworkNotSoLiteSerializer(BaseRSRSerializer):
141
142 periods = IndicatorPeriodFrameworkNotSoLiteSerializer(many=True, required=False, read_only=True)
143 parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')
144 children_aggregate_percentage = serializers.ReadOnlyField()
145 labels = LabelListingField(read_only=True)
146 disaggregation_targets = serializers.SerializerMethodField()
147 dimension_names = serializers.SerializerMethodField()
148
149 def get_disaggregation_targets(self, obj):
150 return serialize_disaggregation_targets(obj)
151
152 def get_dimension_names(self, obj):
153 return [
154 {
155 'id': n.id,
156 'name': n.name,
157 'dimension_values': [{'id': v.id, 'value': v.value} for v in n.dimension_values.all()]
158 }
159 for n in obj.dimension_names.all()
160 ]
161
162 class Meta:
163 model = Indicator
164 fields = (
165 'id',
166 'periods',
167 'parent_indicator',
168 'children_aggregate_percentage',
169 'labels',
170 'title',
171 'type',
172 'measure',
173 'ascending',
174 'description',
175 'baseline_year',
176 'baseline_value',
177 'baseline_comment',
178 'order',
179 'export_to_iati',
180 'result',
181 'disaggregation_targets',
182 'dimension_names',
183 'scores',
184 )
185
[end of akvo/rest/serializers/indicator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/serializers/indicator.py b/akvo/rest/serializers/indicator.py
--- a/akvo/rest/serializers/indicator.py
+++ b/akvo/rest/serializers/indicator.py
@@ -37,8 +37,8 @@
read_only_fields = ('id', 'indicator')
def to_internal_value(self, data):
- if 'value' in data:
- data['value'] = str(data['value']).replace(',', '.')
+ value = data.get('value', None)
+ data['value'] = str(value).replace(',', '.') if value is not None else None
return super().to_internal_value(data)
@@ -112,8 +112,10 @@
return data
def to_internal_value(self, data):
- if 'target_value' in data:
+ if 'target_value' in data and data['target_value'] is not None:
data['target_value'] = str(data['target_value']).replace(',', '.')
+ if 'disaggregation_targets' in data:
+ data['disaggregation_targets'] = [dt for dt in data['disaggregation_targets'] if dt]
return super().to_internal_value(data)
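In plain terms, the patch above makes the serializers tolerate missing or `None` values and drops empty disaggregation-target entries before normalizing comma decimal separators. A standalone sketch of that normalization logic (hypothetical helper names, for illustration only):

```python
# Sketch only: None-safe normalization of numeric strings that may use a comma
# as the decimal separator, plus filtering of empty target entries.
def normalize_value(value):
    if value is None:
        return None
    return str(value).replace(",", ".")


def drop_empty_targets(targets):
    # Remove falsy entries (e.g. {} or None) before further validation.
    return [target for target in targets if target]


if __name__ == "__main__":
    print(normalize_value("3,5"))    # -> "3.5"
    print(normalize_value(None))     # -> None
    print(drop_empty_targets([{"value": "1"}, {}, None]))
```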
| {"golden_diff": "diff --git a/akvo/rest/serializers/indicator.py b/akvo/rest/serializers/indicator.py\n--- a/akvo/rest/serializers/indicator.py\n+++ b/akvo/rest/serializers/indicator.py\n@@ -37,8 +37,8 @@\n read_only_fields = ('id', 'indicator')\n \n def to_internal_value(self, data):\n- if 'value' in data:\n- data['value'] = str(data['value']).replace(',', '.')\n+ value = data.get('value', None)\n+ data['value'] = str(value).replace(',', '.') if value is not None else None\n return super().to_internal_value(data)\n \n \n@@ -112,8 +112,10 @@\n return data\n \n def to_internal_value(self, data):\n- if 'target_value' in data:\n+ if 'target_value' in data and data['target_value'] is not None:\n data['target_value'] = str(data['target_value']).replace(',', '.')\n+ if 'disaggregation_targets' in data:\n+ data['disaggregation_targets'] = [dt for dt in data['disaggregation_targets'] if dt]\n return super().to_internal_value(data)\n", "issue": "Indicator target and disaggregation targets bugs\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom akvo.rest.serializers.indicator_period import (\n IndicatorPeriodFrameworkSerializer, IndicatorPeriodFrameworkLiteSerializer,\n IndicatorPeriodFrameworkNotSoLiteSerializer, create_or_update_disaggregation_targets)\nfrom akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer\nfrom akvo.rest.serializers.indicator_custom_field import IndicatorCustomValueSerializer\nfrom akvo.rest.serializers.indicator_reference import IndicatorReferenceSerializer\nfrom akvo.rest.serializers.rsr_serializer import BaseRSRSerializer\nfrom akvo.rsr.models import (\n Indicator, IndicatorDimensionName, IndicatorLabel, IndicatorDisaggregationTarget)\n\nfrom rest_framework import serializers\n\n\ndef serialize_disaggregation_targets(indicator):\n return [\n {\n 'id': t.id,\n 'value': t.value,\n 'dimension_value': t.dimension_value_id,\n 'indicator': indicator.id,\n }\n for t in indicator.disaggregation_targets.all()\n ]\n\n\nclass IndicatorDisaggregationTargetNestedSerializer(BaseRSRSerializer):\n id = serializers.IntegerField()\n\n class Meta:\n model = IndicatorDisaggregationTarget\n fields = ('id', 'value', 'dimension_value', 'indicator')\n read_only_fields = ('id', 'indicator')\n\n def to_internal_value(self, data):\n if 'value' in data:\n data['value'] = str(data['value']).replace(',', '.')\n return super().to_internal_value(data)\n\n\nclass LabelListingField(serializers.RelatedField):\n\n def to_representation(self, labels):\n if isinstance(labels, IndicatorLabel):\n value = labels.label_id\n else:\n value = list(labels.values_list('label_id', flat=True))\n return value\n\n def to_internal_value(self, org_label_ids):\n indicator = self.root.instance\n existing_labels = set(indicator.labels.values_list('label_id', flat=True))\n new_labels = set(org_label_ids) - existing_labels\n deleted_labels = existing_labels - set(org_label_ids)\n labels = [IndicatorLabel(indicator=indicator, label_id=org_label_id) for org_label_id in new_labels]\n IndicatorLabel.objects.bulk_create(labels)\n if deleted_labels:\n IndicatorLabel.objects.filter(label_id__in=deleted_labels).delete()\n\n return indicator.labels.all()\n\n\nclass IndicatorSerializer(BaseRSRSerializer):\n\n result_unicode = 
serializers.ReadOnlyField(source='result.__str__')\n measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = serializers.PrimaryKeyRelatedField(\n many=True, queryset=IndicatorDimensionName.objects.all())\n disaggregation_targets = serializers.SerializerMethodField()\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n # TODO: add validation for parent_indicator\n\n\nclass IndicatorFrameworkSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)\n labels = LabelListingField(queryset=IndicatorLabel.objects.all(), required=False)\n disaggregation_targets = IndicatorDisaggregationTargetNestedSerializer(many=True, required=False)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n def update(self, instance, validated_data):\n disaggregation_targets = validated_data.pop('disaggregation_targets', [])\n instance = super().update(instance, validated_data)\n create_or_update_disaggregation_targets(instance, disaggregation_targets)\n return instance\n\n def validate_disaggregation_targets(self, data):\n for target in data:\n if 'value' not in target:\n raise serializers.ValidationError('Disaggregation targets should have a value')\n if 'dimension_value' not in target:\n raise serializers.ValidationError(\n 'Disaggregation targets should have \"dimension_value\"')\n return data\n\n def to_internal_value(self, data):\n if 'target_value' in data:\n data['target_value'] = str(data['target_value']).replace(',', '.')\n return super().to_internal_value(data)\n\n\nclass IndicatorFrameworkLiteSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkLiteSerializer(many=True, required=False, read_only=True)\n references = IndicatorReferenceSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)\n labels = LabelListingField(read_only=True)\n disaggregation_targets = serializers.SerializerMethodField()\n custom_values = IndicatorCustomValueSerializer(many=True, required=False)\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n\nclass IndicatorFrameworkNotSoLiteSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkNotSoLiteSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n labels = LabelListingField(read_only=True)\n disaggregation_targets = serializers.SerializerMethodField()\n dimension_names = serializers.SerializerMethodField()\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n def get_dimension_names(self, obj):\n return [\n {\n 'id': n.id,\n 'name': n.name,\n 'dimension_values': [{'id': v.id, 'value': v.value} for v in 
n.dimension_values.all()]\n }\n for n in obj.dimension_names.all()\n ]\n\n class Meta:\n model = Indicator\n fields = (\n 'id',\n 'periods',\n 'parent_indicator',\n 'children_aggregate_percentage',\n 'labels',\n 'title',\n 'type',\n 'measure',\n 'ascending',\n 'description',\n 'baseline_year',\n 'baseline_value',\n 'baseline_comment',\n 'order',\n 'export_to_iati',\n 'result',\n 'disaggregation_targets',\n 'dimension_names',\n 'scores',\n )\n", "path": "akvo/rest/serializers/indicator.py"}]} | 2,417 | 272 |
gh_patches_debug_1148 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-19 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix some problems in the ctc beam search decoder
- [x] Make character's index in FST starting from one, otherwise wrong decoding results would be produced especially when space is the first character in the vocabulary;
- [x] Add version check in the setup script;
- [x] Remove unused code.
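As a sketch of the indexing issue in the first item above (assuming the usual OpenFST convention that label 0 is reserved for epsilon, so real symbols must start at 1):

```python
# Sketch only: map vocabulary characters to FST labels starting at 1, so the
# first character (often the space) cannot collide with the epsilon label 0.
def build_label_map(vocabulary):
    return {char: index + 1 for index, char in enumerate(vocabulary)}


if __name__ == "__main__":
    vocab = [" ", "a", "b", "c"]
    print(build_label_map(vocab))  # {' ': 1, 'a': 2, 'b': 3, 'c': 4}
```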
</issue>
<code>
[start of decoders/swig/setup.py]
1 """Script to build and install decoder package."""
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5
6 from setuptools import setup, Extension, distutils
7 import glob
8 import platform
9 import os, sys
10 import multiprocessing.pool
11 import argparse
12
13 parser = argparse.ArgumentParser(description=__doc__)
14 parser.add_argument(
15 "--num_processes",
16 default=1,
17 type=int,
18 help="Number of cpu processes to build package. (default: %(default)d)")
19 args = parser.parse_known_args()
20
21 # reconstruct sys.argv to pass to setup below
22 sys.argv = [sys.argv[0]] + args[1]
23
24
25 # monkey-patch for parallel compilation
26 # See: https://stackoverflow.com/a/13176803
27 def parallelCCompile(self,
28 sources,
29 output_dir=None,
30 macros=None,
31 include_dirs=None,
32 debug=0,
33 extra_preargs=None,
34 extra_postargs=None,
35 depends=None):
36 # those lines are copied from distutils.ccompiler.CCompiler directly
37 macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
38 output_dir, macros, include_dirs, sources, depends, extra_postargs)
39 cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
40
41 # parallel code
42 def _single_compile(obj):
43 try:
44 src, ext = build[obj]
45 except KeyError:
46 return
47 self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
48
49 # convert to list, imap is evaluated on-demand
50 thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)
51 list(thread_pool.imap(_single_compile, objects))
52 return objects
53
54
55 def compile_test(header, library):
56 dummy_path = os.path.join(os.path.dirname(__file__), "dummy")
57 command = "bash -c \"g++ -include " + header \
58 + " -l" + library + " -x c++ - <<<'int main() {}' -o " \
59 + dummy_path + " >/dev/null 2>/dev/null && rm " \
60 + dummy_path + " 2>/dev/null\""
61 return os.system(command) == 0
62
63
64 # hack compile to support parallel compiling
65 distutils.ccompiler.CCompiler.compile = parallelCCompile
66
67 FILES = glob.glob('kenlm/util/*.cc') \
68 + glob.glob('kenlm/lm/*.cc') \
69 + glob.glob('kenlm/util/double-conversion/*.cc')
70
71 FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')
72
73 FILES = [
74 fn for fn in FILES
75 if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(
76 'unittest.cc'))
77 ]
78
79 LIBS = ['stdc++']
80 if platform.system() != 'Darwin':
81 LIBS.append('rt')
82
83 ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']
84
85 if compile_test('zlib.h', 'z'):
86 ARGS.append('-DHAVE_ZLIB')
87 LIBS.append('z')
88
89 if compile_test('bzlib.h', 'bz2'):
90 ARGS.append('-DHAVE_BZLIB')
91 LIBS.append('bz2')
92
93 if compile_test('lzma.h', 'lzma'):
94 ARGS.append('-DHAVE_XZLIB')
95 LIBS.append('lzma')
96
97 os.system('swig -python -c++ ./decoders.i')
98
99 decoders_module = [
100 Extension(
101 name='_swig_decoders',
102 sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),
103 language='c++',
104 include_dirs=[
105 '.',
106 'kenlm',
107 'openfst-1.6.3/src/include',
108 'ThreadPool',
109 ],
110 libraries=LIBS,
111 extra_compile_args=ARGS)
112 ]
113
114 setup(
115 name='swig_decoders',
116 version='1.0',
117 description="""CTC decoders""",
118 ext_modules=decoders_module,
119 py_modules=['swig_decoders'], )
120
[end of decoders/swig/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/decoders/swig/setup.py b/decoders/swig/setup.py
--- a/decoders/swig/setup.py
+++ b/decoders/swig/setup.py
@@ -113,7 +113,7 @@
setup(
name='swig_decoders',
- version='1.0',
+ version='1.1',
description="""CTC decoders""",
ext_modules=decoders_module,
py_modules=['swig_decoders'], )
| {"golden_diff": "diff --git a/decoders/swig/setup.py b/decoders/swig/setup.py\n--- a/decoders/swig/setup.py\n+++ b/decoders/swig/setup.py\n@@ -113,7 +113,7 @@\n \n setup(\n name='swig_decoders',\n- version='1.0',\n+ version='1.1',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "issue": "Fix some problems in the ctc beam search decoder\n- [x] Make character's index in FST starting from one, otherwise wrong decoding results would be produced especially when space is the first character in the vocabulary;\r\n- [x] Add version check in the setup script;\r\n- [x] Remove unused code. \r\n\n", "before_files": [{"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + 
glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='1.0',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "path": "decoders/swig/setup.py"}]} | 1,746 | 107 |
gh_patches_debug_12031 | rasdani/github-patches | git_diff | pyca__cryptography-2766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
x509.CertificateBuilder().sign() fails with "Unknown OpenSSL error" when the subject has a bad country code
The x509.CertificateBuilder().sign() function fails with an unknown OpenSSL error when the builder is provided with an invalid country code:
```
Traceback (most recent call last):
File "./demo.py", line 30, in <module>
ca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())
File "/usr/local/lib/python3.4/dist-packages/cryptography/x509/base.py", line 520, in sign
return backend.create_x509_certificate(self, private_key, algorithm)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/multibackend.py", line 381, in create_x509_certificate
return b.create_x509_certificate(builder, private_key, algorithm)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 1402, in create_x509_certificate
x509_cert, _encode_name_gc(self, list(builder._subject_name))
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 158, in _encode_name_gc
subject = _encode_name(backend, attributes)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 153, in _encode_name
backend.openssl_assert(res == 1)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 719, in openssl_assert
return binding._openssl_assert(self._lib, ok)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/bindings/openssl/binding.py", line 43, in _openssl_assert
errors
cryptography.exceptions.InternalError: Unknown OpenSSL error. Please file an issue at https://github.com/pyca/cryptography/issues with information on how to reproduce this. ([_OpenSSLError(code=218603671, lib=13, func=122, reason=151)])
```
The demo code below reproduces this error. Tested on Ubuntu 14.04.3 x64 w/ Python 3.4.3, cryptography 1.2.1, and libssl-dev 1.0.1f-1ubuntu2.16.
```
#!/usr/bin/env python3
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
DUR_ONE_DAY = datetime.timedelta(1, 0, 0)
DUR_ONE_YEAR = datetime.timedelta(366, 0, 0)
ca_key = rsa.generate_private_key(65537, 4096, default_backend())
sub_attr = [x509.NameAttribute(x509.NameOID.COUNTRY_NAME, "InvalidCC")]
builder = x509.CertificateBuilder()
builder = builder.issuer_name(x509.Name(sub_attr))
builder = builder.subject_name(x509.Name(sub_attr))
builder = builder.not_valid_before(datetime.datetime.today() - DUR_ONE_DAY)
builder = builder.not_valid_after(datetime.datetime.today() + DUR_ONE_YEAR)
builder = builder.serial_number(3)
builder = builder.public_key(ca_key.public_key())
extensions = []
extensions.append(x509.BasicConstraints(ca=True, path_length=1))
for ext in extensions:
builder = builder.add_extension(ext, critical=True)
ca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())
# builder.sign() will fail when COUNTRY_NAME is an invalid country code:
# cryptography.exceptions.InternalError: Unknown
# OpenSSL error. Please file an issue at
# https://github.com/pyca/cryptography/issues with information on
# how to reproduce this. ([_OpenSSLError(code=218603671, lib=13,
# func=122, reason=151)])
```
It might be nice to raise a more relevant error message, or to catch this even sooner, when the NameAttribute is added.
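A minimal sketch of the kind of early check the second suggestion implies, validating at `NameAttribute` construction time instead of at signing time (illustrative only; it mirrors the two-character ISO 3166-1 alpha-2 length rule):

```python
# Sketch only: reject country names that are not two-character codes before
# they ever reach OpenSSL.
def validate_country_code(value: str) -> str:
    if len(value.encode("utf8")) != 2:
        raise ValueError("Country name must be a 2 character country code")
    return value


if __name__ == "__main__":
    print(validate_country_code("US"))
    try:
        validate_country_code("InvalidCC")
    except ValueError as exc:
        print("rejected:", exc)
```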
</issue>
<code>
[start of src/cryptography/x509/name.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import six
8
9 from cryptography import utils
10 from cryptography.x509.oid import ObjectIdentifier
11
12
13 class NameAttribute(object):
14 def __init__(self, oid, value):
15 if not isinstance(oid, ObjectIdentifier):
16 raise TypeError(
17 "oid argument must be an ObjectIdentifier instance."
18 )
19
20 if not isinstance(value, six.text_type):
21 raise TypeError(
22 "value argument must be a text type."
23 )
24
25 self._oid = oid
26 self._value = value
27
28 oid = utils.read_only_property("_oid")
29 value = utils.read_only_property("_value")
30
31 def __eq__(self, other):
32 if not isinstance(other, NameAttribute):
33 return NotImplemented
34
35 return (
36 self.oid == other.oid and
37 self.value == other.value
38 )
39
40 def __ne__(self, other):
41 return not self == other
42
43 def __hash__(self):
44 return hash((self.oid, self.value))
45
46 def __repr__(self):
47 return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
48
49
50 class Name(object):
51 def __init__(self, attributes):
52 self._attributes = attributes
53
54 def get_attributes_for_oid(self, oid):
55 return [i for i in self if i.oid == oid]
56
57 def __eq__(self, other):
58 if not isinstance(other, Name):
59 return NotImplemented
60
61 return self._attributes == other._attributes
62
63 def __ne__(self, other):
64 return not self == other
65
66 def __hash__(self):
67 # TODO: this is relatively expensive, if this looks like a bottleneck
68 # for you, consider optimizing!
69 return hash(tuple(self._attributes))
70
71 def __iter__(self):
72 return iter(self._attributes)
73
74 def __len__(self):
75 return len(self._attributes)
76
77 def __repr__(self):
78 return "<Name({0!r})>".format(self._attributes)
79
[end of src/cryptography/x509/name.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py
--- a/src/cryptography/x509/name.py
+++ b/src/cryptography/x509/name.py
@@ -7,7 +7,7 @@
import six
from cryptography import utils
-from cryptography.x509.oid import ObjectIdentifier
+from cryptography.x509.oid import NameOID, ObjectIdentifier
class NameAttribute(object):
@@ -22,6 +22,11 @@
"value argument must be a text type."
)
+ if oid == NameOID.COUNTRY_NAME and len(value.encode("utf8")) != 2:
+ raise ValueError(
+ "Country name must be a 2 character country code"
+ )
+
self._oid = oid
self._value = value
| {"golden_diff": "diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py\n--- a/src/cryptography/x509/name.py\n+++ b/src/cryptography/x509/name.py\n@@ -7,7 +7,7 @@\n import six\n \n from cryptography import utils\n-from cryptography.x509.oid import ObjectIdentifier\n+from cryptography.x509.oid import NameOID, ObjectIdentifier\n \n \n class NameAttribute(object):\n@@ -22,6 +22,11 @@\n \"value argument must be a text type.\"\n )\n \n+ if oid == NameOID.COUNTRY_NAME and len(value.encode(\"utf8\")) != 2:\n+ raise ValueError(\n+ \"Country name must be a 2 character country code\"\n+ )\n+\n self._oid = oid\n self._value = value\n", "issue": "x509.CertifcateBuilder().sign() fails with \"Unknown OpenSSL error' when subject has bad country code\nThe x509.CertifcateBuilder().sign() function fails with an unknown OpenSSL error when the builder is provided with an invalid country code:\n\n```\nTraceback (most recent call last):\n File \"./demo.py\", line 30, in <module>\n ca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/x509/base.py\", line 520, in sign\n return backend.create_x509_certificate(self, private_key, algorithm)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/multibackend.py\", line 381, in create_x509_certificate\n return b.create_x509_certificate(builder, private_key, algorithm)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 1402, in create_x509_certificate\n x509_cert, _encode_name_gc(self, list(builder._subject_name))\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 158, in _encode_name_gc\n subject = _encode_name(backend, attributes)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 153, in _encode_name\n backend.openssl_assert(res == 1)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 719, in openssl_assert\n return binding._openssl_assert(self._lib, ok)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/bindings/openssl/binding.py\", line 43, in _openssl_assert\n errors\ncryptography.exceptions.InternalError: Unknown OpenSSL error. Please file an issue at https://github.com/pyca/cryptography/issues with information on how to reproduce this. ([_OpenSSLError(code=218603671, lib=13, func=122, reason=151)])\n```\n\nThe demo code below reproduces this error. 
Tested on Ubuntu 14.04.3 x64 w/ Python 3.4.3, cryptography 1.2.1, and libssl-dev 1.0.1f-1ubuntu2.16.\n\n```\n#!/usr/bin/env python3\n\nimport datetime\n\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\nDUR_ONE_DAY = datetime.timedelta(1, 0, 0)\nDUR_ONE_YEAR = datetime.timedelta(366, 0, 0)\n\nca_key = rsa.generate_private_key(65537, 4096, default_backend())\n\nsub_attr = [x509.NameAttribute(x509.NameOID.COUNTRY_NAME, \"InvalidCC\")]\n\nbuilder = x509.CertificateBuilder()\nbuilder = builder.issuer_name(x509.Name(sub_attr))\nbuilder = builder.subject_name(x509.Name(sub_attr))\nbuilder = builder.not_valid_before(datetime.datetime.today() - DUR_ONE_DAY)\nbuilder = builder.not_valid_after(datetime.datetime.today() + DUR_ONE_YEAR)\nbuilder = builder.serial_number(3)\nbuilder = builder.public_key(ca_key.public_key())\n\nextensions = []\nextensions.append(x509.BasicConstraints(ca=True, path_length=1))\nfor ext in extensions:\n builder = builder.add_extension(ext, critical=True)\n\nca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())\n\n# builder.sign() will fail with when CONTRY_NAME is invalid country code:\n# cryptography.exceptions.InternalError: Unknown\n# OpenSSL error. Please file an issue at\n# https://github.com/pyca/cryptography/issues with information on\n# how to reproduce this. ([_OpenSSLError(code=218603671, lib=13,\n# func=122, reason=151)])\n```\n\nMight be nice to raise a more relevant error message. Or catch this even sooner when the NameAttribute is added. \n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.x509.oid import ObjectIdentifier\n\n\nclass NameAttribute(object):\n def __init__(self, oid, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(value, six.text_type):\n raise TypeError(\n \"value argument must be a text type.\"\n )\n\n self._oid = oid\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n value = utils.read_only_property(\"_value\")\n\n def __eq__(self, other):\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.oid, self.value))\n\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass Name(object):\n def __init__(self, attributes):\n self._attributes = attributes\n\n def get_attributes_for_oid(self, oid):\n return [i for i in self if i.oid == oid]\n\n def __eq__(self, other):\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self):\n return iter(self._attributes)\n\n def __len__(self):\n return len(self._attributes)\n\n def __repr__(self):\n return \"<Name({0!r})>\".format(self._attributes)\n", "path": "src/cryptography/x509/name.py"}]} | 2,158 | 185 |
gh_patches_debug_14688 | rasdani/github-patches | git_diff | yt-project__yt-3569 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OffAxisProjectionPlot Failed when Log Level Set to Debug
### Bug report
**Bug summary**
If the log level is set to `debug`, `OffAxisProjectionPlot` fails when following the tutorial at [website](https://yt-project.org/doc/visualizing/plots.html#off-axis-projection-plots).
Although this can be avoided by setting the log level to `info`, it is annoying when debugging.
**Code for reproduction**
```python
import yt
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
L = [1, 1, 0] # vector normal to cutting plane
north_vector = [-1, 1, 0]
prj = yt.OffAxisProjectionPlot(
ds, L, ("gas", "density"), width=(25, "kpc"), north_vector=north_vector
)
prj.save()
```
**Actual outcome**
```
yt : [DEBUG ] 2021-10-15 13:54:47,845 Setting normalized vectors [-0.5 -0.5 -0.5] dimensionless None
Traceback (most recent call last):
File "OffAxisProjectionPlot.py", line 6, in <module>
prj = yt.OffAxisProjectionPlot(
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 2254, in __init__
PWViewerMPL.__init__(
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 873, in __init__
PlotWindow.__init__(self, *args, **kwargs)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 257, in __init__
self._setup_plots()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 994, in _setup_plots
self._recreate_frb()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 317, in _recreate_frb
self._frb._get_data_source_fields()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py", line 176, in _get_data_source_fields
self[f]
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py", line 602, in __getitem__
buff = off_axis_projection(
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/off_axis_projection.py", line 341, in off_axis_projection
camera.set_width(width)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py", line 374, in set_width
self.width = width
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py", line 208, in fset
self.switch_orientation()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py", line 441, in switch_orientation
self._setup_normalized_vectors(normal_vector, north_vector)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/orientation.py", line 65, in _setup_normalized_vectors
mylog.debug("Setting normalized vectors %s %s", normal_vector, north_vector)
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 1422, in debug
self._log(DEBUG, msg, args, **kwargs)
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 1577, in _log
self.handle(record)
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 1586, in handle
if (not self.disabled) and self.filter(record):
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 807, in filter
result = f.filter(record)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/logger.py", line 78, in filter
if current_log != getattr(self, "last_log", None):
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
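The last frame is the telling one: the duplicate-message filter in `yt/utilities/logger.py` compares the current log record against the previous one with `!=`, and once the record carries a unyt/NumPy array (the normal vector being logged), that comparison is elementwise. A small, yt-independent illustration of why that raises:

```python
import numpy as np

current = np.array([-0.5, -0.5, -0.5])
last = np.array([-0.5, -0.5, -0.5])

print(current != last)  # elementwise: [False False False], not a single bool

try:
    if current != last:  # truth value of a multi-element array is ambiguous
        pass
except ValueError as err:
    print(err)  # same message as in the traceback above
```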
**Expected outcome**

**Version Information**
* Operating System: Ubuntu 20.04
* Python Version: 3.8
* yt version: 4.1.dev `yt/main` commit `e387a2`
* Install from source code via pip.
</issue>
<code>
[start of yt/utilities/orientation.py]
1 import numpy as np
2
3 from yt.funcs import mylog
4 from yt.units.yt_array import YTArray
5 from yt.utilities.exceptions import YTException
6
7
8 def _aligned(a, b):
9 aligned_component = np.abs(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))
10 return np.isclose(aligned_component, 1.0, 1.0e-13)
11
12
13 def _validate_unit_vectors(normal_vector, north_vector):
14
15 # Make sure vectors are unitless
16 if north_vector is not None:
17 north_vector = YTArray(north_vector, "", dtype="float64")
18 if normal_vector is not None:
19 normal_vector = YTArray(normal_vector, "", dtype="float64")
20
21 if not np.dot(normal_vector, normal_vector) > 0:
22 raise YTException("normal_vector cannot be the zero vector.")
23 if north_vector is not None and _aligned(north_vector, normal_vector):
24 raise YTException("normal_vector and north_vector cannot be aligned.")
25
26 return normal_vector, north_vector
27
28
29 class Orientation:
30 def __init__(self, normal_vector, north_vector=None, steady_north=False):
31 r"""An object that returns a set of basis vectors for orienting
32 cameras a data containers.
33
34 Parameters
35 ----------
36 normal_vector : array_like
37 A vector normal to the image plane
38 north_vector : array_like, optional
39 The 'up' direction to orient the image plane.
40 If not specified, gets calculated automatically
41 steady_north : bool, optional
42 Boolean to control whether to normalize the north_vector
43 by subtracting off the dot product of it and the normal
44 vector. Makes it easier to do rotations along a single
45 axis. If north_vector is specified, is switched to
46 True. Default: False
47
48 """
49
50 normal_vector, north_vector = _validate_unit_vectors(
51 normal_vector, north_vector
52 )
53 self.steady_north = steady_north
54 if north_vector is not None:
55 self.steady_north = True
56 self.north_vector = north_vector
57 self._setup_normalized_vectors(normal_vector, north_vector)
58 if self.north_vector is None:
59 self.north_vector = self.unit_vectors[1]
60
61 def _setup_normalized_vectors(self, normal_vector, north_vector):
62 normal_vector, north_vector = _validate_unit_vectors(
63 normal_vector, north_vector
64 )
65 mylog.debug("Setting normalized vectors %s %s", normal_vector, north_vector)
66 # Now we set up our various vectors
67 normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))
68 if north_vector is None:
69 vecs = np.identity(3)
70 t = np.cross(normal_vector, vecs).sum(axis=1)
71 ax = t.argmax()
72 east_vector = np.cross(vecs[ax, :], normal_vector).ravel()
73 # self.north_vector must remain None otherwise rotations about a fixed axis
74 # will break. The north_vector calculated here will still be included
75 # in self.unit_vectors.
76 north_vector = np.cross(normal_vector, east_vector).ravel()
77 else:
78 if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):
79 north_vector = (
80 north_vector - np.dot(north_vector, normal_vector) * normal_vector
81 )
82 east_vector = np.cross(north_vector, normal_vector).ravel()
83 north_vector /= np.sqrt(np.dot(north_vector, north_vector))
84 east_vector /= np.sqrt(np.dot(east_vector, east_vector))
85 self.normal_vector = normal_vector
86 self.north_vector = north_vector
87 self.unit_vectors = YTArray([east_vector, north_vector, normal_vector], "")
88 self.inv_mat = np.linalg.pinv(self.unit_vectors)
89
[end of yt/utilities/orientation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/yt/utilities/orientation.py b/yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -1,6 +1,5 @@
import numpy as np
-from yt.funcs import mylog
from yt.units.yt_array import YTArray
from yt.utilities.exceptions import YTException
@@ -62,7 +61,6 @@
normal_vector, north_vector = _validate_unit_vectors(
normal_vector, north_vector
)
- mylog.debug("Setting normalized vectors %s %s", normal_vector, north_vector)
# Now we set up our various vectors
normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))
if north_vector is None:
| {"golden_diff": "diff --git a/yt/utilities/orientation.py b/yt/utilities/orientation.py\n--- a/yt/utilities/orientation.py\n+++ b/yt/utilities/orientation.py\n@@ -1,6 +1,5 @@\n import numpy as np\n \n-from yt.funcs import mylog\n from yt.units.yt_array import YTArray\n from yt.utilities.exceptions import YTException\n \n@@ -62,7 +61,6 @@\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n- mylog.debug(\"Setting normalized vectors %s %s\", normal_vector, north_vector)\n # Now we set up our various vectors\n normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))\n if north_vector is None:\n", "issue": "OffAxisProjectionPlot Failed when Log Level Set to Debug\n### Bug report\r\n\r\n**Bug summary**\r\nIf log level set to `debug`, then `OffAxisProjectionPlot` would fail when following the tutorial at [website](https://yt-project.org/doc/visualizing/plots.html#off-axis-projection-plots). \r\nAlhough this can be avoid by setting log level to `info`, it is kind of annoying when debugging.\r\n\r\n**Code for reproduction**\r\n\r\n```python\r\nimport yt\r\n\r\nds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\r\nL = [1, 1, 0] # vector normal to cutting plane\r\nnorth_vector = [-1, 1, 0]\r\nprj = yt.OffAxisProjectionPlot(\r\n ds, L, (\"gas\", \"density\"), width=(25, \"kpc\"), north_vector=north_vector\r\n)\r\nprj.save()\r\n```\r\n\r\n**Actual outcome**\r\n\r\n```\r\nyt : [DEBUG ] 2021-10-15 13:54:47,845 Setting normalized vectors [-0.5 -0.5 -0.5] dimensionless None\r\nTraceback (most recent call last):\r\n File \"OffAxisProjectionPlot.py\", line 6, in <module>\r\n prj = yt.OffAxisProjectionPlot(\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 2254, in __init__\r\n PWViewerMPL.__init__(\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 873, in __init__\r\n PlotWindow.__init__(self, *args, **kwargs)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 257, in __init__\r\n self._setup_plots()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 994, in _setup_plots\r\n self._recreate_frb()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 317, in _recreate_frb\r\n self._frb._get_data_source_fields()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py\", line 176, in _get_data_source_fields\r\n self[f]\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py\", line 602, in __getitem__\r\n buff = off_axis_projection(\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/off_axis_projection.py\", line 341, in off_axis_projection\r\n camera.set_width(width)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py\", line 374, in set_width\r\n self.width = width\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py\", line 208, in fset\r\n self.switch_orientation()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py\", line 
441, in switch_orientation\r\n self._setup_normalized_vectors(normal_vector, north_vector)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/orientation.py\", line 65, in _setup_normalized_vectors\r\n mylog.debug(\"Setting normalized vectors %s %s\", normal_vector, north_vector)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 1422, in debug\r\n self._log(DEBUG, msg, args, **kwargs)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 1577, in _log\r\n self.handle(record)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 1586, in handle\r\n if (not self.disabled) and self.filter(record):\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 807, in filter\r\n result = f.filter(record)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/logger.py\", line 78, in filter\r\n if current_log != getattr(self, \"last_log\", None):\r\nValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\r\n```\r\n\r\n**Expected outcome**\r\n\r\n\r\n\r\n**Version Information**\r\n * Operating System: Ubuntu 20.04\r\n * Python Version: 3.8\r\n * yt version: 4.1.dev `yt/main` commit `e387a2`\r\n * Install from source code via pip.\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom yt.funcs import mylog\nfrom yt.units.yt_array import YTArray\nfrom yt.utilities.exceptions import YTException\n\n\ndef _aligned(a, b):\n aligned_component = np.abs(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n return np.isclose(aligned_component, 1.0, 1.0e-13)\n\n\ndef _validate_unit_vectors(normal_vector, north_vector):\n\n # Make sure vectors are unitless\n if north_vector is not None:\n north_vector = YTArray(north_vector, \"\", dtype=\"float64\")\n if normal_vector is not None:\n normal_vector = YTArray(normal_vector, \"\", dtype=\"float64\")\n\n if not np.dot(normal_vector, normal_vector) > 0:\n raise YTException(\"normal_vector cannot be the zero vector.\")\n if north_vector is not None and _aligned(north_vector, normal_vector):\n raise YTException(\"normal_vector and north_vector cannot be aligned.\")\n\n return normal_vector, north_vector\n\n\nclass Orientation:\n def __init__(self, normal_vector, north_vector=None, steady_north=False):\n r\"\"\"An object that returns a set of basis vectors for orienting\n cameras a data containers.\n\n Parameters\n ----------\n normal_vector : array_like\n A vector normal to the image plane\n north_vector : array_like, optional\n The 'up' direction to orient the image plane.\n If not specified, gets calculated automatically\n steady_north : bool, optional\n Boolean to control whether to normalize the north_vector\n by subtracting off the dot product of it and the normal\n vector. Makes it easier to do rotations along a single\n axis. If north_vector is specified, is switched to\n True. 
Default: False\n\n \"\"\"\n\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n self.steady_north = steady_north\n if north_vector is not None:\n self.steady_north = True\n self.north_vector = north_vector\n self._setup_normalized_vectors(normal_vector, north_vector)\n if self.north_vector is None:\n self.north_vector = self.unit_vectors[1]\n\n def _setup_normalized_vectors(self, normal_vector, north_vector):\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n mylog.debug(\"Setting normalized vectors %s %s\", normal_vector, north_vector)\n # Now we set up our various vectors\n normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))\n if north_vector is None:\n vecs = np.identity(3)\n t = np.cross(normal_vector, vecs).sum(axis=1)\n ax = t.argmax()\n east_vector = np.cross(vecs[ax, :], normal_vector).ravel()\n # self.north_vector must remain None otherwise rotations about a fixed axis\n # will break. The north_vector calculated here will still be included\n # in self.unit_vectors.\n north_vector = np.cross(normal_vector, east_vector).ravel()\n else:\n if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):\n north_vector = (\n north_vector - np.dot(north_vector, normal_vector) * normal_vector\n )\n east_vector = np.cross(north_vector, normal_vector).ravel()\n north_vector /= np.sqrt(np.dot(north_vector, north_vector))\n east_vector /= np.sqrt(np.dot(east_vector, east_vector))\n self.normal_vector = normal_vector\n self.north_vector = north_vector\n self.unit_vectors = YTArray([east_vector, north_vector, normal_vector], \"\")\n self.inv_mat = np.linalg.pinv(self.unit_vectors)\n", "path": "yt/utilities/orientation.py"}]} | 2,876 | 169 |
gh_patches_debug_25364 | rasdani/github-patches | git_diff | pypa__setuptools-1312 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
find_packages() doesn't find PEP 420 packages
Originally reported by: **gwideman (Bitbucket: [gwideman](http://bitbucket.org/gwideman), GitHub: [gwideman](http://github.com/gwideman))**
---
```
#!python
setup(...
packages=find_packages(..)
)
```
on the developer machine will fail to find packages that lack an `__init__.py` file, as has been allowed since Python 3.3. However, such packages do appear to get included and later installed when listed explicitly, e.g. packages=['mypkg'].
Note: When testing this, before each test be sure to delete all generated metadata, including that which setup may previously have placed in the original source directory, as it seems that setup may use metadata created on a previous run in order to include files.
This is part of a general problem reported in issue #83, but I've logged it separately as it's specifically about setuptools.
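For reference, the module shown below already contains a `PEP420PackageFinder` whose `_looks_like_package` check always passes; a sketch of leaning on it directly from a `setup.py` until `find_packages()` itself learns about PEP 420 (the project name here is hypothetical, and this class is not a documented public API):

```python
from setuptools import PEP420PackageFinder, setup

setup(
    name="mypkg-dist",  # hypothetical distribution name
    # Walks the tree like find_packages(), but treats every directory as a
    # package, so __init__.py-less (PEP 420) packages are collected too.
    packages=PEP420PackageFinder.find(exclude=("tests", "tests.*")),
)
```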
---
- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/97
</issue>
<code>
[start of setuptools/__init__.py]
1 """Extensions to the 'distutils' for large or complex distributions"""
2
3 import os
4 import functools
5 import distutils.core
6 import distutils.filelist
7 from distutils.util import convert_path
8 from fnmatch import fnmatchcase
9
10 from setuptools.extern.six.moves import filter, map
11
12 import setuptools.version
13 from setuptools.extension import Extension
14 from setuptools.dist import Distribution, Feature
15 from setuptools.depends import Require
16 from . import monkey
17
18 __metaclass__ = type
19
20 __all__ = [
21 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
22 'find_packages',
23 ]
24
25 __version__ = setuptools.version.__version__
26
27 bootstrap_install_from = None
28
29 # If we run 2to3 on .py files, should we also convert docstrings?
30 # Default: yes; assume that we can detect doctests reliably
31 run_2to3_on_doctests = True
32 # Standard package names for fixer packages
33 lib2to3_fixer_packages = ['lib2to3.fixes']
34
35
36 class PackageFinder:
37 """
38 Generate a list of all Python packages found within a directory
39 """
40
41 @classmethod
42 def find(cls, where='.', exclude=(), include=('*',)):
43 """Return a list all Python packages found within directory 'where'
44
45 'where' is the root directory which will be searched for packages. It
46 should be supplied as a "cross-platform" (i.e. URL-style) path; it will
47 be converted to the appropriate local path syntax.
48
49 'exclude' is a sequence of package names to exclude; '*' can be used
50 as a wildcard in the names, such that 'foo.*' will exclude all
51 subpackages of 'foo' (but not 'foo' itself).
52
53 'include' is a sequence of package names to include. If it's
54 specified, only the named packages will be included. If it's not
55 specified, all found packages will be included. 'include' can contain
56 shell style wildcard patterns just like 'exclude'.
57 """
58
59 return list(cls._find_packages_iter(
60 convert_path(where),
61 cls._build_filter('ez_setup', '*__pycache__', *exclude),
62 cls._build_filter(*include)))
63
64 @classmethod
65 def _find_packages_iter(cls, where, exclude, include):
66 """
67 All the packages found in 'where' that pass the 'include' filter, but
68 not the 'exclude' filter.
69 """
70 for root, dirs, files in os.walk(where, followlinks=True):
71 # Copy dirs to iterate over it, then empty dirs.
72 all_dirs = dirs[:]
73 dirs[:] = []
74
75 for dir in all_dirs:
76 full_path = os.path.join(root, dir)
77 rel_path = os.path.relpath(full_path, where)
78 package = rel_path.replace(os.path.sep, '.')
79
80 # Skip directory trees that are not valid packages
81 if ('.' in dir or not cls._looks_like_package(full_path)):
82 continue
83
84 # Should this package be included?
85 if include(package) and not exclude(package):
86 yield package
87
88 # Keep searching subdirectories, as there may be more packages
89 # down there, even if the parent was excluded.
90 dirs.append(dir)
91
92 @staticmethod
93 def _looks_like_package(path):
94 """Does a directory look like a package?"""
95 return os.path.isfile(os.path.join(path, '__init__.py'))
96
97 @staticmethod
98 def _build_filter(*patterns):
99 """
100 Given a list of patterns, return a callable that will be true only if
101 the input matches at least one of the patterns.
102 """
103 return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
104
105
106 class PEP420PackageFinder(PackageFinder):
107 @staticmethod
108 def _looks_like_package(path):
109 return True
110
111
112 find_packages = PackageFinder.find
113
114
115 def _install_setup_requires(attrs):
116 # Note: do not use `setuptools.Distribution` directly, as
117 # our PEP 517 backend patch `distutils.core.Distribution`.
118 dist = distutils.core.Distribution(dict(
119 (k, v) for k, v in attrs.items()
120 if k in ('dependency_links', 'setup_requires')
121 ))
122 # Honor setup.cfg's options.
123 dist.parse_config_files(ignore_option_errors=True)
124 if dist.setup_requires:
125 dist.fetch_build_eggs(dist.setup_requires)
126
127
128 def setup(**attrs):
129 # Make sure we have any requirements needed to interpret 'attrs'.
130 _install_setup_requires(attrs)
131 return distutils.core.setup(**attrs)
132
133 setup.__doc__ = distutils.core.setup.__doc__
134
135
136 _Command = monkey.get_unpatched(distutils.core.Command)
137
138
139 class Command(_Command):
140 __doc__ = _Command.__doc__
141
142 command_consumes_arguments = False
143
144 def __init__(self, dist, **kw):
145 """
146 Construct the command for dist, updating
147 vars(self) with any keyword parameters.
148 """
149 _Command.__init__(self, dist)
150 vars(self).update(kw)
151
152 def reinitialize_command(self, command, reinit_subcommands=0, **kw):
153 cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
154 vars(cmd).update(kw)
155 return cmd
156
157
158 def _find_all_simple(path):
159 """
160 Find all files under 'path'
161 """
162 results = (
163 os.path.join(base, file)
164 for base, dirs, files in os.walk(path, followlinks=True)
165 for file in files
166 )
167 return filter(os.path.isfile, results)
168
169
170 def findall(dir=os.curdir):
171 """
172 Find all files under 'dir' and return the list of full filenames.
173 Unless dir is '.', return full filenames with dir prepended.
174 """
175 files = _find_all_simple(dir)
176 if dir == os.curdir:
177 make_rel = functools.partial(os.path.relpath, start=dir)
178 files = map(make_rel, files)
179 return list(files)
180
181
182 monkey.patch_all()
183
[end of setuptools/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/__init__.py b/setuptools/__init__.py
--- a/setuptools/__init__.py
+++ b/setuptools/__init__.py
@@ -1,12 +1,14 @@
"""Extensions to the 'distutils' for large or complex distributions"""
import os
+import sys
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
+from setuptools.extern.six import PY3
from setuptools.extern.six.moves import filter, map
import setuptools.version
@@ -17,11 +19,15 @@
__metaclass__ = type
+
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
- 'find_packages',
+ 'find_packages'
]
+if PY3:
+ __all__.append('find_packages_ns')
+
__version__ = setuptools.version.__version__
bootstrap_install_from = None
@@ -111,6 +117,9 @@
find_packages = PackageFinder.find
+if PY3:
+ find_packages_ns = PEP420PackageFinder.find
+
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
| {"golden_diff": "diff --git a/setuptools/__init__.py b/setuptools/__init__.py\n--- a/setuptools/__init__.py\n+++ b/setuptools/__init__.py\n@@ -1,12 +1,14 @@\n \"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n \n import os\n+import sys\n import functools\n import distutils.core\n import distutils.filelist\n from distutils.util import convert_path\n from fnmatch import fnmatchcase\n \n+from setuptools.extern.six import PY3\n from setuptools.extern.six.moves import filter, map\n \n import setuptools.version\n@@ -17,11 +19,15 @@\n \n __metaclass__ = type\n \n+\n __all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n- 'find_packages',\n+ 'find_packages'\n ]\n \n+if PY3:\n+ __all__.append('find_packages_ns')\n+\n __version__ = setuptools.version.__version__\n \n bootstrap_install_from = None\n@@ -111,6 +117,9 @@\n \n find_packages = PackageFinder.find\n \n+if PY3:\n+ find_packages_ns = PEP420PackageFinder.find\n+\n \n def _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n", "issue": "find_packages() doesn't find PEP 420 packages\nOriginally reported by: **gwideman (Bitbucket: [gwideman](http://bitbucket.org/gwideman), GitHub: [gwideman](http://github.com/gwideman))**\n\n---\n\n```\n#!python\n\nsetup(...\n packages=find_packages(..)\n )\n```\n\non the developer machine will fail to find packages that lack a `__init__.py` file, as is allowed in Python 3.3. However, such packages listed explicitly: packages=['mypkg'] do appear to get included and later installed.\n\nNote: When testing this, before each test be sure to delete all generated metadata, including that which setup may previously have placed in the original source directory, as it seems that setup may use metadata created on a previous run in order to include files.\n\nThis is part of a general problem reported in issue #83, but I've logged it separately as it's specifically about setuptools.\n\n---\n- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/97\n\n", "before_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport os\nimport functools\nimport distutils.core\nimport distutils.filelist\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\n\nfrom setuptools.extern.six.moves import filter, map\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution, Feature\nfrom setuptools.depends import Require\nfrom . import monkey\n\n__metaclass__ = type\n\n__all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n 'find_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n# If we run 2to3 on .py files, should we also convert docstrings?\n# Default: yes; assume that we can detect doctests reliably\nrun_2to3_on_doctests = True\n# Standard package names for fixer packages\nlib2to3_fixer_packages = ['lib2to3.fixes']\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. 
URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include)))\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if ('.' in dir or not cls._looks_like_package(full_path)):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n dist = distutils.core.Distribution(dict(\n (k, v) for k, v in attrs.items()\n if k in ('dependency_links', 'setup_requires')\n ))\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return 
filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}]} | 2,518 | 287 |
gh_patches_debug_9979 | rasdani/github-patches | git_diff | open-mmlab__mmaction2-1355 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ssn_head.py device unaligned
**Describe the bug**
I trained an SSN successfully on a new dataset, but when I test the checkpoint an error is raised. I think it's an internal bug.
**Reproduction**
1. What command or script did you run?
```
PYTHONPATH=$PWD:$PYTHONPATH mim test mmaction configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py --checkpoint work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --gpus 1 --eval mAP
```
**Environment**
```
sys.platform: linux
Python: 3.9.7 (default, Sep 16 2021, 13:09:58) [GCC 7.5.0]
CUDA available: True
GPU 0,1: NVIDIA GeForce RTX 2080 Ti
CUDA_HOME: /usr/local/cuda
NVCC: Build cuda_11.5.r11.5/compiler.30672275_0
GCC: gcc (Ubuntu 8.4.0-3ubuntu2) 8.4.0
PyTorch: 1.10.1
PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) oneAPI Math Kernel Library Version 2021.4-Product Build 20210904 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.3
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37
- CuDNN 8.2
- Magma 2.5.2
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,
TorchVision: 0.11.2
OpenCV: 4.5.4
MMCV: 1.4.1
MMCV Compiler: GCC 7.3
MMCV CUDA Compiler: 11.3
MMAction2: 0.20.0+3758171
```
**Error traceback**
```shell
Testing command is python /home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --launcher none --eval mAP.
2021-12-24 12:03:40,264 - mmaction - INFO - 13 out of 13 videos are valid.
2021-12-24 12:03:40,290 - mmaction - INFO - SSNDataset: proposal file my_data/dfmad70/proposals/dfmad_bmn_test_100_proposal_list.txt parsed.
load checkpoint from local path: work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth
[ ] 0/13, elapsed: 0s, ETA:Traceback (most recent call last):
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py", line 365, in <module>
main()
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py", line 350, in main
outputs = inference_pytorch(args, cfg, distributed, data_loader)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py", line 161, in inference_pytorch
outputs = single_gpu_test(model, data_loader)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/engine/test.py", line 33, in single_gpu_test
result = model(return_loss=False, **data)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/parallel/data_parallel.py", line 50, in forward
return super().forward(*inputs, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
return self.module(*inputs[0], **kwargs[0])
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/base.py", line 166, in forward
return self.forward_test(*args, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/ssn.py", line 120, in forward_test
bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
```
**Bug fix**
The bug comes from here:
https://github.com/open-mmlab/mmaction2/blob/6f98109b682c5689dc0548a98b89ed1c3b956ddb/mmaction/models/localizers/ssn.py#L114-L121
We can see that all tensors are converted with ``cpu().numpy()`` except ``reg_norm_consts``.
Adding the line below fixes the error:
```python
reg_norm_consts = reg_norm_consts.cpu().numpy()
```
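A self-contained sketch of the denormalisation step with that line in place (CPU tensors are used here as stand-ins, since the point is only that `reg_norm_consts` becomes a NumPy array before it is mixed into the arithmetic on `bbox_preds`):

```python
import torch

num_classes = 3
bbox_preds = torch.randn(8, num_classes, 2)      # lives on the GPU in the real model
reg_norm_consts = torch.tensor([[0.1, 0.2],
                                [1.5, 2.0]])

reg_norm_consts = reg_norm_consts.cpu().numpy()  # the proposed one-line fix

bbox_preds[:, :, 0] = bbox_preds[:, :, 0] * reg_norm_consts[1, 0] + reg_norm_consts[0, 0]
bbox_preds[:, :, 1] = bbox_preds[:, :, 1] * reg_norm_consts[1, 1] + reg_norm_consts[0, 1]
print(bbox_preds.shape)  # torch.Size([8, 3, 2])
```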
I'd like to create a PR if needed.
</issue>
<code>
[start of mmaction/models/localizers/ssn.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import torch
3 import torch.nn as nn
4
5 from .. import builder
6 from ..builder import LOCALIZERS
7 from .base import BaseTAGClassifier
8
9
10 @LOCALIZERS.register_module()
11 class SSN(BaseTAGClassifier):
12 """Temporal Action Detection with Structured Segment Networks.
13
14 Args:
15 backbone (dict): Config for building backbone.
16 cls_head (dict): Config for building classification head.
17 in_channels (int): Number of channels for input data.
18 Default: 3.
19 spatial_type (str): Type of spatial pooling.
20 Default: 'avg'.
21 dropout_ratio (float): Ratio of dropout.
22 Default: 0.5.
23 loss_cls (dict): Config for building loss.
24 Default: ``dict(type='SSNLoss')``.
25 train_cfg (dict | None): Config for training. Default: None.
26 test_cfg (dict | None): Config for testing. Default: None.
27 """
28
29 def __init__(self,
30 backbone,
31 cls_head,
32 in_channels=3,
33 spatial_type='avg',
34 dropout_ratio=0.5,
35 loss_cls=dict(type='SSNLoss'),
36 train_cfg=None,
37 test_cfg=None):
38
39 super().__init__(backbone, cls_head, train_cfg, test_cfg)
40
41 self.is_test_prepared = False
42 self.in_channels = in_channels
43
44 self.spatial_type = spatial_type
45 if self.spatial_type == 'avg':
46 self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)
47 elif self.spatial_type == 'max':
48 self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)
49 else:
50 self.pool = None
51
52 self.dropout_ratio = dropout_ratio
53 if self.dropout_ratio != 0:
54 self.dropout = nn.Dropout(p=self.dropout_ratio)
55 else:
56 self.dropout = None
57 self.loss_cls = builder.build_loss(loss_cls)
58
59 def forward_train(self, imgs, proposal_scale_factor, proposal_type,
60 proposal_labels, reg_targets, **kwargs):
61 """Define the computation performed at every call when training."""
62 imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])
63
64 x = self.extract_feat(imgs)
65
66 if self.pool:
67 x = self.pool(x)
68 if self.dropout is not None:
69 x = self.dropout(x)
70
71 activity_scores, completeness_scores, bbox_preds = self.cls_head(
72 (x, proposal_scale_factor))
73
74 loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
75 proposal_type, proposal_labels, reg_targets,
76 self.train_cfg)
77 loss_dict = dict(**loss)
78
79 return loss_dict
80
81 def forward_test(self, imgs, relative_proposal_list, scale_factor_list,
82 proposal_tick_list, reg_norm_consts, **kwargs):
83 """Define the computation performed at every call when testing."""
84 num_crops = imgs.shape[0]
85 imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])
86 num_ticks = imgs.shape[1]
87
88 output = []
89 minibatch_size = self.test_cfg.ssn.sampler.batch_size
90 for idx in range(0, num_ticks, minibatch_size):
91 chunk = imgs[:, idx:idx +
92 minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])
93 x = self.extract_feat(chunk)
94 if self.pool:
95 x = self.pool(x)
96 # Merge crop to save memory.
97 x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)
98 output.append(x)
99 output = torch.cat(output, dim=0)
100
101 relative_proposal_list = relative_proposal_list.squeeze(0)
102 proposal_tick_list = proposal_tick_list.squeeze(0)
103 scale_factor_list = scale_factor_list.squeeze(0)
104 reg_norm_consts = reg_norm_consts.squeeze(0)
105
106 if not self.is_test_prepared:
107 self.is_test_prepared = self.cls_head.prepare_test_fc(
108 self.cls_head.consensus.num_multipliers)
109
110 (output, activity_scores, completeness_scores,
111 bbox_preds) = self.cls_head(
112 (output, proposal_tick_list, scale_factor_list), test_mode=True)
113
114 relative_proposal_list = relative_proposal_list.cpu().numpy()
115 activity_scores = activity_scores.cpu().numpy()
116 completeness_scores = completeness_scores.cpu().numpy()
117 if bbox_preds is not None:
118 bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
119 bbox_preds[:, :, 0] = (
120 bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
121 reg_norm_consts[0, 0])
122 bbox_preds[:, :, 1] = (
123 bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +
124 reg_norm_consts[0, 1])
125 bbox_preds = bbox_preds.cpu().numpy()
126
127 result = [
128 dict(
129 relative_proposal_list=relative_proposal_list,
130 activity_scores=activity_scores,
131 completeness_scores=completeness_scores,
132 bbox_preds=bbox_preds)
133 ]
134
135 return result
136
[end of mmaction/models/localizers/ssn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmaction/models/localizers/ssn.py b/mmaction/models/localizers/ssn.py
--- a/mmaction/models/localizers/ssn.py
+++ b/mmaction/models/localizers/ssn.py
@@ -114,6 +114,7 @@
relative_proposal_list = relative_proposal_list.cpu().numpy()
activity_scores = activity_scores.cpu().numpy()
completeness_scores = completeness_scores.cpu().numpy()
+ reg_norm_consts = reg_norm_consts.cpu().numpy()
if bbox_preds is not None:
bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
bbox_preds[:, :, 0] = (
| {"golden_diff": "diff --git a/mmaction/models/localizers/ssn.py b/mmaction/models/localizers/ssn.py\n--- a/mmaction/models/localizers/ssn.py\n+++ b/mmaction/models/localizers/ssn.py\n@@ -114,6 +114,7 @@\n relative_proposal_list = relative_proposal_list.cpu().numpy()\n activity_scores = activity_scores.cpu().numpy()\n completeness_scores = completeness_scores.cpu().numpy()\n+ reg_norm_consts = reg_norm_consts.cpu().numpy()\n if bbox_preds is not None:\n bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)\n bbox_preds[:, :, 0] = (\n", "issue": "ssn_head.py device unaligned\n**Describe the bug**\r\n\r\nI trained a SSN successfully on a new dataset. But when I test the checkpoint, an error raised. And I think it's a inner bug.\r\n\r\n\r\n**Reproduction**\r\n\r\n1. What command or script did you run?\r\n\r\n```\r\nPYTHONPATH=$PWD:$PYTHONPATH mim test mmaction configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py --checkpoint work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --gpus 1 --eval mAP\r\n```\r\n\r\n\r\n**Environment**\r\n```\r\nsys.platform: linux\r\nPython: 3.9.7 (default, Sep 16 2021, 13:09:58) [GCC 7.5.0]\r\nCUDA available: True\r\nGPU 0,1: NVIDIA GeForce RTX 2080 Ti\r\nCUDA_HOME: /usr/local/cuda\r\nNVCC: Build cuda_11.5.r11.5/compiler.30672275_0\r\nGCC: gcc (Ubuntu 8.4.0-3ubuntu2) 8.4.0\r\nPyTorch: 1.10.1\r\nPyTorch compiling details: PyTorch built with:\r\n - GCC 7.3\r\n - C++ Version: 201402\r\n - Intel(R) oneAPI Math Kernel Library Version 2021.4-Product Build 20210904 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)\r\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\r\n - LAPACK is enabled (usually provided by MKL)\r\n - NNPACK is enabled\r\n - CPU capability usage: AVX2\r\n - CUDA Runtime 11.3\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37\r\n - CuDNN 8.2\r\n - Magma 2.5.2\r\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \r\n\r\nTorchVision: 0.11.2\r\nOpenCV: 4.5.4\r\nMMCV: 1.4.1\r\nMMCV Compiler: GCC 
7.3\r\nMMCV CUDA Compiler: 11.3\r\nMMAction2: 0.20.0+3758171\r\n```\r\n\r\n**Error traceback**\r\n```shell\r\nTesting command is python /home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --launcher none --eval mAP. \r\n2021-12-24 12:03:40,264 - mmaction - INFO - 13 out of 13 videos are valid.\r\n2021-12-24 12:03:40,290 - mmaction - INFO - SSNDataset: proposal file my_data/dfmad70/proposals/dfmad_bmn_test_100_proposal_list.txt parsed.\r\nload checkpoint from local path: work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth\r\n[ ] 0/13, elapsed: 0s, ETA:Traceback (most recent call last):\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py\", line 365, in <module>\r\n main()\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py\", line 350, in main\r\n outputs = inference_pytorch(args, cfg, distributed, data_loader)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py\", line 161, in inference_pytorch\r\n outputs = single_gpu_test(model, data_loader)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/engine/test.py\", line 33, in single_gpu_test\r\n result = model(return_loss=False, **data)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/parallel/data_parallel.py\", line 50, in forward\r\n return super().forward(*inputs, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/parallel/data_parallel.py\", line 166, in forward\r\n return self.module(*inputs[0], **kwargs[0])\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/base.py\", line 166, in forward\r\n return self.forward_test(*args, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/ssn.py\", line 120, in forward_test\r\n bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +\r\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\r\n```\r\n\r\n**Bug fix**\r\nthe bug come from here:\r\nhttps://github.com/open-mmlab/mmaction2/blob/6f98109b682c5689dc0548a98b89ed1c3b956ddb/mmaction/models/localizers/ssn.py#L114-L121\r\nWe can see that all tensor are transformed to ``cpu().numpy()`` except the ``reg_norm_consts``.\r\nAdding the below line solved the error:\r\n```python\r\nreg_norm_consts = reg_norm_consts.cpu().numpy()\r\n```\r\nI'd like to create a PR if need.\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom .. 
import builder\nfrom ..builder import LOCALIZERS\nfrom .base import BaseTAGClassifier\n\n\[email protected]_module()\nclass SSN(BaseTAGClassifier):\n \"\"\"Temporal Action Detection with Structured Segment Networks.\n\n Args:\n backbone (dict): Config for building backbone.\n cls_head (dict): Config for building classification head.\n in_channels (int): Number of channels for input data.\n Default: 3.\n spatial_type (str): Type of spatial pooling.\n Default: 'avg'.\n dropout_ratio (float): Ratio of dropout.\n Default: 0.5.\n loss_cls (dict): Config for building loss.\n Default: ``dict(type='SSNLoss')``.\n train_cfg (dict | None): Config for training. Default: None.\n test_cfg (dict | None): Config for testing. Default: None.\n \"\"\"\n\n def __init__(self,\n backbone,\n cls_head,\n in_channels=3,\n spatial_type='avg',\n dropout_ratio=0.5,\n loss_cls=dict(type='SSNLoss'),\n train_cfg=None,\n test_cfg=None):\n\n super().__init__(backbone, cls_head, train_cfg, test_cfg)\n\n self.is_test_prepared = False\n self.in_channels = in_channels\n\n self.spatial_type = spatial_type\n if self.spatial_type == 'avg':\n self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)\n elif self.spatial_type == 'max':\n self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)\n else:\n self.pool = None\n\n self.dropout_ratio = dropout_ratio\n if self.dropout_ratio != 0:\n self.dropout = nn.Dropout(p=self.dropout_ratio)\n else:\n self.dropout = None\n self.loss_cls = builder.build_loss(loss_cls)\n\n def forward_train(self, imgs, proposal_scale_factor, proposal_type,\n proposal_labels, reg_targets, **kwargs):\n \"\"\"Define the computation performed at every call when training.\"\"\"\n imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])\n\n x = self.extract_feat(imgs)\n\n if self.pool:\n x = self.pool(x)\n if self.dropout is not None:\n x = self.dropout(x)\n\n activity_scores, completeness_scores, bbox_preds = self.cls_head(\n (x, proposal_scale_factor))\n\n loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,\n proposal_type, proposal_labels, reg_targets,\n self.train_cfg)\n loss_dict = dict(**loss)\n\n return loss_dict\n\n def forward_test(self, imgs, relative_proposal_list, scale_factor_list,\n proposal_tick_list, reg_norm_consts, **kwargs):\n \"\"\"Define the computation performed at every call when testing.\"\"\"\n num_crops = imgs.shape[0]\n imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])\n num_ticks = imgs.shape[1]\n\n output = []\n minibatch_size = self.test_cfg.ssn.sampler.batch_size\n for idx in range(0, num_ticks, minibatch_size):\n chunk = imgs[:, idx:idx +\n minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])\n x = self.extract_feat(chunk)\n if self.pool:\n x = self.pool(x)\n # Merge crop to save memory.\n x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)\n output.append(x)\n output = torch.cat(output, dim=0)\n\n relative_proposal_list = relative_proposal_list.squeeze(0)\n proposal_tick_list = proposal_tick_list.squeeze(0)\n scale_factor_list = scale_factor_list.squeeze(0)\n reg_norm_consts = reg_norm_consts.squeeze(0)\n\n if not self.is_test_prepared:\n self.is_test_prepared = self.cls_head.prepare_test_fc(\n self.cls_head.consensus.num_multipliers)\n\n (output, activity_scores, completeness_scores,\n bbox_preds) = self.cls_head(\n (output, proposal_tick_list, scale_factor_list), test_mode=True)\n\n relative_proposal_list = relative_proposal_list.cpu().numpy()\n activity_scores = activity_scores.cpu().numpy()\n completeness_scores = 
completeness_scores.cpu().numpy()\n if bbox_preds is not None:\n bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)\n bbox_preds[:, :, 0] = (\n bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +\n reg_norm_consts[0, 0])\n bbox_preds[:, :, 1] = (\n bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +\n reg_norm_consts[0, 1])\n bbox_preds = bbox_preds.cpu().numpy()\n\n result = [\n dict(\n relative_proposal_list=relative_proposal_list,\n activity_scores=activity_scores,\n completeness_scores=completeness_scores,\n bbox_preds=bbox_preds)\n ]\n\n return result\n", "path": "mmaction/models/localizers/ssn.py"}]} | 4,063 | 142 |
gh_patches_debug_39376 | rasdani/github-patches | git_diff | google__TensorNetwork-149 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ncon throws an obscure error for a valid contraction order
Hey, I just noticed that the following code throws an obscure error:
```python
d = 2
D1 = 4
D2= 5
a = tf.random_uniform(shape = [D1,D2,d,d])
b = tf.random_uniform(shape = [D1,d,D1])
c = tf.random_uniform(shape = [D2,d,D2])
f = tf.random_uniform(shape = [d,d,d,d])
tn.ncon([a, b, c, f], [[1,3,5,4], [1,2,-1], [3,4,-2], [5,-4,2,-3]])
```
The order above is not chosen smartly, but `ncon` should either throw an interpretable error or do the contraction.
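A hedged, untested sketch of a workaround: pass an explicit `con_order` that keeps parallel edges between the same pair of nodes adjacent, which is what the current contraction loop assumes (after edge 1 is contracted, edges 2 and 5 both connect the merged `a`/`b` block to `f`, and edges 3 and 4 both run to `c`):
```python
tn.ncon([a, b, c, f], [[1,3,5,4], [1,2,-1], [3,4,-2], [5,-4,2,-3]],
        con_order=[1, 2, 5, 3, 4], out_order=[-1, -2, -3, -4])
```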
</issue>
<code>
[start of tensornetwork/ncon_interface.py]
1 # Copyright 2019 The TensorNetwork Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """NCON interface to TensorNetwork."""
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19 from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any
20 from tensornetwork import network
21 from tensornetwork import network_components
22
23 Tensor = Any
24
25
26 def ncon(tensors: Sequence[Tensor],
27 network_structure: Sequence[Sequence],
28 con_order: Optional[Sequence] = None,
29 out_order: Optional[Sequence] = None) -> Tensor:
30 r"""Contracts a list of tensors according to a tensor network specification.
31
32 The network is provided as a list of lists, one for each
33 tensor, specifying labels for the edges connected to that tensor.
34
35 If a contraction order `con_order` and an output order `out_order`
36 are both provided, the edge labels can be anything.
37 Otherwise (`con_order == None or out_order == None`), the edge labels
38 must be nonzero integers and edges will be contracted in ascending order.
39 Negative integers denote the (dangling) indices of the output tensor,
40 which will be in descending order, e.g. [-1,-2,-3,...].
41
42 For example, matrix multiplication:
43
44 ```python
45 A = np.array([[1.0, 2.0], [3.0, 4.0]])
46 B = np.array([[1.0, 1.0], [0.0, 1.0]])
47 ncon([A,B], [(-1, 1), (1, -2)])
48 ```
49
50 Matrix trace:
51
52 ```python
53 A = np.array([[1.0, 2.0], [3.0, 4.0]])
54 ncon([A], [(1, 1)]) # 5.0
55 ```
56
57 Note: The reason `0` is not allowed as an edge label without manually
58 specifying the contraction order is to maintain compatibility with the
59 [original NCON implementation](https://arxiv.org/abs/1402.0939). However,
60 the use of `0` in `con_order` to denote outer products is not (currently)
61 supported in this implementation.
62
63 Args:
64 tensors: List of `Tensor`s.
65 network_structure: List of lists specifying the tensor network
66 structure.
67 con_order: List of edge labels specifying the contraction order.
68 out_order: List of edge labels specifying the output order.
69
70 Returns:
71 A `Tensor` resulting from the contraction of the tensor network.
72 """
73 tn, con_edges, out_edges = ncon_network(
74 tensors, network_structure, con_order=con_order, out_order=out_order)
75
76 # Contract assuming all edges connecting a given pair of nodes are adjacent
77 # in con_order. If this is not the case, the contraction is sub-optimal
78 # so we throw an exception.
79 prev_nodes = []
80 while con_edges:
81 e = con_edges.pop(0) # pop so that older nodes can be deallocated
82 nodes = e.get_nodes()
83
84 nodes_set = set(nodes)
85 if nodes_set != set(prev_nodes):
86 if not nodes_set.issubset(tn.nodes_set):
87 # the node pair was already contracted
88 raise ValueError("Edge '{}' is not adjacent to other edges connecting "
89 "'{}' and '{}' in the contraction order.".format(
90 e, nodes[0], nodes[1]))
91 if not con_edges and len(tn.nodes_set) == 2:
92 # If this already produces the final output, order the edges
93 # here to avoid transposes in some cases.
94 tn.contract_between(
95 *nodes,
96 name="con({},{})".format(*nodes),
97 output_edge_order=out_edges)
98 else:
99 tn.contract_between(*nodes, name="con({},{})".format(*nodes))
100 prev_nodes = nodes
101
102 # TODO: More efficient ordering of products based on out_edges
103 res_node = tn.outer_product_final_nodes(out_edges)
104
105 return res_node.tensor
106
107
108 def ncon_network(
109 tensors: Sequence[Tensor],
110 network_structure: Sequence[Sequence],
111 con_order: Optional[Sequence] = None,
112 out_order: Optional[Sequence] = None) -> Tuple[network.TensorNetwork, List[
113 network_components.Edge], List[network_components.Edge]]:
114 r"""Creates a TensorNetwork from a list of tensors according to `network`.
115
116 The network is provided as a list of lists, one for each
117 tensor, specifying labels for the edges connected to that tensor.
118
119 If a contraction order `con_order` and an output order `out_order`
120 are both provided, the edge labels can be anything.
121 Otherwise (`con_order == None or out_order == None`), the edge labels
122 must be integers and edges will be contracted in ascending order.
123 Negative integers denote the (dangling) indices of the output tensor,
124 which will be in descending order, e.g. [-1,-2,-3,...].
125
126 This is used internally by `ncon()`.
127
128 Args:
129 tensors: List of `Tensor`s.
130 network_structure: List of lists specifying the tensor network.
131 con_order: List of edge labels specifying the contraction order.
132 out_order: List of edge labels specifying the output order.
133
134 Returns:
135 net: `TensorNetwork` with the structure given by `network`.
136 con_edges: List of internal `Edge` objects in contraction order.
137 out_edges: List of dangling `Edge` objects in output order.
138 """
139 if len(tensors) != len(network_structure):
140 raise ValueError('len(tensors) != len(network_structure)')
141
142 tn, edges = _build_network(tensors, network_structure)
143
144 if con_order is None:
145 try:
146 con_order = sorted((k for k in edges if k >= 0))
147 if con_order and con_order[0] == 0:
148 raise ValueError("'0' is not a valid edge label when the "
149 "contraction order is not specified separately.")
150 except TypeError:
151 raise ValueError("Non-integer edge label(s): {}".format(
152 list(edges.keys())))
153 else:
154 if len(con_order) != len(set(con_order)):
155 raise ValueError("Duplicate labels in con_order: {}".format(con_order))
156
157 if out_order is None:
158 try:
159 out_order = sorted((k for k in edges if k < 0), reverse=True)
160 except TypeError:
161 raise ValueError("Non-integer edge label(s): {}".format(
162 list(edges.keys())))
163 else:
164 if len(out_order) != len(set(out_order)):
165 raise ValueError("Duplicate labels in out_order: {}".format(out_order))
166
167 try:
168 con_edges = [edges[k] for k in con_order]
169 out_edges = [edges[k] for k in out_order]
170 except KeyError as err:
171 raise ValueError("Order contained an unknown edge label: {}".format(
172 err.args[0]))
173
174 if len(con_edges) + len(out_edges) != len(edges):
175 raise ValueError(
176 "Edges {} were not included in the contraction and output "
177 "ordering.".format(
178 list(set(edges.keys()) - set(con_order) - set(out_order))))
179
180 for e in con_edges:
181 if e.is_dangling():
182 raise ValueError(
183 "Contraction edge {} appears only once in the network.".format(
184 str(e)))
185
186 for e in out_edges:
187 if not e.is_dangling():
188 raise ValueError(
189 "Output edge {} appears more than once in the network.".format(
190 str(e)))
191
192 return tn, con_edges, out_edges
193
194
195 def _build_network(
196 tensors: Sequence[Tensor], network_structure: Sequence[Sequence]
197 ) -> Tuple[network.TensorNetwork, Dict[Any, network_components.Edge]]:
198 tn = network.TensorNetwork()
199 nodes = []
200 edges = {}
201 for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):
202 if len(tensor.shape) != len(edge_lbls):
203 raise ValueError(
204 "Incorrect number of edge labels specified tensor {}".format(i))
205
206 node = tn.add_node(tensor, name="tensor_{}".format(i))
207 nodes.append(node)
208
209 for (axis_num, edge_lbl) in enumerate(edge_lbls):
210 if edge_lbl not in edges:
211 e = node[axis_num]
212 e.set_name(str(edge_lbl))
213 edges[edge_lbl] = e
214 else:
215 # This will raise an error if the edges are not dangling.
216 e = tn.connect(edges[edge_lbl], node[axis_num], name=str(edge_lbl))
217 edges[edge_lbl] = e
218 return tn, edges
219
[end of tensornetwork/ncon_interface.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py
--- a/tensornetwork/ncon_interface.py
+++ b/tensornetwork/ncon_interface.py
@@ -16,6 +16,7 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import warnings
from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any
from tensornetwork import network
from tensornetwork import network_components
@@ -73,31 +74,46 @@
tn, con_edges, out_edges = ncon_network(
tensors, network_structure, con_order=con_order, out_order=out_order)
- # Contract assuming all edges connecting a given pair of nodes are adjacent
- # in con_order. If this is not the case, the contraction is sub-optimal
- # so we throw an exception.
- prev_nodes = []
+ # Reverse the list so we can pop from the end: O(1).
+ con_edges = con_edges[::-1]
while con_edges:
- e = con_edges.pop(0) # pop so that older nodes can be deallocated
- nodes = e.get_nodes()
-
- nodes_set = set(nodes)
- if nodes_set != set(prev_nodes):
- if not nodes_set.issubset(tn.nodes_set):
- # the node pair was already contracted
- raise ValueError("Edge '{}' is not adjacent to other edges connecting "
- "'{}' and '{}' in the contraction order.".format(
- e, nodes[0], nodes[1]))
- if not con_edges and len(tn.nodes_set) == 2:
- # If this already produces the final output, order the edges
- # here to avoid transposes in some cases.
- tn.contract_between(
- *nodes,
- name="con({},{})".format(*nodes),
- output_edge_order=out_edges)
+ nodes_to_contract = con_edges[-1].get_nodes()
+ edges_to_contract = tn.get_shared_edges(*nodes_to_contract)
+
+ # Eat up all parallel edges that are adjacent in the ordering.
+ adjacent_parallel_edges = set()
+ for edge in reversed(con_edges):
+ if edge in edges_to_contract:
+ adjacent_parallel_edges.add(edge)
else:
- tn.contract_between(*nodes, name="con({},{})".format(*nodes))
- prev_nodes = nodes
+ break
+ con_edges = con_edges[:-len(adjacent_parallel_edges)]
+
+ # In an optimal ordering, all edges connecting a given pair of nodes are
+ # adjacent in con_order. If this is not the case, warn the user.
+ leftovers = edges_to_contract - adjacent_parallel_edges
+ if leftovers:
+ warnings.warn(
+ "Suboptimal ordering detected. Edges {} are not adjacent in the "
+ "contraction order to edges {}, connecting nodes {}. Deviating from "
+ "the specified ordering!".format(
+ list(map(str, leftovers)),
+ list(map(str, adjacent_parallel_edges)),
+ list(map(str, nodes_to_contract)))
+ )
+ con_edges = [e for e in con_edges if e not in edges_to_contract]
+
+ if set(nodes_to_contract) == tn.nodes_set:
+ # If this already produces the final output, order the edges
+ # here to avoid transposes in some cases.
+ tn.contract_between(
+ *nodes_to_contract,
+ name="con({},{})".format(*nodes_to_contract),
+ output_edge_order=out_edges)
+ else:
+ tn.contract_between(
+ *nodes_to_contract,
+ name="con({},{})".format(*nodes_to_contract))
# TODO: More efficient ordering of products based on out_edges
res_node = tn.outer_product_final_nodes(out_edges)
| {"golden_diff": "diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py\n--- a/tensornetwork/ncon_interface.py\n+++ b/tensornetwork/ncon_interface.py\n@@ -16,6 +16,7 @@\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n+import warnings\n from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any\n from tensornetwork import network\n from tensornetwork import network_components\n@@ -73,31 +74,46 @@\n tn, con_edges, out_edges = ncon_network(\n tensors, network_structure, con_order=con_order, out_order=out_order)\n \n- # Contract assuming all edges connecting a given pair of nodes are adjacent\n- # in con_order. If this is not the case, the contraction is sub-optimal\n- # so we throw an exception.\n- prev_nodes = []\n+ # Reverse the list so we can pop from the end: O(1).\n+ con_edges = con_edges[::-1]\n while con_edges:\n- e = con_edges.pop(0) # pop so that older nodes can be deallocated\n- nodes = e.get_nodes()\n-\n- nodes_set = set(nodes)\n- if nodes_set != set(prev_nodes):\n- if not nodes_set.issubset(tn.nodes_set):\n- # the node pair was already contracted\n- raise ValueError(\"Edge '{}' is not adjacent to other edges connecting \"\n- \"'{}' and '{}' in the contraction order.\".format(\n- e, nodes[0], nodes[1]))\n- if not con_edges and len(tn.nodes_set) == 2:\n- # If this already produces the final output, order the edges \n- # here to avoid transposes in some cases.\n- tn.contract_between(\n- *nodes,\n- name=\"con({},{})\".format(*nodes),\n- output_edge_order=out_edges)\n+ nodes_to_contract = con_edges[-1].get_nodes()\n+ edges_to_contract = tn.get_shared_edges(*nodes_to_contract)\n+\n+ # Eat up all parallel edges that are adjacent in the ordering.\n+ adjacent_parallel_edges = set()\n+ for edge in reversed(con_edges):\n+ if edge in edges_to_contract:\n+ adjacent_parallel_edges.add(edge)\n else:\n- tn.contract_between(*nodes, name=\"con({},{})\".format(*nodes))\n- prev_nodes = nodes\n+ break\n+ con_edges = con_edges[:-len(adjacent_parallel_edges)]\n+\n+ # In an optimal ordering, all edges connecting a given pair of nodes are\n+ # adjacent in con_order. If this is not the case, warn the user.\n+ leftovers = edges_to_contract - adjacent_parallel_edges\n+ if leftovers:\n+ warnings.warn(\n+ \"Suboptimal ordering detected. Edges {} are not adjacent in the \"\n+ \"contraction order to edges {}, connecting nodes {}. 
Deviating from \"\n+ \"the specified ordering!\".format(\n+ list(map(str, leftovers)),\n+ list(map(str, adjacent_parallel_edges)),\n+ list(map(str, nodes_to_contract)))\n+ )\n+ con_edges = [e for e in con_edges if e not in edges_to_contract]\n+\n+ if set(nodes_to_contract) == tn.nodes_set:\n+ # If this already produces the final output, order the edges\n+ # here to avoid transposes in some cases.\n+ tn.contract_between(\n+ *nodes_to_contract,\n+ name=\"con({},{})\".format(*nodes_to_contract),\n+ output_edge_order=out_edges)\n+ else:\n+ tn.contract_between(\n+ *nodes_to_contract,\n+ name=\"con({},{})\".format(*nodes_to_contract))\n \n # TODO: More efficient ordering of products based on out_edges\n res_node = tn.outer_product_final_nodes(out_edges)\n", "issue": "ncon throws an obscure error for a valid contraction order\nHey, I just noticed that the following code throws an obscure error:\r\n```python \r\nd = 2\r\nD1 = 4\r\nD2= 5\r\na = tf.random_uniform(shape = [D1,D2,d,d])\r\nb = tf.random_uniform(shape = [D1,d,D1])\r\nc = tf.random_uniform(shape = [D2,d,D2])\r\nf = tf.random_uniform(shape = [d,d,d,d])\r\n\r\ntn.ncon([a, b, c, f], [[1,3,5,4], [1,2,-1], [3,4,-2], [5,-4,2,-3]])\r\n```\r\nThe order above is not chosen smartly, but `ncon` should either throw an interpretable error, or do the contraction\n", "before_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"NCON interface to TensorNetwork.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any\nfrom tensornetwork import network\nfrom tensornetwork import network_components\n\nTensor = Any\n\n\ndef ncon(tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None) -> Tensor:\n r\"\"\"Contracts a list of tensors according to a tensor network specification.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be nonzero integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. 
[-1,-2,-3,...].\n\n For example, matrix multiplication:\n\n ```python\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n B = np.array([[1.0, 1.0], [0.0, 1.0]])\n ncon([A,B], [(-1, 1), (1, -2)])\n ```\n\n Matrix trace:\n\n ```python\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n ncon([A], [(1, 1)]) # 5.0\n ```\n\n Note: The reason `0` is not allowed as an edge label without manually\n specifying the contraction order is to maintain compatibility with the\n [original NCON implementation](https://arxiv.org/abs/1402.0939). However,\n the use of `0` in `con_order` to denote outer products is not (currently) \n supported in this implementation.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network\n structure.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n\n Returns:\n A `Tensor` resulting from the contraction of the tensor network.\n \"\"\"\n tn, con_edges, out_edges = ncon_network(\n tensors, network_structure, con_order=con_order, out_order=out_order)\n\n # Contract assuming all edges connecting a given pair of nodes are adjacent\n # in con_order. If this is not the case, the contraction is sub-optimal\n # so we throw an exception.\n prev_nodes = []\n while con_edges:\n e = con_edges.pop(0) # pop so that older nodes can be deallocated\n nodes = e.get_nodes()\n\n nodes_set = set(nodes)\n if nodes_set != set(prev_nodes):\n if not nodes_set.issubset(tn.nodes_set):\n # the node pair was already contracted\n raise ValueError(\"Edge '{}' is not adjacent to other edges connecting \"\n \"'{}' and '{}' in the contraction order.\".format(\n e, nodes[0], nodes[1]))\n if not con_edges and len(tn.nodes_set) == 2:\n # If this already produces the final output, order the edges \n # here to avoid transposes in some cases.\n tn.contract_between(\n *nodes,\n name=\"con({},{})\".format(*nodes),\n output_edge_order=out_edges)\n else:\n tn.contract_between(*nodes, name=\"con({},{})\".format(*nodes))\n prev_nodes = nodes\n\n # TODO: More efficient ordering of products based on out_edges\n res_node = tn.outer_product_final_nodes(out_edges)\n\n return res_node.tensor\n\n\ndef ncon_network(\n tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None) -> Tuple[network.TensorNetwork, List[\n network_components.Edge], List[network_components.Edge]]:\n r\"\"\"Creates a TensorNetwork from a list of tensors according to `network`.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. 
[-1,-2,-3,...].\n\n This is used internally by `ncon()`.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n\n Returns:\n net: `TensorNetwork` with the structure given by `network`.\n con_edges: List of internal `Edge` objects in contraction order.\n out_edges: List of dangling `Edge` objects in output order.\n \"\"\"\n if len(tensors) != len(network_structure):\n raise ValueError('len(tensors) != len(network_structure)')\n\n tn, edges = _build_network(tensors, network_structure)\n\n if con_order is None:\n try:\n con_order = sorted((k for k in edges if k >= 0))\n if con_order and con_order[0] == 0:\n raise ValueError(\"'0' is not a valid edge label when the \"\n \"contraction order is not specified separately.\")\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(con_order) != len(set(con_order)):\n raise ValueError(\"Duplicate labels in con_order: {}\".format(con_order))\n\n if out_order is None:\n try:\n out_order = sorted((k for k in edges if k < 0), reverse=True)\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(out_order) != len(set(out_order)):\n raise ValueError(\"Duplicate labels in out_order: {}\".format(out_order))\n\n try:\n con_edges = [edges[k] for k in con_order]\n out_edges = [edges[k] for k in out_order]\n except KeyError as err:\n raise ValueError(\"Order contained an unknown edge label: {}\".format(\n err.args[0]))\n\n if len(con_edges) + len(out_edges) != len(edges):\n raise ValueError(\n \"Edges {} were not included in the contraction and output \"\n \"ordering.\".format(\n list(set(edges.keys()) - set(con_order) - set(out_order))))\n\n for e in con_edges:\n if e.is_dangling():\n raise ValueError(\n \"Contraction edge {} appears only once in the network.\".format(\n str(e)))\n\n for e in out_edges:\n if not e.is_dangling():\n raise ValueError(\n \"Output edge {} appears more than once in the network.\".format(\n str(e)))\n\n return tn, con_edges, out_edges\n\n\ndef _build_network(\n tensors: Sequence[Tensor], network_structure: Sequence[Sequence]\n) -> Tuple[network.TensorNetwork, Dict[Any, network_components.Edge]]:\n tn = network.TensorNetwork()\n nodes = []\n edges = {}\n for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):\n if len(tensor.shape) != len(edge_lbls):\n raise ValueError(\n \"Incorrect number of edge labels specified tensor {}\".format(i))\n\n node = tn.add_node(tensor, name=\"tensor_{}\".format(i))\n nodes.append(node)\n\n for (axis_num, edge_lbl) in enumerate(edge_lbls):\n if edge_lbl not in edges:\n e = node[axis_num]\n e.set_name(str(edge_lbl))\n edges[edge_lbl] = e\n else:\n # This will raise an error if the edges are not dangling.\n e = tn.connect(edges[edge_lbl], node[axis_num], name=str(edge_lbl))\n edges[edge_lbl] = e\n return tn, edges\n", "path": "tensornetwork/ncon_interface.py"}]} | 3,258 | 856 |
gh_patches_debug_822 | rasdani/github-patches | git_diff | opendatacube__datacube-core-348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary dependency on `pathlib` when running in python3
### Expected behaviour
Datacube shouldn't depend on unnecessary packages when running in Python 3.
### Actual behaviour
There's a dependency on `pathlib`, which is included in the Python 3 standard library, and so doesn't need to be installed.
This causes trouble on the NCI deployment when trying to load `stats` modules which use the `setuptools` entry_points for their registration, and it returns error messages to users trying to load them.
### Steps to reproduce the behaviour
```
module load agdc-py3-prod agdc_statistics
dra547@raijin4:~ $ python
Python 3.6.3 | packaged by conda-forge | (default, Nov 4 2017, 10:10:56)
[GCC 4.8.2 20140120 (Red Hat 4.8.2-15)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import datacube_stats.statistics
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/g/data/v10/public/modules/agdc_statistics/0.9a7/lib/python3.6/site-packages/datacube_stats/statistics.py", line 769, in <module>
STATS[entry_point.name] = entry_point.load()
File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2404, in load
self.require(*args, **kwargs)
File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2427, in require
items = working_set.resolve(reqs, env, installer, extras=self.extras)
File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 870, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'pathlib' distribution was not found and is required by datacube
>>>
```
### The Fix
Modify `setup.py` to use [platform specific dependencies](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies) to only require `pathlib` when not running on python 3.
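A minimal sketch of that change (matching the eventual fix), using a PEP 508 environment marker so the PyPI `pathlib` backport is only installed on Python 2 — the other `install_requires` entries are elided:
```python
install_requires=[
    # ...
    'pathlib;python_version<"3"',  # stdlib in Python 3, so only pull the backport on Python 2
    # ...
],
```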
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import versioneer
4 from setuptools import setup, find_packages
5
6 tests_require = [
7 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'
8 ]
9
10 extras_require = {
11 'performance': ['ciso8601', 'bottleneck'],
12 'interactive': ['matplotlib', 'fiona'],
13 'distributed': ['distributed', 'dask[distributed]'],
14 'analytics': ['scipy', 'pyparsing', 'numexpr'],
15 'doc': ['Sphinx', 'setuptools'],
16 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],
17 'celery': ['celery>=4', 'redis'],
18 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],
19 'test': tests_require,
20 }
21 # An 'all' option, following ipython naming conventions.
22 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
23
24 setup(
25 name='datacube',
26 version=versioneer.get_version(),
27 cmdclass=versioneer.get_cmdclass(),
28
29 url='https://github.com/opendatacube/datacube-core',
30 author='AGDC Collaboration',
31 maintainer='AGDC Collaboration',
32 maintainer_email='',
33 description='An analysis environment for satellite and other earth observation data',
34 long_description=open('README.rst').read(),
35 license='Apache License 2.0',
36 classifiers=[
37 "Development Status :: 4 - Beta",
38 "Intended Audience :: Developers",
39 "Intended Audience :: Science/Research",
40 "License :: OSI Approved :: Apache Software License",
41 "Natural Language :: English",
42 "Operating System :: MacOS :: MacOS X",
43 "Operating System :: POSIX",
44 "Operating System :: POSIX :: BSD",
45 "Operating System :: POSIX :: Linux",
46 "Operating System :: Microsoft :: Windows",
47 "Programming Language :: Python",
48 "Programming Language :: Python :: 2",
49 "Programming Language :: Python :: 2.7",
50 "Programming Language :: Python :: 3",
51 "Programming Language :: Python :: 3.5",
52 "Topic :: Scientific/Engineering :: GIS",
53 "Topic :: Scientific/Engineering :: Information Analysis",
54 ],
55
56 packages=find_packages(
57 exclude=('tests', 'tests.*',
58 'integration_tests', 'integration_tests.*')
59 ),
60 package_data={
61 '': ['*.yaml', '*/*.yaml'],
62 },
63 scripts=[
64 'datacube_apps/scripts/pbs_helpers.sh'
65 ],
66 setup_requires=[
67 'pytest-runner'
68 ],
69 install_requires=[
70 'affine',
71 'cachetools',
72 'click>=5.0',
73 'cloudpickle>=0.4',
74 'dask[array]',
75 'gdal>=1.9',
76 'jsonschema',
77 'netcdf4',
78 'numpy',
79 'pathlib',
80 'psycopg2',
81 'pypeg2',
82 'python-dateutil',
83 'pyyaml',
84 'rasterio>=0.9', # required for zip reading, 0.9 gets around 1.0a ordering problems
85 'singledispatch',
86 'sqlalchemy',
87 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost
88 ],
89 extras_require=extras_require,
90 tests_require=tests_require,
91
92 entry_points={
93 'console_scripts': [
94 'datacube-search = datacube.scripts.search_tool:cli',
95 'datacube = datacube.scripts.cli_app:cli',
96 'datacube-stacker = datacube_apps.stacker:main',
97 'datacube-worker = datacube.execution.worker:main',
98 'datacube-fixer = datacube_apps.stacker:fixer_main',
99 'datacube-ncml = datacube_apps.ncml:ncml_app',
100 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',
101 'movie_generator = datacube_apps.movie_generator:main',
102 'datacube-simple-replica = datacube_apps.simple_replica:replicate'
103 ]
104 },
105 )
106
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,7 @@
'jsonschema',
'netcdf4',
'numpy',
- 'pathlib',
+ 'pathlib;python_version<"3"',
'psycopg2',
'pypeg2',
'python-dateutil',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -76,7 +76,7 @@\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n- 'pathlib',\n+ 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n", "issue": "Unnecessary dependency on `pathlib` when running in python3\n### Expected behaviour\r\nDatacube shouldn't depend on unnecessary packages when running in Python 3.\r\n\r\n### Actual behaviour\r\nThere's a dependency on `pathlib`, which is included in the Python 3 standard library, and so doesn't need to be installed.\r\n\r\nThis causes trouble on the NCI deployment when trying to load `stats` modules which use the `setuptools` entry_points for their registration. And returns error messages to users trying to load them.\r\n\r\n### Steps to reproduce the behaviour\r\n```\r\nmodule load agdc-py3-prod agdc_statistics\r\ndra547@raijin4:~ $ python\r\nPython 3.6.3 | packaged by conda-forge | (default, Nov 4 2017, 10:10:56)\r\n[GCC 4.8.2 20140120 (Red Hat 4.8.2-15)] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import datacube_stats.statistics\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/g/data/v10/public/modules/agdc_statistics/0.9a7/lib/python3.6/site-packages/datacube_stats/statistics.py\", line 769, in <module>\r\n STATS[entry_point.name] = entry_point.load()\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 2404, in load\r\n self.require(*args, **kwargs)\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 2427, in require\r\n items = working_set.resolve(reqs, env, installer, extras=self.extras)\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 870, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'pathlib' distribution was not found and is required by datacube\r\n>>>\r\n```\r\n\r\n### The Fix\r\nModify `setup.py` to use [platform specific dependencies](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies) to only require `pathlib` when not running on python 3.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n 
long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'pathlib',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate'\n ]\n },\n)\n", "path": "setup.py"}]} | 2,234 | 83 |
gh_patches_debug_47844 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1448 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
POSTGRES_PORT isn't respected during server setup
**Describe the bug**
`POSTGRES_PORT` isn't respected - `docker-compose up --build` starts the postgres instance on port 5432 regardless.
**To Reproduce**
Steps to reproduce the behavior:
1. Follow the [documentation for setting up a new instance](https://docs.joinbookwyrm.com/installing-in-production.html)
2. In `.env`, set `POSTGRES_PORT` to `5433` (and make the corresponding changes in `docker-compose.yml`)
3. Run `docker-compose up --build`
**Expected behavior**
The database is started on port 5433
**Actual behavior**
The database is started on port 5432
**Additional context**
I also tried setting the fallback to `5433` in `bookwyrm/settings.py` in case something was going wrong with the env propagation, but that didn't help either.
I was finally able to work around it by additionally setting `PGPORT` in `.env`.
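For reference, the workaround boils down to something like this in `.env` (a sketch using the port value from this report; `PGPORT` is what the postgres container itself reads, which appears to be why it takes effect):
```
POSTGRES_PORT=5433
PGPORT=5433
```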
---
**Server:**
- OS: Raspberry Pi OS (raspbian) 10.4
- docker 20.10.8
- docker-compose 1.29.2
</issue>
<code>
[start of bookwyrm/settings.py]
1 """ bookwyrm settings and configuration """
2 import os
3 from environs import Env
4
5 import requests
6 from django.utils.translation import gettext_lazy as _
7
8
9 env = Env()
10 DOMAIN = env("DOMAIN")
11 VERSION = "0.0.1"
12
13 PAGE_LENGTH = env("PAGE_LENGTH", 15)
14 DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
15
16 JS_CACHE = "7f2343cf"
17
18 # email
19 EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
20 EMAIL_HOST = env("EMAIL_HOST")
21 EMAIL_PORT = env("EMAIL_PORT", 587)
22 EMAIL_HOST_USER = env("EMAIL_HOST_USER")
23 EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
24 EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", True)
25 EMAIL_USE_SSL = env.bool("EMAIL_USE_SSL", False)
26 DEFAULT_FROM_EMAIL = f"admin@{DOMAIN}"
27
28 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
29 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
30 LOCALE_PATHS = [
31 os.path.join(BASE_DIR, "locale"),
32 ]
33
34 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
35
36 # Preview image
37 ENABLE_PREVIEW_IMAGES = env.bool("ENABLE_PREVIEW_IMAGES", False)
38 PREVIEW_BG_COLOR = env.str("PREVIEW_BG_COLOR", "use_dominant_color_light")
39 PREVIEW_TEXT_COLOR = env.str("PREVIEW_TEXT_COLOR", "#363636")
40 PREVIEW_IMG_WIDTH = env.int("PREVIEW_IMG_WIDTH", 1200)
41 PREVIEW_IMG_HEIGHT = env.int("PREVIEW_IMG_HEIGHT", 630)
42 PREVIEW_DEFAULT_COVER_COLOR = env.str("PREVIEW_DEFAULT_COVER_COLOR", "#002549")
43
44 # Quick-start development settings - unsuitable for production
45 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
46
47 # SECURITY WARNING: keep the secret key used in production secret!
48 SECRET_KEY = env("SECRET_KEY")
49
50 # SECURITY WARNING: don't run with debug turned on in production!
51 DEBUG = env.bool("DEBUG", True)
52 USE_HTTPS = env.bool("USE_HTTPS", False)
53
54 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", ["*"])
55
56 # Application definition
57
58 INSTALLED_APPS = [
59 "django.contrib.admin",
60 "django.contrib.auth",
61 "django.contrib.contenttypes",
62 "django.contrib.sessions",
63 "django.contrib.messages",
64 "django.contrib.staticfiles",
65 "django.contrib.humanize",
66 "django_rename_app",
67 "bookwyrm",
68 "celery",
69 "imagekit",
70 "storages",
71 ]
72
73 MIDDLEWARE = [
74 "django.middleware.security.SecurityMiddleware",
75 "django.contrib.sessions.middleware.SessionMiddleware",
76 "django.middleware.locale.LocaleMiddleware",
77 "django.middleware.common.CommonMiddleware",
78 "django.middleware.csrf.CsrfViewMiddleware",
79 "django.contrib.auth.middleware.AuthenticationMiddleware",
80 "bookwyrm.middleware.TimezoneMiddleware",
81 "bookwyrm.middleware.IPBlocklistMiddleware",
82 "django.contrib.messages.middleware.MessageMiddleware",
83 "django.middleware.clickjacking.XFrameOptionsMiddleware",
84 ]
85
86 ROOT_URLCONF = "bookwyrm.urls"
87
88 TEMPLATES = [
89 {
90 "BACKEND": "django.template.backends.django.DjangoTemplates",
91 "DIRS": ["templates"],
92 "APP_DIRS": True,
93 "OPTIONS": {
94 "context_processors": [
95 "django.template.context_processors.debug",
96 "django.template.context_processors.request",
97 "django.contrib.auth.context_processors.auth",
98 "django.contrib.messages.context_processors.messages",
99 "bookwyrm.context_processors.site_settings",
100 ],
101 },
102 },
103 ]
104
105
106 WSGI_APPLICATION = "bookwyrm.wsgi.application"
107
108 # redis/activity streams settings
109 REDIS_ACTIVITY_HOST = env("REDIS_ACTIVITY_HOST", "localhost")
110 REDIS_ACTIVITY_PORT = env("REDIS_ACTIVITY_PORT", 6379)
111 REDIS_ACTIVITY_PASSWORD = env("REDIS_ACTIVITY_PASSWORD", None)
112
113 MAX_STREAM_LENGTH = int(env("MAX_STREAM_LENGTH", 200))
114
115 STREAMS = [
116 {"key": "home", "name": _("Home Timeline"), "shortname": _("Home")},
117 {"key": "books", "name": _("Books Timeline"), "shortname": _("Books")},
118 ]
119
120 # Database
121 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
122
123 DATABASES = {
124 "default": {
125 "ENGINE": "django.db.backends.postgresql_psycopg2",
126 "NAME": env("POSTGRES_DB", "fedireads"),
127 "USER": env("POSTGRES_USER", "fedireads"),
128 "PASSWORD": env("POSTGRES_PASSWORD", "fedireads"),
129 "HOST": env("POSTGRES_HOST", ""),
130 "PORT": env("POSTGRES_PORT", 5432),
131 },
132 }
133
134
135 LOGIN_URL = "/login/"
136 AUTH_USER_MODEL = "bookwyrm.User"
137
138 # Password validation
139 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
140
141 # pylint: disable=line-too-long
142 AUTH_PASSWORD_VALIDATORS = [
143 {
144 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
145 },
146 {
147 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
148 },
149 {
150 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
151 },
152 {
153 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
154 },
155 ]
156
157
158 # Internationalization
159 # https://docs.djangoproject.com/en/3.2/topics/i18n/
160
161 LANGUAGE_CODE = "en-us"
162 LANGUAGES = [
163 ("en-us", _("English")),
164 ("de-de", _("German")),
165 ("es", _("Spanish")),
166 ("fr-fr", _("French")),
167 ("zh-hans", _("Simplified Chinese")),
168 ("zh-hant", _("Traditional Chinese")),
169 ]
170
171
172 TIME_ZONE = "UTC"
173
174 USE_I18N = True
175
176 USE_L10N = True
177
178 USE_TZ = True
179
180
181 agent = requests.utils.default_user_agent()
182 USER_AGENT = f"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)"
183
184 # Imagekit generated thumbnails
185 ENABLE_THUMBNAIL_GENERATION = env.bool("ENABLE_THUMBNAIL_GENERATION", False)
186 IMAGEKIT_CACHEFILE_DIR = "thumbnails"
187
188 # Static files (CSS, JavaScript, Images)
189 # https://docs.djangoproject.com/en/3.2/howto/static-files/
190
191 PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
192
193 # Storage
194
195 PROTOCOL = "http"
196 if USE_HTTPS:
197 PROTOCOL = "https"
198
199 USE_S3 = env.bool("USE_S3", False)
200
201 if USE_S3:
202 # AWS settings
203 AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
204 AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
205 AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME")
206 AWS_S3_CUSTOM_DOMAIN = env("AWS_S3_CUSTOM_DOMAIN")
207 AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", "")
208 AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
209 AWS_DEFAULT_ACL = "public-read"
210 AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
211 # S3 Static settings
212 STATIC_LOCATION = "static"
213 STATIC_URL = f"https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/"
214 STATICFILES_STORAGE = "bookwyrm.storage_backends.StaticStorage"
215 # S3 Media settings
216 MEDIA_LOCATION = "images"
217 MEDIA_URL = f"https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/"
218 MEDIA_FULL_URL = MEDIA_URL
219 DEFAULT_FILE_STORAGE = "bookwyrm.storage_backends.ImagesStorage"
220 # I don't know if it's used, but the site crashes without it
221 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
222 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
223 else:
224 STATIC_URL = "/static/"
225 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
226 MEDIA_URL = "/images/"
227 MEDIA_FULL_URL = f"{PROTOCOL}://{DOMAIN}{MEDIA_URL}"
228 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
229
[end of bookwyrm/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py
--- a/bookwyrm/settings.py
+++ b/bookwyrm/settings.py
@@ -127,7 +127,7 @@
"USER": env("POSTGRES_USER", "fedireads"),
"PASSWORD": env("POSTGRES_PASSWORD", "fedireads"),
"HOST": env("POSTGRES_HOST", ""),
- "PORT": env("POSTGRES_PORT", 5432),
+ "PORT": env("PGPORT", 5432),
},
}
| {"golden_diff": "diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py\n--- a/bookwyrm/settings.py\n+++ b/bookwyrm/settings.py\n@@ -127,7 +127,7 @@\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n- \"PORT\": env(\"POSTGRES_PORT\", 5432),\n+ \"PORT\": env(\"PGPORT\", 5432),\n },\n }\n", "issue": "POSTGRES_PORT isn't respected during server setup\n**Describe the bug**\r\n`POSTGRES_PORT` isn't respected - `docker-compose up --build` starts the postgres instance on port 5432 regardless.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Follow the [documentation for setting up a new instance](https://docs.joinbookwyrm.com/installing-in-production.html)\r\n2. In `.env`, set `POSTGRES_PORT` to `5433` (and make the corresponding changes in `docker-compose.yml`)\r\n3. Run `docker-compose up --build`\r\n\r\n**Expected behavior**\r\nThe database is started on port 5433\r\n\r\n**Actual behavior**\r\nThe database is started on port 5432\r\n\r\n**Additional context**\r\nI also tried setting the fallback to `5433` in `bookwyrm/settings.py` in case something was going wrong with the env propagation, but that didn't help either.\r\nI was finally able to workaround by additionally setting `PGPORT` in `.env`\r\n\r\n---\r\n\r\n**Server:**\r\n - OS: Raspberry Pi OS (raspbian) 10.4\r\n - docker 20.10.8\r\n - docker-compose 1.29.2\r\n\n", "before_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"7f2343cf\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = f\"admin@{DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n 
\"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, 
JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}]} | 3,155 | 123 |
gh_patches_debug_16992 | rasdani/github-patches | git_diff | getsentry__sentry-45065 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Alert Page broken after delete project
### Self-Hosted Version
22.8.0
### CPU Architecture
x86_64
### Docker Version
20.10.17, build 100c701
### Docker Compose Version
2.6.0
### Steps to Reproduce
1. create project
2. set an alert rule for it
3. delete this project
### Expected Result
The alert rule is deleted along with the project.
### Actual Result
The Alert page is broken after deleting the project.
It returns 403 with the detail "you don't have permission".
I manually fixed it with:
```
\dt
select * from sentry_alertrule;
select * from sentry_alertruleactivity;
select * from sentry_alertruleexcludedprojects;
select * from sentry_alertruletrigger;
select * from sentry_alertruletriggeraction;
select * from sentry_alertruletriggerexclusion;
\dt
select * from sentry_rule;
delete from sentry_rule where id = 2;
select * from sentry_grouprulestatus;
delete from sentry_grouprulestatus where rule_id = 2;
delete from sentry_rule where id = 2;
select * from sentry_ruleactivity;
delete from sentry_ruleactivity where rule_id = 2;
delete from sentry_rule where id = 2;
delete from sentry_rulefirehistory where rule_id = 2;
delete from sentry_rule where id = 2;
select * from sentry_rule;
\dt
select * from sentry_userrole;
select * from sentry_userpermission;
select * from sentry_userrole_users;
select * from sentry_ruleactivity;
select * from sentry_rule;
select * from sentry_ruleactivity_id_seq;
select * from sentry_rulefirehistory;
select * from sentry_rulefirehistory_id_seq;
select * from sentry_rule_id_seq;
\q
```
</issue>
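The manual SQL above boils down to deleting every rule row that belongs to a project which no longer exists. A rough Django ORM sketch of the same cleanup, assuming the `Rule` and `ProjectStatus` models used by the endpoint below; the helper name and the cascade behaviour are assumptions, not the actual Sentry fix:

```
# Hypothetical cleanup helper, not the upstream fix. Assumes Rule has a
# `project` foreign key and Project exposes a `status` field, as the endpoint
# code below suggests. Meant to be run from a Django shell in the Sentry env.
from sentry.models import ProjectStatus, Rule


def delete_orphaned_rules():
    # Rules whose project is no longer visible (deleted or pending deletion).
    orphaned = Rule.objects.exclude(project__status=ProjectStatus.VISIBLE)
    count = orphaned.count()
    # Depending on the models' on_delete settings this may also remove the
    # grouprulestatus / ruleactivity / rulefirehistory rows that the SQL
    # above deletes one table at a time.
    orphaned.delete()
    return count
```

The patch further down in this entry takes a different route: instead of deleting anything, it filters non-visible projects out of the query behind the alerts page.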
<code>
[start of src/sentry/incidents/endpoints/organization_alert_rule_index.py]
1 from datetime import datetime
2
3 from django.db.models import DateTimeField, IntegerField, OuterRef, Q, Subquery, Value
4 from django.db.models.functions import Coalesce
5 from django.utils.timezone import make_aware
6 from rest_framework import status
7 from rest_framework.request import Request
8 from rest_framework.response import Response
9
10 from sentry import features
11 from sentry.api.base import region_silo_endpoint
12 from sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint
13 from sentry.api.exceptions import ResourceDoesNotExist
14 from sentry.api.paginator import (
15 CombinedQuerysetIntermediary,
16 CombinedQuerysetPaginator,
17 OffsetPaginator,
18 )
19 from sentry.api.serializers import serialize
20 from sentry.api.serializers.models.alert_rule import CombinedRuleSerializer
21 from sentry.api.utils import InvalidParams
22 from sentry.incidents.models import AlertRule, Incident
23 from sentry.incidents.serializers import AlertRuleSerializer
24 from sentry.models import OrganizationMemberTeam, Project, ProjectStatus, Rule, RuleStatus, Team
25 from sentry.snuba.dataset import Dataset
26 from sentry.utils.cursors import Cursor, StringCursor
27
28 from .utils import parse_team_params
29
30
31 @region_silo_endpoint
32 class OrganizationCombinedRuleIndexEndpoint(OrganizationEndpoint):
33 def get(self, request: Request, organization) -> Response:
34 """
35 Fetches alert rules and legacy rules for an organization
36 """
37 project_ids = self.get_requested_project_ids_unchecked(request) or None
38 if project_ids == {-1}: # All projects for org:
39 project_ids = Project.objects.filter(
40 organization=organization, status=ProjectStatus.VISIBLE
41 ).values_list("id", flat=True)
42 elif project_ids is None: # All projects for user
43 org_team_list = Team.objects.filter(organization=organization).values_list(
44 "id", flat=True
45 )
46 user_team_list = OrganizationMemberTeam.objects.filter(
47 organizationmember__user=request.user, team__in=org_team_list
48 ).values_list("team", flat=True)
49 project_ids = Project.objects.filter(teams__in=user_team_list).values_list(
50 "id", flat=True
51 )
52
53 # Materialize the project ids here. This helps us to not overwhelm the query planner with
54 # overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal
55 # index to filter on. Also enforces permission checks.
56 projects = self.get_projects(request, organization, project_ids=set(project_ids))
57
58 teams = request.GET.getlist("team", [])
59 team_filter_query = None
60 if len(teams) > 0:
61 try:
62 teams_query, unassigned = parse_team_params(request, organization, teams)
63 except InvalidParams as err:
64 return Response(str(err), status=status.HTTP_400_BAD_REQUEST)
65
66 team_filter_query = Q(owner_id__in=teams_query.values_list("actor_id", flat=True))
67 if unassigned:
68 team_filter_query = team_filter_query | Q(owner_id=None)
69
70 alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)
71 if not features.has("organizations:performance-view", organization):
72 # Filter to only error alert rules
73 alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)
74 issue_rules = Rule.objects.filter(
75 status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE], project__in=projects
76 )
77 name = request.GET.get("name", None)
78 if name:
79 alert_rules = alert_rules.filter(Q(name__icontains=name))
80 issue_rules = issue_rules.filter(Q(label__icontains=name))
81
82 if team_filter_query:
83 alert_rules = alert_rules.filter(team_filter_query)
84 issue_rules = issue_rules.filter(team_filter_query)
85
86 expand = request.GET.getlist("expand", [])
87 if "latestIncident" in expand:
88 alert_rules = alert_rules.annotate(
89 incident_id=Coalesce(
90 Subquery(
91 Incident.objects.filter(alert_rule=OuterRef("pk"))
92 .order_by("-date_started")
93 .values("id")[:1]
94 ),
95 Value("-1"),
96 )
97 )
98
99 is_asc = request.GET.get("asc", False) == "1"
100 sort_key = request.GET.getlist("sort", ["date_added"])
101 rule_sort_key = [
102 "label" if x == "name" else x for x in sort_key
103 ] # Rule's don't share the same field name for their title/label/name...so we account for that here.
104 case_insensitive = sort_key == ["name"]
105
106 if "incident_status" in sort_key:
107 alert_rules = alert_rules.annotate(
108 incident_status=Coalesce(
109 Subquery(
110 Incident.objects.filter(alert_rule=OuterRef("pk"))
111 .order_by("-date_started")
112 .values("status")[:1]
113 ),
114 Value(-1, output_field=IntegerField()),
115 )
116 )
117 issue_rules = issue_rules.annotate(
118 incident_status=Value(-2, output_field=IntegerField())
119 )
120
121 if "date_triggered" in sort_key:
122 far_past_date = Value(make_aware(datetime.min), output_field=DateTimeField())
123 alert_rules = alert_rules.annotate(
124 date_triggered=Coalesce(
125 Subquery(
126 Incident.objects.filter(alert_rule=OuterRef("pk"))
127 .order_by("-date_started")
128 .values("date_started")[:1]
129 ),
130 far_past_date,
131 ),
132 )
133 issue_rules = issue_rules.annotate(date_triggered=far_past_date)
134 alert_rules_count = alert_rules.count()
135 issue_rules_count = issue_rules.count()
136 alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, sort_key)
137 rule_intermediary = CombinedQuerysetIntermediary(issue_rules, rule_sort_key)
138 response = self.paginate(
139 request,
140 paginator_cls=CombinedQuerysetPaginator,
141 on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer(expand=expand)),
142 default_per_page=25,
143 intermediaries=[alert_rule_intermediary, rule_intermediary],
144 desc=not is_asc,
145 cursor_cls=StringCursor if case_insensitive else Cursor,
146 case_insensitive=case_insensitive,
147 )
148 response["X-Sentry-Issue-Rule-Hits"] = issue_rules_count
149 response["X-Sentry-Alert-Rule-Hits"] = alert_rules_count
150 return response
151
152
153 @region_silo_endpoint
154 class OrganizationAlertRuleIndexEndpoint(OrganizationEndpoint):
155 permission_classes = (OrganizationAlertRulePermission,)
156
157 def get(self, request: Request, organization) -> Response:
158 """
159 Fetches alert rules for an organization
160 """
161 if not features.has("organizations:incidents", organization, actor=request.user):
162 raise ResourceDoesNotExist
163
164 projects = self.get_projects(request, organization)
165 alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)
166 if not features.has("organizations:performance-view", organization):
167 # Filter to only error alert rules
168 alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)
169
170 return self.paginate(
171 request,
172 queryset=alert_rules,
173 order_by="-date_added",
174 paginator_cls=OffsetPaginator,
175 on_results=lambda x: serialize(x, request.user),
176 default_per_page=25,
177 )
178
179 def post(self, request: Request, organization) -> Response:
180 """
181 Create an alert rule
182 """
183
184 if not features.has("organizations:incidents", organization, actor=request.user):
185 raise ResourceDoesNotExist
186
187 serializer = AlertRuleSerializer(
188 context={"organization": organization, "access": request.access, "user": request.user},
189 data=request.data,
190 )
191
192 if serializer.is_valid():
193 alert_rule = serializer.save()
194 return Response(serialize(alert_rule, request.user), status=status.HTTP_201_CREATED)
195
196 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
197
[end of src/sentry/incidents/endpoints/organization_alert_rule_index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_index.py
--- a/src/sentry/incidents/endpoints/organization_alert_rule_index.py
+++ b/src/sentry/incidents/endpoints/organization_alert_rule_index.py
@@ -46,9 +46,9 @@
user_team_list = OrganizationMemberTeam.objects.filter(
organizationmember__user=request.user, team__in=org_team_list
).values_list("team", flat=True)
- project_ids = Project.objects.filter(teams__in=user_team_list).values_list(
- "id", flat=True
- )
+ project_ids = Project.objects.filter(
+ teams__in=user_team_list, status=ProjectStatus.VISIBLE
+ ).values_list("id", flat=True)
# Materialize the project ids here. This helps us to not overwhelm the query planner with
# overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal
| {"golden_diff": "diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_index.py\n--- a/src/sentry/incidents/endpoints/organization_alert_rule_index.py\n+++ b/src/sentry/incidents/endpoints/organization_alert_rule_index.py\n@@ -46,9 +46,9 @@\n user_team_list = OrganizationMemberTeam.objects.filter(\n organizationmember__user=request.user, team__in=org_team_list\n ).values_list(\"team\", flat=True)\n- project_ids = Project.objects.filter(teams__in=user_team_list).values_list(\n- \"id\", flat=True\n- )\n+ project_ids = Project.objects.filter(\n+ teams__in=user_team_list, status=ProjectStatus.VISIBLE\n+ ).values_list(\"id\", flat=True)\n \n # Materialize the project ids here. This helps us to not overwhelm the query planner with\n # overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal\n", "issue": "Alert Page broken after delete project \n### Self-Hosted Version\n\n22.8.0\n\n### CPU Architecture\n\nx86_64\n\n### Docker Version\n\n20.10.17, build 100c701\n\n### Docker Compose Version\n\n2.6.0\n\n### Steps to Reproduce\n\n1. create project\r\n2. set a alert rule for it\r\n3. delete this project\n\n### Expected Result\n\nAlert will be deleted too.\n\n### Actual Result\n\nAlert Page broken after delete project.\r\nReturns 403, detail is you don't have permission.\r\nManually fixed by:\r\n```\r\n\\dt\r\nselect * from sentry_alertrule;\r\nselect * from sentry_alertruleactivity;\r\nselect * from sentry_alertruleexcludedprojects;\r\nselect * from sentry_alertruletrigger;\r\nselect * from sentry_alertruletriggeraction;\r\nselect * from sentry_alertruletriggerexclusion;\r\n\\dt\r\nselect * from sentry_rule;\r\ndelete from sentry_rule where id = 2;\r\nselect * from sentry_grouprulestatus;\r\ndelete from sentry_grouprulestatus where rule_id = 2;\r\ndelete from sentry_rule where id = 2;\r\nselect * from sentry_ruleactivity;\r\ndelete from sentry_ruleactivity where rule_id = 2;\r\ndelete from sentry_rule where id = 2;\r\ndelete from sentry_rulefirehistory where rule_id = 2;\r\ndelete from sentry_rule where id = 2;\r\nselect * from sentry_rule;\r\n\\dt\r\nselect * from sentry_userrole;\r\nselect * from sentry_userpermission;\r\nselect * from sentry_userrole_users;\r\nselect * from sentry_ruleactivity;\r\nselect * from sentry_rule;\r\nselect * from sentry_ruleactivity_id_seq;\r\nselect * from sentry_rulefirehistory;\r\nselect * from sentry_rulefirehistory_id_seq;\r\nselect * from sentry_rule_id_seq;\r\n\\q\r\n\r\n```\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.db.models import DateTimeField, IntegerField, OuterRef, Q, Subquery, Value\nfrom django.db.models.functions import Coalesce\nfrom django.utils.timezone import make_aware\nfrom rest_framework import status\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.paginator import (\n CombinedQuerysetIntermediary,\n CombinedQuerysetPaginator,\n OffsetPaginator,\n)\nfrom sentry.api.serializers import serialize\nfrom sentry.api.serializers.models.alert_rule import CombinedRuleSerializer\nfrom sentry.api.utils import InvalidParams\nfrom sentry.incidents.models import AlertRule, Incident\nfrom sentry.incidents.serializers import 
AlertRuleSerializer\nfrom sentry.models import OrganizationMemberTeam, Project, ProjectStatus, Rule, RuleStatus, Team\nfrom sentry.snuba.dataset import Dataset\nfrom sentry.utils.cursors import Cursor, StringCursor\n\nfrom .utils import parse_team_params\n\n\n@region_silo_endpoint\nclass OrganizationCombinedRuleIndexEndpoint(OrganizationEndpoint):\n def get(self, request: Request, organization) -> Response:\n \"\"\"\n Fetches alert rules and legacy rules for an organization\n \"\"\"\n project_ids = self.get_requested_project_ids_unchecked(request) or None\n if project_ids == {-1}: # All projects for org:\n project_ids = Project.objects.filter(\n organization=organization, status=ProjectStatus.VISIBLE\n ).values_list(\"id\", flat=True)\n elif project_ids is None: # All projects for user\n org_team_list = Team.objects.filter(organization=organization).values_list(\n \"id\", flat=True\n )\n user_team_list = OrganizationMemberTeam.objects.filter(\n organizationmember__user=request.user, team__in=org_team_list\n ).values_list(\"team\", flat=True)\n project_ids = Project.objects.filter(teams__in=user_team_list).values_list(\n \"id\", flat=True\n )\n\n # Materialize the project ids here. This helps us to not overwhelm the query planner with\n # overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal\n # index to filter on. Also enforces permission checks.\n projects = self.get_projects(request, organization, project_ids=set(project_ids))\n\n teams = request.GET.getlist(\"team\", [])\n team_filter_query = None\n if len(teams) > 0:\n try:\n teams_query, unassigned = parse_team_params(request, organization, teams)\n except InvalidParams as err:\n return Response(str(err), status=status.HTTP_400_BAD_REQUEST)\n\n team_filter_query = Q(owner_id__in=teams_query.values_list(\"actor_id\", flat=True))\n if unassigned:\n team_filter_query = team_filter_query | Q(owner_id=None)\n\n alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)\n if not features.has(\"organizations:performance-view\", organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n issue_rules = Rule.objects.filter(\n status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE], project__in=projects\n )\n name = request.GET.get(\"name\", None)\n if name:\n alert_rules = alert_rules.filter(Q(name__icontains=name))\n issue_rules = issue_rules.filter(Q(label__icontains=name))\n\n if team_filter_query:\n alert_rules = alert_rules.filter(team_filter_query)\n issue_rules = issue_rules.filter(team_filter_query)\n\n expand = request.GET.getlist(\"expand\", [])\n if \"latestIncident\" in expand:\n alert_rules = alert_rules.annotate(\n incident_id=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"id\")[:1]\n ),\n Value(\"-1\"),\n )\n )\n\n is_asc = request.GET.get(\"asc\", False) == \"1\"\n sort_key = request.GET.getlist(\"sort\", [\"date_added\"])\n rule_sort_key = [\n \"label\" if x == \"name\" else x for x in sort_key\n ] # Rule's don't share the same field name for their title/label/name...so we account for that here.\n case_insensitive = sort_key == [\"name\"]\n\n if \"incident_status\" in sort_key:\n alert_rules = alert_rules.annotate(\n incident_status=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"status\")[:1]\n ),\n Value(-1, output_field=IntegerField()),\n )\n )\n issue_rules = 
issue_rules.annotate(\n incident_status=Value(-2, output_field=IntegerField())\n )\n\n if \"date_triggered\" in sort_key:\n far_past_date = Value(make_aware(datetime.min), output_field=DateTimeField())\n alert_rules = alert_rules.annotate(\n date_triggered=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"date_started\")[:1]\n ),\n far_past_date,\n ),\n )\n issue_rules = issue_rules.annotate(date_triggered=far_past_date)\n alert_rules_count = alert_rules.count()\n issue_rules_count = issue_rules.count()\n alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, sort_key)\n rule_intermediary = CombinedQuerysetIntermediary(issue_rules, rule_sort_key)\n response = self.paginate(\n request,\n paginator_cls=CombinedQuerysetPaginator,\n on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer(expand=expand)),\n default_per_page=25,\n intermediaries=[alert_rule_intermediary, rule_intermediary],\n desc=not is_asc,\n cursor_cls=StringCursor if case_insensitive else Cursor,\n case_insensitive=case_insensitive,\n )\n response[\"X-Sentry-Issue-Rule-Hits\"] = issue_rules_count\n response[\"X-Sentry-Alert-Rule-Hits\"] = alert_rules_count\n return response\n\n\n@region_silo_endpoint\nclass OrganizationAlertRuleIndexEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationAlertRulePermission,)\n\n def get(self, request: Request, organization) -> Response:\n \"\"\"\n Fetches alert rules for an organization\n \"\"\"\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n projects = self.get_projects(request, organization)\n alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)\n if not features.has(\"organizations:performance-view\", organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n\n return self.paginate(\n request,\n queryset=alert_rules,\n order_by=\"-date_added\",\n paginator_cls=OffsetPaginator,\n on_results=lambda x: serialize(x, request.user),\n default_per_page=25,\n )\n\n def post(self, request: Request, organization) -> Response:\n \"\"\"\n Create an alert rule\n \"\"\"\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n serializer = AlertRuleSerializer(\n context={\"organization\": organization, \"access\": request.access, \"user\": request.user},\n data=request.data,\n )\n\n if serializer.is_valid():\n alert_rule = serializer.save()\n return Response(serialize(alert_rule, request.user), status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n", "path": "src/sentry/incidents/endpoints/organization_alert_rule_index.py"}]} | 3,140 | 221 |
gh_patches_debug_5025 | rasdani/github-patches | git_diff | tobymao__sqlglot-2095 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to parse the oracle query when "#" is present in the column names
Hello,
The parser throws an error when a "#" character is present in the column name.
**Code for your reference -**
```
import sqlglot
import sqlglot.expressions as exp
query ="""
SELECT A.COL#, B.COL2 FROM SCHEMA1.TABLE1 A
"""
expressions = sqlglot.parse_one(query, read='oracle')
x = list(expressions.find_all(exp.Table))
for i in x:
listTables = str(i).split(' ')[0]
print(listTables)
```
The above code fails because the parser cannot process '#' and throws an error.
Please add support for '#' when parsing queries.
</issue>
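The request is essentially that `#` be accepted as a legal character inside Oracle identifiers. A small sketch of the behaviour being asked for, assuming support is added in the Oracle tokenizer; the expected output in the comments is an assumption, not output from a released version:

```
import sqlglot
import sqlglot.expressions as exp

sql = "SELECT A.COL#, B.COL2 FROM SCHEMA1.TABLE1 A"

# Should parse instead of raising an error on '#'.
tree = sqlglot.parse_one(sql, read="oracle")

# COL# should come back as an ordinary column reference.
print([col.sql(dialect="oracle") for col in tree.find_all(exp.Column)])
# Expected something like: ['A.COL#', 'B.COL2']

# The table extraction from the report keeps working unchanged.
print([table.name for table in tree.find_all(exp.Table)])
# Expected: ['TABLE1']
```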
<code>
[start of sqlglot/dialects/oracle.py]
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
7 from sqlglot.helper import seq_get
8 from sqlglot.tokens import TokenType
9
10
11 def _parse_xml_table(self: parser.Parser) -> exp.XMLTable:
12 this = self._parse_string()
13
14 passing = None
15 columns = None
16
17 if self._match_text_seq("PASSING"):
18 # The BY VALUE keywords are optional and are provided for semantic clarity
19 self._match_text_seq("BY", "VALUE")
20 passing = self._parse_csv(self._parse_column)
21
22 by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
23
24 if self._match_text_seq("COLUMNS"):
25 columns = self._parse_csv(self._parse_field_def)
26
27 return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)
28
29
30 class Oracle(Dialect):
31 ALIAS_POST_TABLESAMPLE = True
32
33 # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
34 RESOLVES_IDENTIFIERS_AS_UPPERCASE = True
35
36 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
37 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
38 TIME_MAPPING = {
39 "AM": "%p", # Meridian indicator with or without periods
40 "A.M.": "%p", # Meridian indicator with or without periods
41 "PM": "%p", # Meridian indicator with or without periods
42 "P.M.": "%p", # Meridian indicator with or without periods
43 "D": "%u", # Day of week (1-7)
44 "DAY": "%A", # name of day
45 "DD": "%d", # day of month (1-31)
46 "DDD": "%j", # day of year (1-366)
47 "DY": "%a", # abbreviated name of day
48 "HH": "%I", # Hour of day (1-12)
49 "HH12": "%I", # alias for HH
50 "HH24": "%H", # Hour of day (0-23)
51 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
52 "MI": "%M", # Minute (0-59)
53 "MM": "%m", # Month (01-12; January = 01)
54 "MON": "%b", # Abbreviated name of month
55 "MONTH": "%B", # Name of month
56 "SS": "%S", # Second (0-59)
57 "WW": "%W", # Week of year (1-53)
58 "YY": "%y", # 15
59 "YYYY": "%Y", # 2015
60 }
61
62 class Parser(parser.Parser):
63 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
64
65 FUNCTIONS = {
66 **parser.Parser.FUNCTIONS,
67 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
68 }
69
70 FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
71 **parser.Parser.FUNCTION_PARSERS,
72 "XMLTABLE": _parse_xml_table,
73 }
74
75 TYPE_LITERAL_PARSERS = {
76 exp.DataType.Type.DATE: lambda self, this, _: self.expression(
77 exp.DateStrToDate, this=this
78 )
79 }
80
81 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
82 # Reference: https://stackoverflow.com/a/336455
83 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}
84
85 def _parse_column(self) -> t.Optional[exp.Expression]:
86 column = super()._parse_column()
87 if column:
88 column.set("join_mark", self._match(TokenType.JOIN_MARKER))
89 return column
90
91 def _parse_hint(self) -> t.Optional[exp.Hint]:
92 if self._match(TokenType.HINT):
93 start = self._curr
94 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
95 self._advance()
96
97 if not self._curr:
98 self.raise_error("Expected */ after HINT")
99
100 end = self._tokens[self._index - 3]
101 return exp.Hint(expressions=[self._find_sql(start, end)])
102
103 return None
104
105 class Generator(generator.Generator):
106 LOCKING_READS_SUPPORTED = True
107 JOIN_HINTS = False
108 TABLE_HINTS = False
109 COLUMN_JOIN_MARKS_SUPPORTED = True
110
111 LIMIT_FETCH = "FETCH"
112
113 TYPE_MAPPING = {
114 **generator.Generator.TYPE_MAPPING,
115 exp.DataType.Type.TINYINT: "NUMBER",
116 exp.DataType.Type.SMALLINT: "NUMBER",
117 exp.DataType.Type.INT: "NUMBER",
118 exp.DataType.Type.BIGINT: "NUMBER",
119 exp.DataType.Type.DECIMAL: "NUMBER",
120 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
121 exp.DataType.Type.VARCHAR: "VARCHAR2",
122 exp.DataType.Type.NVARCHAR: "NVARCHAR2",
123 exp.DataType.Type.NCHAR: "NCHAR",
124 exp.DataType.Type.TEXT: "CLOB",
125 exp.DataType.Type.BINARY: "BLOB",
126 exp.DataType.Type.VARBINARY: "BLOB",
127 }
128
129 TRANSFORMS = {
130 **generator.Generator.TRANSFORMS,
131 exp.DateStrToDate: lambda self, e: self.func(
132 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
133 ),
134 exp.Group: transforms.preprocess([transforms.unalias_group]),
135 exp.ILike: no_ilike_sql,
136 exp.Coalesce: rename_func("NVL"),
137 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
138 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
139 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
140 exp.Substring: rename_func("SUBSTR"),
141 exp.Table: lambda self, e: self.table_sql(e, sep=" "),
142 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
143 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
144 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
145 exp.Trim: trim_sql,
146 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
147 }
148
149 PROPERTIES_LOCATION = {
150 **generator.Generator.PROPERTIES_LOCATION,
151 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
152 }
153
154 def offset_sql(self, expression: exp.Offset) -> str:
155 return f"{super().offset_sql(expression)} ROWS"
156
157 def xmltable_sql(self, expression: exp.XMLTable) -> str:
158 this = self.sql(expression, "this")
159 passing = self.expressions(expression, key="passing")
160 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
161 columns = self.expressions(expression, key="columns")
162 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
163 by_ref = (
164 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
165 )
166 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
167
168 class Tokenizer(tokens.Tokenizer):
169 VAR_SINGLE_TOKENS = {"@"}
170
171 KEYWORDS = {
172 **tokens.Tokenizer.KEYWORDS,
173 "(+)": TokenType.JOIN_MARKER,
174 "BINARY_DOUBLE": TokenType.DOUBLE,
175 "BINARY_FLOAT": TokenType.FLOAT,
176 "COLUMNS": TokenType.COLUMN,
177 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
178 "MINUS": TokenType.EXCEPT,
179 "NVARCHAR2": TokenType.NVARCHAR,
180 "SAMPLE": TokenType.TABLE_SAMPLE,
181 "START": TokenType.BEGIN,
182 "TOP": TokenType.TOP,
183 "VARCHAR2": TokenType.VARCHAR,
184 }
185
[end of sqlglot/dialects/oracle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -166,7 +166,7 @@
return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
class Tokenizer(tokens.Tokenizer):
- VAR_SINGLE_TOKENS = {"@"}
+ VAR_SINGLE_TOKENS = {"@", "$", "#"}
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
| {"golden_diff": "diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py\n--- a/sqlglot/dialects/oracle.py\n+++ b/sqlglot/dialects/oracle.py\n@@ -166,7 +166,7 @@\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n \n class Tokenizer(tokens.Tokenizer):\n- VAR_SINGLE_TOKENS = {\"@\"}\n+ VAR_SINGLE_TOKENS = {\"@\", \"$\", \"#\"}\n \n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n", "issue": "Unable to parse the oracle query when \"#\" is present in the column names\nHello,\r\n\r\nParser is throwing an error when there is \"#\" character is present in the column name.\r\n\r\n**Code for your reference -**\r\n\r\n```\r\nimport sqlglot.expressions as exp\r\n\r\nquery =\"\"\"\r\nSELECT A.COL#, B.COL2 FROM SCHEMA1.TABLE1 A\r\n\"\"\"\r\n\r\nexpressions = sqlglot.parse_one(query, read='oracle')\r\nx = list(expressions.find_all(exp.Table))\r\n\r\nfor i in x:\r\n listTables = str(i).split(' ')[0]\r\n print(listTables)\r\n```\r\n\r\n\r\nAbove code fails as it cannot process '#' and throws the error.\r\nRequest you to add support for '#' for query parsing.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self: parser.Parser) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(self._parse_field_def)\n\n return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)\n\n\nclass Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n\n # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = True\n\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": 
lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..\n # Reference: https://stackoverflow.com/a/336455\n DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Hint]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n COLUMN_JOIN_MARKS_SUPPORTED = True\n\n LIMIT_FETCH = \"FETCH\"\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.NCHAR: \"NCHAR\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.ILike: no_ilike_sql,\n exp.Coalesce: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n 
return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"BINARY_DOUBLE\": TokenType.DOUBLE,\n \"BINARY_FLOAT\": TokenType.FLOAT,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}]} | 3,069 | 140 |
gh_patches_debug_21422 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Metric Reader function not being called
As reported by @aabmass :v:
There is a bug [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py#L204). If a metric reader returns `False` in a shutdown, the next metric reader shutdown function won't be called.
</issue>
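A minimal reproduction of the short-circuit behaviour described above, using two stand-in readers rather than real SDK classes:

```
class FakeReader:
    def __init__(self, name, ok):
        self.name = name
        self.ok = ok

    def shutdown(self):
        print(f"shutting down {self.name}")
        return self.ok


readers = [FakeReader("first", False), FakeReader("second", True)]

result = True
for reader in readers:
    # Once `result` is False, `and` short-circuits and reader.shutdown()
    # is never evaluated for the remaining readers.
    result = result and reader.shutdown()

# Output: only "shutting down first"; the second reader is silently skipped.
```

Collecting each reader's result separately, as the patch later in this entry does, guarantees every `shutdown()` is invoked.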
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from atexit import register, unregister
16 from logging import getLogger
17 from threading import Lock
18 from typing import Optional, Sequence
19
20 from opentelemetry._metrics import Meter as APIMeter
21 from opentelemetry._metrics import MeterProvider as APIMeterProvider
22 from opentelemetry._metrics import NoOpMeter
23 from opentelemetry._metrics.instrument import Counter as APICounter
24 from opentelemetry._metrics.instrument import Histogram as APIHistogram
25 from opentelemetry._metrics.instrument import (
26 ObservableCounter as APIObservableCounter,
27 )
28 from opentelemetry._metrics.instrument import (
29 ObservableGauge as APIObservableGauge,
30 )
31 from opentelemetry._metrics.instrument import (
32 ObservableUpDownCounter as APIObservableUpDownCounter,
33 )
34 from opentelemetry._metrics.instrument import UpDownCounter as APIUpDownCounter
35 from opentelemetry.sdk._metrics.instrument import (
36 Counter,
37 Histogram,
38 ObservableCounter,
39 ObservableGauge,
40 ObservableUpDownCounter,
41 UpDownCounter,
42 )
43 from opentelemetry.sdk._metrics.measurement_consumer import (
44 MeasurementConsumer,
45 SynchronousMeasurementConsumer,
46 )
47 from opentelemetry.sdk._metrics.metric_reader import MetricReader
48 from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
49 from opentelemetry.sdk.resources import Resource
50 from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
51
52 _logger = getLogger(__name__)
53
54
55 class Meter(APIMeter):
56 def __init__(
57 self,
58 instrumentation_info: InstrumentationInfo,
59 measurement_consumer: MeasurementConsumer,
60 ):
61 super().__init__(instrumentation_info)
62 self._instrumentation_info = instrumentation_info
63 self._measurement_consumer = measurement_consumer
64
65 def create_counter(self, name, unit=None, description=None) -> APICounter:
66 return Counter(
67 name,
68 self._instrumentation_info,
69 self._measurement_consumer,
70 unit,
71 description,
72 )
73
74 def create_up_down_counter(
75 self, name, unit=None, description=None
76 ) -> APIUpDownCounter:
77 return UpDownCounter(
78 name,
79 self._instrumentation_info,
80 self._measurement_consumer,
81 unit,
82 description,
83 )
84
85 def create_observable_counter(
86 self, name, callback, unit=None, description=None
87 ) -> APIObservableCounter:
88
89 instrument = ObservableCounter(
90 name,
91 self._instrumentation_info,
92 self._measurement_consumer,
93 callback,
94 unit,
95 description,
96 )
97
98 self._measurement_consumer.register_asynchronous_instrument(instrument)
99
100 return instrument
101
102 def create_histogram(
103 self, name, unit=None, description=None
104 ) -> APIHistogram:
105 return Histogram(
106 name,
107 self._instrumentation_info,
108 self._measurement_consumer,
109 unit,
110 description,
111 )
112
113 def create_observable_gauge(
114 self, name, callback, unit=None, description=None
115 ) -> APIObservableGauge:
116
117 instrument = ObservableGauge(
118 name,
119 self._instrumentation_info,
120 self._measurement_consumer,
121 callback,
122 unit,
123 description,
124 )
125
126 self._measurement_consumer.register_asynchronous_instrument(instrument)
127
128 return instrument
129
130 def create_observable_up_down_counter(
131 self, name, callback, unit=None, description=None
132 ) -> APIObservableUpDownCounter:
133
134 instrument = ObservableUpDownCounter(
135 name,
136 self._instrumentation_info,
137 self._measurement_consumer,
138 callback,
139 unit,
140 description,
141 )
142
143 self._measurement_consumer.register_asynchronous_instrument(instrument)
144
145 return instrument
146
147
148 class MeterProvider(APIMeterProvider):
149 """See `opentelemetry._metrics.MeterProvider`."""
150
151 def __init__(
152 self,
153 metric_readers: Sequence[MetricReader] = (),
154 resource: Resource = Resource.create({}),
155 shutdown_on_exit: bool = True,
156 ):
157 self._lock = Lock()
158 self._meter_lock = Lock()
159 self._atexit_handler = None
160 self._sdk_config = SdkConfiguration(
161 resource=resource, metric_readers=metric_readers
162 )
163 self._measurement_consumer = SynchronousMeasurementConsumer(
164 sdk_config=self._sdk_config
165 )
166
167 if shutdown_on_exit:
168 self._atexit_handler = register(self.shutdown)
169
170 self._meters = {}
171 self._metric_readers = metric_readers
172
173 for metric_reader in self._sdk_config.metric_readers:
174 metric_reader._register_measurement_consumer(self)
175
176 self._shutdown = False
177
178 def force_flush(self) -> bool:
179
180 # FIXME implement a timeout
181
182 metric_reader_result = True
183
184 for metric_reader in self._sdk_config.metric_readers:
185 metric_reader_result = (
186 metric_reader_result and metric_reader.force_flush()
187 )
188
189 if not metric_reader_result:
190 _logger.warning("Unable to force flush all metric readers")
191
192 return metric_reader_result
193
194 def shutdown(self):
195 # FIXME implement a timeout
196
197 if self._shutdown:
198 _logger.warning("shutdown can only be called once")
199 return False
200
201 result = True
202
203 for metric_reader in self._sdk_config.metric_readers:
204 result = result and metric_reader.shutdown()
205
206 if not result:
207 _logger.warning("A MetricReader failed to shutdown")
208
209 self._shutdown = True
210
211 if self._atexit_handler is not None:
212 unregister(self._atexit_handler)
213 self._atexit_handler = None
214
215 return result
216
217 def get_meter(
218 self,
219 name: str,
220 version: Optional[str] = None,
221 schema_url: Optional[str] = None,
222 ) -> Meter:
223
224 if self._shutdown:
225 _logger.warning(
226 "A shutdown `MeterProvider` can not provide a `Meter`"
227 )
228 return NoOpMeter(name, version=version, schema_url=schema_url)
229
230 info = InstrumentationInfo(name, version, schema_url)
231 with self._meter_lock:
232 if not self._meters.get(info):
233 self._meters[info] = Meter(
234 info,
235 self._measurement_consumer,
236 )
237 return self._meters[info]
238
[end of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
@@ -198,13 +198,17 @@
_logger.warning("shutdown can only be called once")
return False
- result = True
+ overall_result = True
for metric_reader in self._sdk_config.metric_readers:
- result = result and metric_reader.shutdown()
+ metric_reader_result = metric_reader.shutdown()
- if not result:
- _logger.warning("A MetricReader failed to shutdown")
+ if not metric_reader_result:
+ _logger.warning(
+ "MetricReader {metric_reader} failed to shutdown"
+ )
+
+ overall_result = overall_result and metric_reader_result
self._shutdown = True
@@ -212,7 +216,7 @@
unregister(self._atexit_handler)
self._atexit_handler = None
- return result
+ return overall_result
def get_meter(
self,
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n@@ -198,13 +198,17 @@\n _logger.warning(\"shutdown can only be called once\")\n return False\n \n- result = True\n+ overall_result = True\n \n for metric_reader in self._sdk_config.metric_readers:\n- result = result and metric_reader.shutdown()\n+ metric_reader_result = metric_reader.shutdown()\n \n- if not result:\n- _logger.warning(\"A MetricReader failed to shutdown\")\n+ if not metric_reader_result:\n+ _logger.warning(\n+ \"MetricReader {metric_reader} failed to shutdown\"\n+ )\n+\n+ overall_result = overall_result and metric_reader_result\n \n self._shutdown = True\n \n@@ -212,7 +216,7 @@\n unregister(self._atexit_handler)\n self._atexit_handler = None\n \n- return result\n+ return overall_result\n \n def get_meter(\n self,\n", "issue": "Metric Reader function not being called\nAs reported by @aabmass :v:\r\n\r\nThere is a bug [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py#L204). If a metric reader returns `False` in a shutdown, the next metric reader shutdown function won't be called.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom atexit import register, unregister\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import Optional, Sequence\n\nfrom opentelemetry._metrics import Meter as APIMeter\nfrom opentelemetry._metrics import MeterProvider as APIMeterProvider\nfrom opentelemetry._metrics import NoOpMeter\nfrom opentelemetry._metrics.instrument import Counter as APICounter\nfrom opentelemetry._metrics.instrument import Histogram as APIHistogram\nfrom opentelemetry._metrics.instrument import (\n ObservableCounter as APIObservableCounter,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableGauge as APIObservableGauge,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableUpDownCounter as APIObservableUpDownCounter,\n)\nfrom opentelemetry._metrics.instrument import UpDownCounter as APIUpDownCounter\nfrom opentelemetry.sdk._metrics.instrument import (\n Counter,\n Histogram,\n ObservableCounter,\n ObservableGauge,\n ObservableUpDownCounter,\n UpDownCounter,\n)\nfrom opentelemetry.sdk._metrics.measurement_consumer import (\n MeasurementConsumer,\n SynchronousMeasurementConsumer,\n)\nfrom opentelemetry.sdk._metrics.metric_reader import MetricReader\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.util.instrumentation import InstrumentationInfo\n\n_logger = getLogger(__name__)\n\n\nclass Meter(APIMeter):\n def __init__(\n self,\n instrumentation_info: InstrumentationInfo,\n measurement_consumer: 
MeasurementConsumer,\n ):\n super().__init__(instrumentation_info)\n self._instrumentation_info = instrumentation_info\n self._measurement_consumer = measurement_consumer\n\n def create_counter(self, name, unit=None, description=None) -> APICounter:\n return Counter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_up_down_counter(\n self, name, unit=None, description=None\n ) -> APIUpDownCounter:\n return UpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_counter(\n self, name, callback, unit=None, description=None\n ) -> APIObservableCounter:\n\n instrument = ObservableCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_histogram(\n self, name, unit=None, description=None\n ) -> APIHistogram:\n return Histogram(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_gauge(\n self, name, callback, unit=None, description=None\n ) -> APIObservableGauge:\n\n instrument = ObservableGauge(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_observable_up_down_counter(\n self, name, callback, unit=None, description=None\n ) -> APIObservableUpDownCounter:\n\n instrument = ObservableUpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n\nclass MeterProvider(APIMeterProvider):\n \"\"\"See `opentelemetry._metrics.MeterProvider`.\"\"\"\n\n def __init__(\n self,\n metric_readers: Sequence[MetricReader] = (),\n resource: Resource = Resource.create({}),\n shutdown_on_exit: bool = True,\n ):\n self._lock = Lock()\n self._meter_lock = Lock()\n self._atexit_handler = None\n self._sdk_config = SdkConfiguration(\n resource=resource, metric_readers=metric_readers\n )\n self._measurement_consumer = SynchronousMeasurementConsumer(\n sdk_config=self._sdk_config\n )\n\n if shutdown_on_exit:\n self._atexit_handler = register(self.shutdown)\n\n self._meters = {}\n self._metric_readers = metric_readers\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader._register_measurement_consumer(self)\n\n self._shutdown = False\n\n def force_flush(self) -> bool:\n\n # FIXME implement a timeout\n\n metric_reader_result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader_result = (\n metric_reader_result and metric_reader.force_flush()\n )\n\n if not metric_reader_result:\n _logger.warning(\"Unable to force flush all metric readers\")\n\n return metric_reader_result\n\n def shutdown(self):\n # FIXME implement a timeout\n\n if self._shutdown:\n _logger.warning(\"shutdown can only be called once\")\n return False\n\n result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n result = result and metric_reader.shutdown()\n\n if not result:\n _logger.warning(\"A MetricReader failed to shutdown\")\n\n self._shutdown = True\n\n if self._atexit_handler is not None:\n unregister(self._atexit_handler)\n self._atexit_handler = None\n\n return result\n\n def 
get_meter(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ) -> Meter:\n\n if self._shutdown:\n _logger.warning(\n \"A shutdown `MeterProvider` can not provide a `Meter`\"\n )\n return NoOpMeter(name, version=version, schema_url=schema_url)\n\n info = InstrumentationInfo(name, version, schema_url)\n with self._meter_lock:\n if not self._meters.get(info):\n self._meters[info] = Meter(\n info,\n self._measurement_consumer,\n )\n return self._meters[info]\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py"}]} | 2,679 | 270 |
gh_patches_debug_30084 | rasdani/github-patches | git_diff | cobbler__cobbler-3264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Task logs don't end
### Describe the bug
Task logs contain the normal server logs, even though the task has stopped. This leads to duplicated logs in all created files.
### Steps to reproduce
1. `cobbler mkloaders` (or any other task)
2. Repeat step 1
3. See logs being appended to completed tasks
### Expected behavior
The files should only contain the logs for the task.
### Cobbler version
Commit: 2a5c7ce9c1533bbf6f6b9050198d98aca45a06ae
````paste below
Cobbler 3.4.0
source: ?, ?
build time: Fri Oct 14 14:17:09 2022
````
### Operating system
```
e62e937a2de9:/var/log/cobbler/tasks # cat /etc/os-release
NAME="openSUSE Leap"
VERSION="15.3"
ID="opensuse-leap"
ID_LIKE="suse opensuse"
VERSION_ID="15.3"
PRETTY_NAME="openSUSE Leap 15.3"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:leap:15.3"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://www.opensuse.org/"
```
### Cobbler log
Not relevant
### Screenshots
Not relevant
### Additional information
Found by accident during another debugging session
</issue>
<code>
[start of cobbler/utils/thread.py]
1 """
2 This module is responsible for managing the custom common threading logic Cobbler has.
3 """
4
5 import logging
6 import pathlib
7 from threading import Thread
8 from typing import Callable
9
10 from cobbler import enums
11 from cobbler import utils
12
13
14 class CobblerThread(Thread):
15 """
16 This is a custom thread that has a custom logger as well as logic to execute Cobbler triggers.
17 """
18
19 def __init__(
20 self,
21 event_id: str,
22 remote,
23 options: dict,
24 task_name: str,
25 api,
26 run: Callable,
27 on_done: Callable = None,
28 ):
29 """
30 This constructor creates a Cobbler thread which then may be run by calling ``run()``.
31
32 :param event_id: The event-id which is associated with this thread. Also used as thread name
33 :param remote: The Cobbler remote object to execute actions with.
34 :param options: Additional options which can be passed into the Thread.
35 :param task_name: The high level task name which is used to trigger pre- and post-task triggers
36 :param api: The Cobbler api object to resolve information with.
37 :param run: The callable that is going to be executed with this thread.
38 :param on_done: An optional callable that is going to be executed after ``run`` but before the triggers.
39 """
40 super().__init__(name=event_id)
41 self.event_id = event_id
42 self.remote = remote
43 self.logger = logging.getLogger()
44 self.__setup_logger()
45 self._run = run
46 self.on_done = on_done
47 if options is None:
48 options = {}
49 self.options = options
50 self.task_name = task_name
51 self.api = api
52
53 def __setup_logger(self):
54 """
55 Utility function that will set up the Python logger for the tasks in a special directory.
56 """
57 filename = pathlib.Path("/var/log/cobbler/tasks") / f"{self.event_id}.log"
58 task_log_handler = logging.FileHandler(str(filename), encoding="utf-8")
59 task_log_formatter = logging.Formatter(
60 "[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s"
61 )
62 task_log_handler.setFormatter(task_log_formatter)
63 self.logger.setLevel(logging.INFO)
64 self.logger.addHandler(task_log_handler)
65
66 def _set_task_state(self, new_state: enums.EventStatus):
67 """
68 Set the state of the task. (For internal use only)
69
70 :param new_state: The new state of the task.
71 """
72 if not isinstance(new_state, enums.EventStatus):
73 raise TypeError('"new_state" needs to be of type enums.EventStatus!')
74 if self.event_id not in self.remote.events:
75 raise ValueError('"event_id" not existing!')
76 self.remote.events[self.event_id].state = new_state
77 # clear the list of who has read it
78 self.remote.events[self.event_id].read_by_who = []
79 if new_state == enums.EventStatus.COMPLETE:
80 self.logger.info("### TASK COMPLETE ###")
81 elif new_state == enums.EventStatus.FAILED:
82 self.logger.error("### TASK FAILED ###")
83
84 def run(self):
85 """
86 Run the thread.
87
88 :return: The return code of the action. This may a boolean or a Linux return code.
89 """
90 self.logger.info("start_task(%s); event_id(%s)", self.task_name, self.event_id)
91 try:
92 if utils.run_triggers(
93 api=self.api,
94 globber=f"/var/lib/cobbler/triggers/task/{self.task_name}/pre/*",
95 additional=self.options,
96 ):
97 self._set_task_state(enums.EventStatus.FAILED)
98 return False
99 rc = self._run(self)
100 if rc is not None and not rc:
101 self._set_task_state(enums.EventStatus.FAILED)
102 else:
103 self._set_task_state(enums.EventStatus.COMPLETE)
104 if self.on_done is not None:
105 self.on_done()
106 utils.run_triggers(
107 api=self.api,
108 globber=f"/var/lib/cobbler/triggers/task/{self.task_name}/post/*",
109 additional=self.options,
110 )
111 return rc
112 except Exception:
113 utils.log_exc()
114 self._set_task_state(enums.EventStatus.FAILED)
115 return False
116
[end of cobbler/utils/thread.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/utils/thread.py b/cobbler/utils/thread.py
--- a/cobbler/utils/thread.py
+++ b/cobbler/utils/thread.py
@@ -41,6 +41,7 @@
self.event_id = event_id
self.remote = remote
self.logger = logging.getLogger()
+ self.__task_log_handler = None
self.__setup_logger()
self._run = run
self.on_done = on_done
@@ -55,13 +56,13 @@
Utility function that will set up the Python logger for the tasks in a special directory.
"""
filename = pathlib.Path("/var/log/cobbler/tasks") / f"{self.event_id}.log"
- task_log_handler = logging.FileHandler(str(filename), encoding="utf-8")
+ self.__task_log_handler = logging.FileHandler(str(filename), encoding="utf-8")
task_log_formatter = logging.Formatter(
"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s"
)
- task_log_handler.setFormatter(task_log_formatter)
+ self.__task_log_handler.setFormatter(task_log_formatter)
self.logger.setLevel(logging.INFO)
- self.logger.addHandler(task_log_handler)
+ self.logger.addHandler(self.__task_log_handler)
def _set_task_state(self, new_state: enums.EventStatus):
"""
@@ -113,3 +114,5 @@
utils.log_exc()
self._set_task_state(enums.EventStatus.FAILED)
return False
+ finally:
+ self.logger.removeHandler(self.__task_log_handler)
| {"golden_diff": "diff --git a/cobbler/utils/thread.py b/cobbler/utils/thread.py\n--- a/cobbler/utils/thread.py\n+++ b/cobbler/utils/thread.py\n@@ -41,6 +41,7 @@\n self.event_id = event_id\n self.remote = remote\n self.logger = logging.getLogger()\n+ self.__task_log_handler = None\n self.__setup_logger()\n self._run = run\n self.on_done = on_done\n@@ -55,13 +56,13 @@\n Utility function that will set up the Python logger for the tasks in a special directory.\n \"\"\"\n filename = pathlib.Path(\"/var/log/cobbler/tasks\") / f\"{self.event_id}.log\"\n- task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n+ self.__task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n task_log_formatter = logging.Formatter(\n \"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s\"\n )\n- task_log_handler.setFormatter(task_log_formatter)\n+ self.__task_log_handler.setFormatter(task_log_formatter)\n self.logger.setLevel(logging.INFO)\n- self.logger.addHandler(task_log_handler)\n+ self.logger.addHandler(self.__task_log_handler)\n \n def _set_task_state(self, new_state: enums.EventStatus):\n \"\"\"\n@@ -113,3 +114,5 @@\n utils.log_exc()\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n+ finally:\n+ self.logger.removeHandler(self.__task_log_handler)\n", "issue": "Task logs don't end\n### Describe the bug\r\n\r\nTask logs contain the normal server logs, even though the task has stopped. This leads to duplicated logs in all created files.\r\n\r\n### Steps to reproduce\r\n\r\n1. `cobbler mkloaders` (or any other task)\r\n2. Repeat step 1\r\n3. See logs being appended to completed tasks\r\n\r\n### Expected behavior\r\n\r\nThe files should only contain the logs for the task.\r\n\r\n### Cobbler version\r\n\r\nCommit: 2a5c7ce9c1533bbf6f6b9050198d98aca45a06ae\r\n\r\n````paste below\r\nCobbler 3.4.0\r\n source: ?, ?\r\n build time: Fri Oct 14 14:17:09 2022\r\n````\r\n\r\n### Operating system\r\n\r\n```\r\ne62e937a2de9:/var/log/cobbler/tasks # cat /etc/os-release \r\nNAME=\"openSUSE Leap\"\r\nVERSION=\"15.3\"\r\nID=\"opensuse-leap\"\r\nID_LIKE=\"suse opensuse\"\r\nVERSION_ID=\"15.3\"\r\nPRETTY_NAME=\"openSUSE Leap 15.3\"\r\nANSI_COLOR=\"0;32\"\r\nCPE_NAME=\"cpe:/o:opensuse:leap:15.3\"\r\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\r\nHOME_URL=\"https://www.opensuse.org/\"\r\n```\r\n\r\n### Cobbler log\r\n\r\nNot relevant\r\n\r\n### Screenshots\r\n\r\nNot relevant\r\n\r\n### Additional information\r\n\r\nFound by accident during another debugging session\r\n\n", "before_files": [{"content": "\"\"\"\nThis module is responsible for managing the custom common threading logic Cobbler has.\n\"\"\"\n\nimport logging\nimport pathlib\nfrom threading import Thread\nfrom typing import Callable\n\nfrom cobbler import enums\nfrom cobbler import utils\n\n\nclass CobblerThread(Thread):\n \"\"\"\n This is a custom thread that has a custom logger as well as logic to execute Cobbler triggers.\n \"\"\"\n\n def __init__(\n self,\n event_id: str,\n remote,\n options: dict,\n task_name: str,\n api,\n run: Callable,\n on_done: Callable = None,\n ):\n \"\"\"\n This constructor creates a Cobbler thread which then may be run by calling ``run()``.\n\n :param event_id: The event-id which is associated with this thread. 
Also used as thread name\n :param remote: The Cobbler remote object to execute actions with.\n :param options: Additional options which can be passed into the Thread.\n :param task_name: The high level task name which is used to trigger pre- and post-task triggers\n :param api: The Cobbler api object to resolve information with.\n :param run: The callable that is going to be executed with this thread.\n :param on_done: An optional callable that is going to be executed after ``run`` but before the triggers.\n \"\"\"\n super().__init__(name=event_id)\n self.event_id = event_id\n self.remote = remote\n self.logger = logging.getLogger()\n self.__setup_logger()\n self._run = run\n self.on_done = on_done\n if options is None:\n options = {}\n self.options = options\n self.task_name = task_name\n self.api = api\n\n def __setup_logger(self):\n \"\"\"\n Utility function that will set up the Python logger for the tasks in a special directory.\n \"\"\"\n filename = pathlib.Path(\"/var/log/cobbler/tasks\") / f\"{self.event_id}.log\"\n task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n task_log_formatter = logging.Formatter(\n \"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s\"\n )\n task_log_handler.setFormatter(task_log_formatter)\n self.logger.setLevel(logging.INFO)\n self.logger.addHandler(task_log_handler)\n\n def _set_task_state(self, new_state: enums.EventStatus):\n \"\"\"\n Set the state of the task. (For internal use only)\n\n :param new_state: The new state of the task.\n \"\"\"\n if not isinstance(new_state, enums.EventStatus):\n raise TypeError('\"new_state\" needs to be of type enums.EventStatus!')\n if self.event_id not in self.remote.events:\n raise ValueError('\"event_id\" not existing!')\n self.remote.events[self.event_id].state = new_state\n # clear the list of who has read it\n self.remote.events[self.event_id].read_by_who = []\n if new_state == enums.EventStatus.COMPLETE:\n self.logger.info(\"### TASK COMPLETE ###\")\n elif new_state == enums.EventStatus.FAILED:\n self.logger.error(\"### TASK FAILED ###\")\n\n def run(self):\n \"\"\"\n Run the thread.\n\n :return: The return code of the action. This may a boolean or a Linux return code.\n \"\"\"\n self.logger.info(\"start_task(%s); event_id(%s)\", self.task_name, self.event_id)\n try:\n if utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/pre/*\",\n additional=self.options,\n ):\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n rc = self._run(self)\n if rc is not None and not rc:\n self._set_task_state(enums.EventStatus.FAILED)\n else:\n self._set_task_state(enums.EventStatus.COMPLETE)\n if self.on_done is not None:\n self.on_done()\n utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/post/*\",\n additional=self.options,\n )\n return rc\n except Exception:\n utils.log_exc()\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n", "path": "cobbler/utils/thread.py"}]} | 2,040 | 345 |
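The golden diff above fixes the duplicated-log problem by keeping a reference to the per-task `FileHandler` and detaching it from the root logger in a `finally` block once the task finishes. Below is a minimal standalone sketch of that attach-in-setup, detach-in-finally pattern; the function name, temp-directory path, and log format are illustrative stand-ins under the stated assumptions, not Cobbler's actual `CobblerThread` API.

```python
import logging
import tempfile
from pathlib import Path


def run_task(event_id: str, work) -> bool:
    """Sketch only: give one task its own log file, then detach the handler."""
    logger = logging.getLogger()
    log_path = Path(tempfile.gettempdir()) / f"{event_id}.log"
    handler = logging.FileHandler(str(log_path), encoding="utf-8")
    handler.setFormatter(
        logging.Formatter("[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s")
    )
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    try:
        logger.info("start_task(%s)", event_id)
        return bool(work())
    except Exception:
        logger.exception("task %s failed", event_id)
        return False
    finally:
        # Without this, the root logger keeps the handler after the task ends,
        # so every later log line is appended to the finished task's file,
        # which is exactly the duplication described in the issue.
        logger.removeHandler(handler)
        handler.close()
```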
gh_patches_debug_8356 | rasdani/github-patches | git_diff | interlegis__sapl-437 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sapl 31: define the display order of a parliamentarian's mandates
In the Parlamentar (parliamentarian) feature, define the order in which the parliamentarian's mandates are displayed. I think descending order is the appropriate one.
Example: http://sapl31agudo.interlegis.leg.br/parlamentar/21/mandato
</issue>
<code>
[start of parlamentares/views.py]
1 from django.contrib import messages
2 from django.core.urlresolvers import reverse_lazy
3 from django.shortcuts import redirect
4 from django.utils.datastructures import MultiValueDictKeyError
5 from django.utils.translation import ugettext_lazy as _
6 from django.views.generic import FormView
7
8 import crud.base
9 import crud.masterdetail
10 from crud.base import Crud
11 from crud.masterdetail import MasterDetailCrud
12
13 from .forms import FiliacaoForm, ParlamentarCreateForm, ParlamentarForm
14 from .models import (CargoMesa, Coligacao, ComposicaoMesa, Dependente,
15 Filiacao, Legislatura, Mandato, NivelInstrucao,
16 Parlamentar, Partido, SessaoLegislativa, SituacaoMilitar,
17 TipoAfastamento, TipoDependente)
18
19 CargoMesaCrud = Crud.build(CargoMesa, 'cargo_mesa')
20 LegislaturaCrud = Crud.build(Legislatura, 'tabelas_auxiliares#legislatura')
21 ColigacaoCrud = Crud.build(Coligacao, 'coligacao')
22 PartidoCrud = Crud.build(Partido, 'partidos')
23 SessaoLegislativaCrud = Crud.build(SessaoLegislativa, 'sessao_legislativa')
24 TipoDependenteCrud = Crud.build(TipoDependente, 'tipo_dependente')
25 NivelInstrucaoCrud = Crud.build(NivelInstrucao, 'nivel_instrucao')
26 TipoAfastamentoCrud = Crud.build(TipoAfastamento, 'tipo_afastamento')
27 TipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')
28
29 DependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')
30 MandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')
31
32
33 class FiliacaoCrud(MasterDetailCrud):
34 model = Filiacao
35 parent_field = 'parlamentar'
36 help_path = ''
37
38 class CreateView(MasterDetailCrud.CreateView):
39 form_class = FiliacaoForm
40
41 class UpdateView(MasterDetailCrud.UpdateView):
42 form_class = FiliacaoForm
43
44 class ListView(MasterDetailCrud.ListView):
45 ordering = '-data'
46
47
48 class ParlamentarCrud(Crud):
49 model = Parlamentar
50 help_path = ''
51
52 class UpdateView(crud.base.CrudUpdateView):
53 form_class = ParlamentarForm
54
55 class CreateView(crud.base.CrudCreateView):
56 form_class = ParlamentarCreateForm
57
58 @property
59 def layout_key(self):
60 return 'ParlamentarCreate'
61
62 class ListView(crud.base.CrudListView):
63 template_name = "parlamentares/parlamentares_list.html"
64 paginate_by = None
65
66 def take_legislatura_id(self):
67 legislaturas = Legislatura.objects.all().order_by(
68 '-data_inicio', '-data_fim')
69
70 try:
71 legislatura_id = int(self.request.GET['periodo'])
72 except MultiValueDictKeyError:
73 legislatura_id = legislaturas.first().id
74
75 return legislatura_id
76
77 def get_queryset(self):
78 mandatos = Mandato.objects.filter(
79 legislatura_id=self.take_legislatura_id())
80 return mandatos
81
82 def get_rows(self, object_list):
83 parlamentares = []
84 for m in object_list:
85 ultima_filiacao = m.parlamentar.filiacao_set.\
86 order_by('-data').first()
87 if ultima_filiacao and not ultima_filiacao.data_desfiliacao:
88 partido = ultima_filiacao.partido.sigla
89 else:
90 partido = _('Sem Partido')
91
92 parlamentar = [
93 (m.parlamentar.nome_parlamentar, m.parlamentar.id),
94 (partido, None),
95 ('Sim' if m.parlamentar.ativo else 'Não', None)
96 ]
97 parlamentares.append(parlamentar)
98 return parlamentares
99
100 def get_headers(self):
101 return ['Parlamentar', 'Partido', 'Ativo?']
102
103 def get_context_data(self, **kwargs):
104 context = super(ParlamentarCrud.ListView, self
105 ).get_context_data(**kwargs)
106 context.setdefault('title', self.verbose_name_plural)
107
108 # Adiciona legislatura para filtrar parlamentares
109 legislaturas = Legislatura.objects.all().order_by(
110 '-data_inicio', '-data_fim')
111 context['legislaturas'] = legislaturas
112 context['legislatura_id'] = self.take_legislatura_id()
113 return context
114
115
116 class MesaDiretoraView(FormView):
117 template_name = "mesa_diretora/mesa_diretora.html"
118 success_url = reverse_lazy('parlamentares:mesa_diretora')
119
120 # Essa função avisa quando se pode compor uma Mesa Legislativa)
121 def validation(self, request):
122 mensagem = _("Não há nenhuma Sessão Legislativa cadastrada. \
123 Só é possível compor uma Mesa Diretora quando há uma Sessão \
124 Legislativa cadastrada.")
125 messages.add_message(request, messages.INFO, mensagem)
126
127 return self.render_to_response(
128 {'legislaturas': Legislatura.objects.all(
129 ).order_by('-data_inicio'),
130 'legislatura_selecionada': Legislatura.objects.last(),
131 'cargos_vagos': CargoMesa.objects.all()})
132
133 def get(self, request, *args, **kwargs):
134
135 if (not Legislatura.objects.all() or
136 not SessaoLegislativa.objects.all()):
137 return self.validation(request)
138
139 mesa = SessaoLegislativa.objects.filter(
140 legislatura=Legislatura.objects.last()).first(
141 ).composicaomesa_set.all()
142
143 cargos_ocupados = [m.cargo for m in mesa]
144 cargos = CargoMesa.objects.all()
145 cargos_vagos = list(set(cargos) - set(cargos_ocupados))
146
147 parlamentares = Legislatura.objects.last().mandato_set.all()
148 parlamentares_ocupados = [m.parlamentar for m in mesa]
149 parlamentares_vagos = list(
150 set(
151 [p.parlamentar for p in parlamentares]) - set(
152 parlamentares_ocupados))
153
154 return self.render_to_response(
155 {'legislaturas': Legislatura.objects.all(
156 ).order_by('-data_inicio'),
157 'legislatura_selecionada': Legislatura.objects.last(),
158 'sessoes': SessaoLegislativa.objects.filter(
159 legislatura=Legislatura.objects.last()),
160 'sessao_selecionada': SessaoLegislativa.objects.filter(
161 legislatura=Legislatura.objects.last()).first(),
162 'composicao_mesa': mesa,
163 'parlamentares': parlamentares_vagos,
164 'cargos_vagos': cargos_vagos
165 })
166
167 def post(self, request, *args, **kwargs):
168 if 'Incluir' in request.POST:
169
170 if (not Legislatura.objects.all() or
171 not SessaoLegislativa.objects.all()):
172 return self.validation(request)
173
174 composicao = ComposicaoMesa()
175 composicao.sessao_legislativa = SessaoLegislativa.objects.get(
176 id=int(request.POST['sessao']))
177 composicao.parlamentar = Parlamentar.objects.get(
178 id=int(request.POST['parlamentar']))
179 composicao.cargo = CargoMesa.objects.get(
180 id=int(request.POST['cargo']))
181 composicao.save()
182
183 return redirect('parlamentares:mesa_diretora')
184
185 elif 'Excluir' in request.POST:
186
187 if (not Legislatura.objects.all() or
188 not SessaoLegislativa.objects.all()):
189 return self.validation(request)
190
191 if 'composicao_mesa' in request.POST:
192 ids = request.POST['composicao_mesa'].split(':')
193 composicao = ComposicaoMesa.objects.get(
194 sessao_legislativa_id=int(request.POST['sessao']),
195 parlamentar_id=int(ids[0]),
196 cargo_id=int(ids[1])
197 )
198 composicao.delete()
199 return redirect('parlamentares:mesa_diretora')
200 else:
201 mesa = ComposicaoMesa.objects.filter(
202 sessao_legislativa=request.POST['sessao'])
203
204 cargos_ocupados = [m.cargo for m in mesa]
205 cargos = CargoMesa.objects.all()
206 cargos_vagos = list(set(cargos) - set(cargos_ocupados))
207
208 parlamentares = Legislatura.objects.get(
209 id=int(request.POST['legislatura'])).mandato_set.all()
210 parlamentares_ocupados = [m.parlamentar for m in mesa]
211 parlamentares_vagos = list(
212 set(
213 [p.parlamentar for p in parlamentares]) - set(
214 parlamentares_ocupados))
215 return self.render_to_response(
216 {'legislaturas': Legislatura.objects.all(
217 ).order_by('-data_inicio'),
218 'legislatura_selecionada': Legislatura.objects.get(
219 id=int(request.POST['legislatura'])),
220 'sessoes': SessaoLegislativa.objects.filter(
221 legislatura_id=int(request.POST['legislatura'])),
222 'sessao_selecionada': SessaoLegislativa.objects.get(
223 id=int(request.POST['sessao'])),
224 'composicao_mesa': mesa,
225 'parlamentares': parlamentares_vagos,
226 'cargos_vagos': cargos_vagos
227 })
228
[end of parlamentares/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlamentares/views.py b/parlamentares/views.py
--- a/parlamentares/views.py
+++ b/parlamentares/views.py
@@ -27,8 +27,14 @@
TipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')
DependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')
-MandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')
+class MandatoCrud(MasterDetailCrud):
+ model = Mandato
+ parent_field = 'parlamentar'
+ help_path = ''
+
+ class ListView(MasterDetailCrud.ListView):
+ ordering = ('-data_fim_mandato')
class FiliacaoCrud(MasterDetailCrud):
model = Filiacao
| {"golden_diff": "diff --git a/parlamentares/views.py b/parlamentares/views.py\n--- a/parlamentares/views.py\n+++ b/parlamentares/views.py\n@@ -27,8 +27,14 @@\n TipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')\n \n DependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')\n-MandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')\n \n+class MandatoCrud(MasterDetailCrud):\n+ model = Mandato\n+ parent_field = 'parlamentar'\n+ help_path = ''\n+\n+ class ListView(MasterDetailCrud.ListView):\n+ ordering = ('-data_fim_mandato')\n \n class FiliacaoCrud(MasterDetailCrud):\n model = Filiacao\n", "issue": "sapl 31: definir ordem de apresenta\u00e7\u00e3o dos mandatos do parlamentar\nNa fun\u00e7\u00e3o Parlamentar, definir a ordem de apresenta\u00e7\u00e3o dos mandatos do parlamentar. Penso que a ordem decrescente \u00e9 a adequada.\nExemplo: http://sapl31agudo.interlegis.leg.br/parlamentar/21/mandato\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import FormView\n\nimport crud.base\nimport crud.masterdetail\nfrom crud.base import Crud\nfrom crud.masterdetail import MasterDetailCrud\n\nfrom .forms import FiliacaoForm, ParlamentarCreateForm, ParlamentarForm\nfrom .models import (CargoMesa, Coligacao, ComposicaoMesa, Dependente,\n Filiacao, Legislatura, Mandato, NivelInstrucao,\n Parlamentar, Partido, SessaoLegislativa, SituacaoMilitar,\n TipoAfastamento, TipoDependente)\n\nCargoMesaCrud = Crud.build(CargoMesa, 'cargo_mesa')\nLegislaturaCrud = Crud.build(Legislatura, 'tabelas_auxiliares#legislatura')\nColigacaoCrud = Crud.build(Coligacao, 'coligacao')\nPartidoCrud = Crud.build(Partido, 'partidos')\nSessaoLegislativaCrud = Crud.build(SessaoLegislativa, 'sessao_legislativa')\nTipoDependenteCrud = Crud.build(TipoDependente, 'tipo_dependente')\nNivelInstrucaoCrud = Crud.build(NivelInstrucao, 'nivel_instrucao')\nTipoAfastamentoCrud = Crud.build(TipoAfastamento, 'tipo_afastamento')\nTipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')\n\nDependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')\nMandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')\n\n\nclass FiliacaoCrud(MasterDetailCrud):\n model = Filiacao\n parent_field = 'parlamentar'\n help_path = ''\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = FiliacaoForm\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = FiliacaoForm\n\n class ListView(MasterDetailCrud.ListView):\n ordering = '-data'\n\n\nclass ParlamentarCrud(Crud):\n model = Parlamentar\n help_path = ''\n\n class UpdateView(crud.base.CrudUpdateView):\n form_class = ParlamentarForm\n\n class CreateView(crud.base.CrudCreateView):\n form_class = ParlamentarCreateForm\n\n @property\n def layout_key(self):\n return 'ParlamentarCreate'\n\n class ListView(crud.base.CrudListView):\n template_name = \"parlamentares/parlamentares_list.html\"\n paginate_by = None\n\n def take_legislatura_id(self):\n legislaturas = Legislatura.objects.all().order_by(\n '-data_inicio', '-data_fim')\n\n try:\n legislatura_id = int(self.request.GET['periodo'])\n except MultiValueDictKeyError:\n legislatura_id = legislaturas.first().id\n\n return legislatura_id\n\n def get_queryset(self):\n mandatos = Mandato.objects.filter(\n 
legislatura_id=self.take_legislatura_id())\n return mandatos\n\n def get_rows(self, object_list):\n parlamentares = []\n for m in object_list:\n ultima_filiacao = m.parlamentar.filiacao_set.\\\n order_by('-data').first()\n if ultima_filiacao and not ultima_filiacao.data_desfiliacao:\n partido = ultima_filiacao.partido.sigla\n else:\n partido = _('Sem Partido')\n\n parlamentar = [\n (m.parlamentar.nome_parlamentar, m.parlamentar.id),\n (partido, None),\n ('Sim' if m.parlamentar.ativo else 'N\u00e3o', None)\n ]\n parlamentares.append(parlamentar)\n return parlamentares\n\n def get_headers(self):\n return ['Parlamentar', 'Partido', 'Ativo?']\n\n def get_context_data(self, **kwargs):\n context = super(ParlamentarCrud.ListView, self\n ).get_context_data(**kwargs)\n context.setdefault('title', self.verbose_name_plural)\n\n # Adiciona legislatura para filtrar parlamentares\n legislaturas = Legislatura.objects.all().order_by(\n '-data_inicio', '-data_fim')\n context['legislaturas'] = legislaturas\n context['legislatura_id'] = self.take_legislatura_id()\n return context\n\n\nclass MesaDiretoraView(FormView):\n template_name = \"mesa_diretora/mesa_diretora.html\"\n success_url = reverse_lazy('parlamentares:mesa_diretora')\n\n # Essa fun\u00e7\u00e3o avisa quando se pode compor uma Mesa Legislativa)\n def validation(self, request):\n mensagem = _(\"N\u00e3o h\u00e1 nenhuma Sess\u00e3o Legislativa cadastrada. \\\n S\u00f3 \u00e9 poss\u00edvel compor uma Mesa Diretora quando h\u00e1 uma Sess\u00e3o \\\n Legislativa cadastrada.\")\n messages.add_message(request, messages.INFO, mensagem)\n\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.last(),\n 'cargos_vagos': CargoMesa.objects.all()})\n\n def get(self, request, *args, **kwargs):\n\n if (not Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n mesa = SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()).first(\n ).composicaomesa_set.all()\n\n cargos_ocupados = [m.cargo for m in mesa]\n cargos = CargoMesa.objects.all()\n cargos_vagos = list(set(cargos) - set(cargos_ocupados))\n\n parlamentares = Legislatura.objects.last().mandato_set.all()\n parlamentares_ocupados = [m.parlamentar for m in mesa]\n parlamentares_vagos = list(\n set(\n [p.parlamentar for p in parlamentares]) - set(\n parlamentares_ocupados))\n\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.last(),\n 'sessoes': SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()),\n 'sessao_selecionada': SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()).first(),\n 'composicao_mesa': mesa,\n 'parlamentares': parlamentares_vagos,\n 'cargos_vagos': cargos_vagos\n })\n\n def post(self, request, *args, **kwargs):\n if 'Incluir' in request.POST:\n\n if (not Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n composicao = ComposicaoMesa()\n composicao.sessao_legislativa = SessaoLegislativa.objects.get(\n id=int(request.POST['sessao']))\n composicao.parlamentar = Parlamentar.objects.get(\n id=int(request.POST['parlamentar']))\n composicao.cargo = CargoMesa.objects.get(\n id=int(request.POST['cargo']))\n composicao.save()\n\n return redirect('parlamentares:mesa_diretora')\n\n elif 'Excluir' in request.POST:\n\n if (not 
Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n if 'composicao_mesa' in request.POST:\n ids = request.POST['composicao_mesa'].split(':')\n composicao = ComposicaoMesa.objects.get(\n sessao_legislativa_id=int(request.POST['sessao']),\n parlamentar_id=int(ids[0]),\n cargo_id=int(ids[1])\n )\n composicao.delete()\n return redirect('parlamentares:mesa_diretora')\n else:\n mesa = ComposicaoMesa.objects.filter(\n sessao_legislativa=request.POST['sessao'])\n\n cargos_ocupados = [m.cargo for m in mesa]\n cargos = CargoMesa.objects.all()\n cargos_vagos = list(set(cargos) - set(cargos_ocupados))\n\n parlamentares = Legislatura.objects.get(\n id=int(request.POST['legislatura'])).mandato_set.all()\n parlamentares_ocupados = [m.parlamentar for m in mesa]\n parlamentares_vagos = list(\n set(\n [p.parlamentar for p in parlamentares]) - set(\n parlamentares_ocupados))\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.get(\n id=int(request.POST['legislatura'])),\n 'sessoes': SessaoLegislativa.objects.filter(\n legislatura_id=int(request.POST['legislatura'])),\n 'sessao_selecionada': SessaoLegislativa.objects.get(\n id=int(request.POST['sessao'])),\n 'composicao_mesa': mesa,\n 'parlamentares': parlamentares_vagos,\n 'cargos_vagos': cargos_vagos\n })\n", "path": "parlamentares/views.py"}]} | 3,339 | 194 |
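The fix above replaces the generated `MandatoCrud` with an explicit `MasterDetailCrud` subclass whose `ListView` declares `ordering = ('-data_fim_mandato')`, so the most recent mandate is listed first. In plain Django the same effect comes from a descending `ordering` attribute on a class-based `ListView`, where a leading `-` reverses the sort. The snippet below is a generic sketch of that idiom and assumes a configured Django project; `myapp`, `Mandate`, and `end_date` are placeholder names rather than sapl's actual app and model, and sapl routes this through its own `MasterDetailCrud` helpers instead of Django's `ListView` directly.

```python
from django.views.generic import ListView

from myapp.models import Mandate  # hypothetical app and model, for illustration only


class MandateListView(ListView):
    model = Mandate
    # A leading "-" sorts descending, so the most recent mandate is shown first.
    # Django accepts either a single field name or a list/tuple of names here.
    ordering = ["-end_date"]
```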
gh_patches_debug_22326 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-798 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error while training model with config "ner_conll2003_pos"
I'm trying to train a NER model using "train_model(configs.ner.ner_conll2003_pos)" on Colab. There are only three things I've changed in original ner_conll2003_pos.json file: number of epochs = 1, DOWNLOADS_PATH and MODELS_PATH. After I start, it terminates with this error:
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-5-683c58afa1f4> in <module>()
1 from deeppavlov import configs, train_model
----> 2 ner_model = train_model(configs.ner.ner_conll2003_pos)
/usr/local/lib/python3.6/dist-packages/deeppavlov/__init__.py in train_model(config, download, recursive)
29 # TODO: make better
30 def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:
---> 31 train_evaluate_model_from_config(config, download=download, recursive=recursive)
32 return build_model(config, load_trained=True)
33
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/commands/train.py in train_evaluate_model_from_config(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)
119
120 if to_train:
--> 121 trainer.train(iterator)
122
123 res = {}
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train(self, iterator)
292 if callable(getattr(self._chainer, 'train_on_batch', None)):
293 try:
--> 294 self.train_on_batches(iterator)
295 except KeyboardInterrupt:
296 log.info('Stopped training')
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train_on_batches(self, iterator)
232 self.start_time = time.time()
233 if self.validate_first:
--> 234 self._validate(iterator)
235
236 while True:
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in _validate(self, iterator, tensorboard_tag, tensorboard_index)
142 self._send_event(event_name='before_validation')
143 report = self.test(iterator.gen_batches(self.batch_size, data_type='valid', shuffle=False),
--> 144 start_time=self.start_time)
145
146 report['epochs_done'] = self.epoch
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/fit_trainer.py in test(self, data, metrics, start_time, show_examples)
204 for x, y_true in data:
205 examples += len(x)
--> 206 y_predicted = list(self._chainer.compute(list(x), list(y_true), targets=expected_outputs))
207 if len(expected_outputs) == 1:
208 y_predicted = [y_predicted]
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in compute(self, x, y, targets)
141 in_params += self.in_y
142
--> 143 return self._compute(*args, pipe=pipe, param_names=in_params, targets=targets)
144
145 def __call__(self, *args):
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in _compute(***failed resolving arguments***)
167 res = component(**dict(zip(in_keys, x)))
168 else:
--> 169 res = component(*x)
170 if len(out_params) == 1:
171 mem[out_params[0]] = res
/usr/local/lib/python3.6/dist-packages/deeppavlov/models/preprocessors/one_hotter.py in __call__(self, batch, **kwargs)
68 one_hotted_utt = np.sum(one_hotted_utt, axis=0)
69
---> 70 one_hotted_batch.append(one_hotted_utt)
71
72 if self._pad_zeros:
UnboundLocalError: local variable 'one_hotted_utt' referenced before assignment
-----------------------------------------------------------------------------------------------
How can I fix this?
</issue>
<code>
[start of deeppavlov/models/preprocessors/one_hotter.py]
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import List, Union
16
17 import numpy as np
18
19 from deeppavlov.core.common.errors import ConfigError
20 from deeppavlov.core.common.registry import register
21 from deeppavlov.core.data.utils import zero_pad
22 from deeppavlov.core.models.component import Component
23
24
25 @register('one_hotter')
26 class OneHotter(Component):
27 """
28 One-hot featurizer with zero-padding.
29 If ``single_vector``, return the only vector per sample which can have several elements equal to ``1``.
30
31 Parameters:
32 depth: the depth for one-hotting
33 pad_zeros: whether to pad elements of batch with zeros
34 single_vector: whether to return one vector for the sample (sum of each one-hotted vectors)
35 """
36 def __init__(self, depth: int, pad_zeros: bool = False,
37 single_vector=False, *args, **kwargs):
38 self._depth = depth
39 self._pad_zeros = pad_zeros
40 self.single_vector = single_vector
41 if self._pad_zeros and self.single_vector:
42 raise ConfigError("Cannot perform ``single_vector`` with zero padding for OneHotter")
43
44 def __call__(self, batch: List[List[int]], **kwargs) -> Union[List[List[np.ndarray]], List[np.ndarray]]:
45 """
46 Convert given batch of list of labels to one-hot representation of the batch.
47
48 Args:
49 batch: list of samples, where each sample is a list of integer labels.
50 **kwargs: additional arguments
51
52 Returns:
53 if ``single_vector``, list of one-hot representations of each sample,
54 otherwise, list of lists of one-hot representations of each label in a sample
55 """
56 one_hotted_batch = []
57
58 for utt in batch:
59 if isinstance(utt, list):
60 one_hotted_utt = self._to_one_hot(utt, self._depth)
61 elif isinstance(utt, int):
62 if self._pad_zeros or self.single_vector:
63 one_hotted_utt = self._to_one_hot([utt], self._depth)
64 else:
65 one_hotted_utt = self._to_one_hot([utt], self._depth).reshape(-1)
66
67 if self.single_vector:
68 one_hotted_utt = np.sum(one_hotted_utt, axis=0)
69
70 one_hotted_batch.append(one_hotted_utt)
71
72 if self._pad_zeros:
73 one_hotted_batch = zero_pad(one_hotted_batch)
74 return one_hotted_batch
75
76 @staticmethod
77 def _to_one_hot(x, n):
78 b = np.zeros([len(x), n], dtype=np.float32)
79 for q, tok in enumerate(x):
80 b[q, tok] = 1
81 return b
82
[end of deeppavlov/models/preprocessors/one_hotter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deeppavlov/models/preprocessors/one_hotter.py b/deeppavlov/models/preprocessors/one_hotter.py
--- a/deeppavlov/models/preprocessors/one_hotter.py
+++ b/deeppavlov/models/preprocessors/one_hotter.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Union
+from typing import List, Union, Iterable
import numpy as np
@@ -56,7 +56,7 @@
one_hotted_batch = []
for utt in batch:
- if isinstance(utt, list):
+ if isinstance(utt, Iterable):
one_hotted_utt = self._to_one_hot(utt, self._depth)
elif isinstance(utt, int):
if self._pad_zeros or self.single_vector:
@@ -77,5 +77,5 @@
def _to_one_hot(x, n):
b = np.zeros([len(x), n], dtype=np.float32)
for q, tok in enumerate(x):
- b[q, tok] = 1
+ b[q, int(tok)] = 1
return b
| {"golden_diff": "diff --git a/deeppavlov/models/preprocessors/one_hotter.py b/deeppavlov/models/preprocessors/one_hotter.py\n--- a/deeppavlov/models/preprocessors/one_hotter.py\n+++ b/deeppavlov/models/preprocessors/one_hotter.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from typing import List, Union\n+from typing import List, Union, Iterable\n \n import numpy as np\n \n@@ -56,7 +56,7 @@\n one_hotted_batch = []\n \n for utt in batch:\n- if isinstance(utt, list):\n+ if isinstance(utt, Iterable):\n one_hotted_utt = self._to_one_hot(utt, self._depth)\n elif isinstance(utt, int):\n if self._pad_zeros or self.single_vector:\n@@ -77,5 +77,5 @@\n def _to_one_hot(x, n):\n b = np.zeros([len(x), n], dtype=np.float32)\n for q, tok in enumerate(x):\n- b[q, tok] = 1\n+ b[q, int(tok)] = 1\n return b\n", "issue": "Error while training model with config \"ner_conll2003_pos\"\nI'm trying to train a NER model using \"train_model(configs.ner.ner_conll2003_pos)\" on Colab. There are only three things I've changed in original ner_conll2003_pos.json file: number of epochs = 1, DOWNLOADS_PATH and MODELS_PATH. After I start, it terminates with this error:\r\n\r\n---------------------------------------------------------------------------\r\nUnboundLocalError Traceback (most recent call last)\r\n<ipython-input-5-683c58afa1f4> in <module>()\r\n 1 from deeppavlov import configs, train_model\r\n----> 2 ner_model = train_model(configs.ner.ner_conll2003_pos)\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/__init__.py in train_model(config, download, recursive)\r\n 29 # TODO: make better\r\n 30 def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:\r\n---> 31 train_evaluate_model_from_config(config, download=download, recursive=recursive)\r\n 32 return build_model(config, load_trained=True)\r\n 33 \r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/commands/train.py in train_evaluate_model_from_config(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)\r\n 119 \r\n 120 if to_train:\r\n--> 121 trainer.train(iterator)\r\n 122 \r\n 123 res = {}\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train(self, iterator)\r\n 292 if callable(getattr(self._chainer, 'train_on_batch', None)):\r\n 293 try:\r\n--> 294 self.train_on_batches(iterator)\r\n 295 except KeyboardInterrupt:\r\n 296 log.info('Stopped training')\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train_on_batches(self, iterator)\r\n 232 self.start_time = time.time()\r\n 233 if self.validate_first:\r\n--> 234 self._validate(iterator)\r\n 235 \r\n 236 while True:\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in _validate(self, iterator, tensorboard_tag, tensorboard_index)\r\n 142 self._send_event(event_name='before_validation')\r\n 143 report = self.test(iterator.gen_batches(self.batch_size, data_type='valid', shuffle=False),\r\n--> 144 start_time=self.start_time)\r\n 145 \r\n 146 report['epochs_done'] = self.epoch\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/fit_trainer.py in test(self, data, metrics, start_time, show_examples)\r\n 204 for x, y_true in data:\r\n 205 examples += len(x)\r\n--> 206 y_predicted = list(self._chainer.compute(list(x), list(y_true), targets=expected_outputs))\r\n 207 if len(expected_outputs) == 
1:\r\n 208 y_predicted = [y_predicted]\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in compute(self, x, y, targets)\r\n 141 in_params += self.in_y\r\n 142 \r\n--> 143 return self._compute(*args, pipe=pipe, param_names=in_params, targets=targets)\r\n 144 \r\n 145 def __call__(self, *args):\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in _compute(***failed resolving arguments***)\r\n 167 res = component(**dict(zip(in_keys, x)))\r\n 168 else:\r\n--> 169 res = component(*x)\r\n 170 if len(out_params) == 1:\r\n 171 mem[out_params[0]] = res\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/models/preprocessors/one_hotter.py in __call__(self, batch, **kwargs)\r\n 68 one_hotted_utt = np.sum(one_hotted_utt, axis=0)\r\n 69 \r\n---> 70 one_hotted_batch.append(one_hotted_utt)\r\n 71 \r\n 72 if self._pad_zeros:\r\n\r\nUnboundLocalError: local variable 'one_hotted_utt' referenced before assignment\r\n-----------------------------------------------------------------------------------------------\r\n\r\nHow can I fix this? \n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Union\n\nimport numpy as np\n\nfrom deeppavlov.core.common.errors import ConfigError\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.data.utils import zero_pad\nfrom deeppavlov.core.models.component import Component\n\n\n@register('one_hotter')\nclass OneHotter(Component):\n \"\"\"\n One-hot featurizer with zero-padding.\n If ``single_vector``, return the only vector per sample which can have several elements equal to ``1``.\n\n Parameters:\n depth: the depth for one-hotting\n pad_zeros: whether to pad elements of batch with zeros\n single_vector: whether to return one vector for the sample (sum of each one-hotted vectors)\n \"\"\"\n def __init__(self, depth: int, pad_zeros: bool = False,\n single_vector=False, *args, **kwargs):\n self._depth = depth\n self._pad_zeros = pad_zeros\n self.single_vector = single_vector\n if self._pad_zeros and self.single_vector:\n raise ConfigError(\"Cannot perform ``single_vector`` with zero padding for OneHotter\")\n\n def __call__(self, batch: List[List[int]], **kwargs) -> Union[List[List[np.ndarray]], List[np.ndarray]]:\n \"\"\"\n Convert given batch of list of labels to one-hot representation of the batch.\n\n Args:\n batch: list of samples, where each sample is a list of integer labels.\n **kwargs: additional arguments\n\n Returns:\n if ``single_vector``, list of one-hot representations of each sample,\n otherwise, list of lists of one-hot representations of each label in a sample\n \"\"\"\n one_hotted_batch = []\n\n for utt in batch:\n if isinstance(utt, list):\n one_hotted_utt = self._to_one_hot(utt, self._depth)\n elif isinstance(utt, int):\n if self._pad_zeros or self.single_vector:\n one_hotted_utt = self._to_one_hot([utt], self._depth)\n else:\n one_hotted_utt = 
self._to_one_hot([utt], self._depth).reshape(-1)\n\n if self.single_vector:\n one_hotted_utt = np.sum(one_hotted_utt, axis=0)\n\n one_hotted_batch.append(one_hotted_utt)\n\n if self._pad_zeros:\n one_hotted_batch = zero_pad(one_hotted_batch)\n return one_hotted_batch\n\n @staticmethod\n def _to_one_hot(x, n):\n b = np.zeros([len(x), n], dtype=np.float32)\n for q, tok in enumerate(x):\n b[q, tok] = 1\n return b\n", "path": "deeppavlov/models/preprocessors/one_hotter.py"}]} | 2,506 | 275 |
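The patch above makes `OneHotter` accept any `Iterable` rather than only `list`, and casts each label with `int(...)`, which suggests the POS-tag features in `ner_conll2003_pos` reach it as numpy arrays instead of plain Python lists. The function below is a minimal sketch of the corrected behaviour, written independently of DeepPavlov's classes; the name `to_one_hot` and the example values are illustrative only.

```python
import numpy as np


def to_one_hot(labels, depth: int) -> np.ndarray:
    """Sketch of the fixed helper: tolerate numpy inputs and numpy label types."""
    if isinstance(labels, int):
        labels = [labels]
    out = np.zeros((len(labels), depth), dtype=np.float32)
    for row, label in enumerate(labels):
        # int() also covers numpy integer scalars such as np.int64.
        out[row, int(label)] = 1.0
    return out


# A numpy array input like this is what tripped the old `isinstance(utt, list)` check.
print(to_one_hot(np.array([2, 0, 1]), depth=4))
```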
gh_patches_debug_43152 | rasdani/github-patches | git_diff | sktime__sktime-3561 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `CNNRegressor` missing essential initializing parameters
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
`CNNRegressor` misses parameters like `optimizer` in its constructor. There might be more things which don't coincide with `CNNClassifier` which should be added but since I'm not sure what all things might be missing, I'm opening this issue to bring it to notice.
</issue>
<code>
[start of sktime/regression/deep_learning/cnn.py]
1 # -*- coding: utf-8 -*-
2 """Time Convolutional Neural Network (CNN) for regression."""
3
4 __author__ = ["AurumnPegasus"]
5 __all__ = ["CNNRegressor"]
6
7 from sktime.networks.cnn import CNNNetwork
8 from sktime.regression.deep_learning.base import BaseDeepRegressor
9 from sktime.utils.validation._dependencies import _check_dl_dependencies
10
11 _check_dl_dependencies(severity="warning")
12
13
14 class CNNRegressor(BaseDeepRegressor):
15 """Time Convolutional Neural Network (CNN), as described in [1].
16
17 Parameters
18 ----------
19 should inherited fields be listed here?
20 n_epochs : int, default = 2000
21 the number of epochs to train the model
22 batch_size : int, default = 16
23 the number of samples per gradient update.
24 kernel_size : int, default = 7
25 the length of the 1D convolution window
26 avg_pool_size : int, default = 3
27 size of the average pooling windows
28 n_conv_layers : int, default = 2
29 the number of convolutional plus average pooling layers
30 filter_sizes : array of shape (n_conv_layers) default = [6, 12]
31 random_state : int or None, default=None
32 Seed for random number generation.
33 verbose : boolean, default = False
34 whether to output extra information
35 loss : string, default="mean_squared_error"
36 fit parameter for the keras model
37 optimizer : keras.optimizer, default=keras.optimizers.Adam(),
38 metrics : list of strings, default=["accuracy"],
39
40 Notes
41 -----
42 .. [1] Zhao et. al, Convolutional neural networks for
43 time series classification, Journal of
44 Systems Engineering and Electronics, 28(1):2017.
45
46 Adapted from the implementation from Fawaz et. al
47 https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py
48 """
49
50 def __init__(
51 self,
52 n_epochs=2000,
53 batch_size=16,
54 kernel_size=7,
55 avg_pool_size=3,
56 n_conv_layers=2,
57 callbacks=None,
58 verbose=False,
59 loss="mean_squared_error",
60 metrics=None,
61 random_seed=0,
62 ):
63 _check_dl_dependencies(severity="error")
64 super(CNNRegressor, self).__init__(
65 batch_size=batch_size,
66 )
67 self.n_conv_layers = n_conv_layers
68 self.avg_pool_size = avg_pool_size
69 self.kernel_size = kernel_size
70 self.callbacks = callbacks
71 self.n_epochs = n_epochs
72 self.batch_size = batch_size
73 self.verbose = verbose
74 self.loss = loss
75 self.metrics = metrics
76 self.random_seed = random_seed
77 self._network = CNNNetwork()
78
79 def build_model(self, input_shape, **kwargs):
80 """Construct a compiled, un-trained, keras model that is ready for training.
81
82 In sktime, time series are stored in numpy arrays of shape (d,m), where d
83 is the number of dimensions, m is the series length. Keras/tensorflow assume
84 data is in shape (m,d). This method also assumes (m,d). Transpose should
85 happen in fit.
86
87 Parameters
88 ----------
89 input_shape : tuple
90 The shape of the data fed into the input layer, should be (m,d)
91
92 Returns
93 -------
94 output : a compiled Keras Model
95 """
96 import tensorflow as tf
97 from tensorflow import keras
98
99 tf.random.set_seed(self.random_seed)
100
101 if self.metrics is None:
102 metrics = ["accuracy"]
103 else:
104 metrics = self.metrics
105
106 input_layer, output_layer = self._network.build_network(input_shape, **kwargs)
107
108 output_layer = keras.layers.Dense(units=1, activation="sigmoid")(output_layer)
109
110 model = keras.models.Model(inputs=input_layer, outputs=output_layer)
111
112 model.compile(
113 loss=self.loss,
114 optimizer=keras.optimizers.Adam(),
115 metrics=metrics,
116 )
117 return model
118
119 def _fit(self, X, y):
120 """Fit the classifier on the training set (X, y).
121
122 Parameters
123 ----------
124 X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))
125 The training input samples.
126 y : np.ndarray of shape n
127 The training data class labels.
128
129 Returns
130 -------
131 self : object
132 """
133 if self.callbacks is None:
134 self._callbacks = []
135
136 # Transpose to conform to Keras input style.
137 X = X.transpose(0, 2, 1)
138
139 self.input_shape = X.shape[1:]
140 self.model_ = self.build_model(self.input_shape)
141 if self.verbose:
142 self.model.summary()
143
144 self.history = self.model_.fit(
145 X,
146 y,
147 batch_size=self.batch_size,
148 epochs=self.n_epochs,
149 verbose=self.verbose,
150 callbacks=self._callbacks,
151 )
152 return self
153
[end of sktime/regression/deep_learning/cnn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/regression/deep_learning/cnn.py b/sktime/regression/deep_learning/cnn.py
--- a/sktime/regression/deep_learning/cnn.py
+++ b/sktime/regression/deep_learning/cnn.py
@@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
"""Time Convolutional Neural Network (CNN) for regression."""
-__author__ = ["AurumnPegasus"]
+__author__ = ["AurumnPegasus", "achieveordie"]
__all__ = ["CNNRegressor"]
+from sklearn.utils import check_random_state
+
from sktime.networks.cnn import CNNNetwork
from sktime.regression.deep_learning.base import BaseDeepRegressor
from sktime.utils.validation._dependencies import _check_dl_dependencies
@@ -34,7 +36,12 @@
whether to output extra information
loss : string, default="mean_squared_error"
fit parameter for the keras model
- optimizer : keras.optimizer, default=keras.optimizers.Adam(),
+ activation : keras.activations or string, default `linear`
+ function to use in the output layer.
+ optimizer : keras.optimizers or string, default `None`.
+ when `None`, internally uses `keras.optimizers.Adam(0.01)`
+ use_bias : bool, default=True
+ whether to use bias in the output layer.
metrics : list of strings, default=["accuracy"],
Notes
@@ -58,7 +65,10 @@
verbose=False,
loss="mean_squared_error",
metrics=None,
- random_seed=0,
+ random_state=0,
+ activation="linear",
+ use_bias=True,
+ optimizer=None,
):
_check_dl_dependencies(severity="error")
super(CNNRegressor, self).__init__(
@@ -73,7 +83,11 @@
self.verbose = verbose
self.loss = loss
self.metrics = metrics
- self.random_seed = random_seed
+ self.random_state = random_state
+ self.activation = activation
+ self.use_bias = use_bias
+ self.optimizer = optimizer
+ self.history = None
self._network = CNNNetwork()
def build_model(self, input_shape, **kwargs):
@@ -96,7 +110,7 @@
import tensorflow as tf
from tensorflow import keras
- tf.random.set_seed(self.random_seed)
+ tf.random.set_seed(self.random_state)
if self.metrics is None:
metrics = ["accuracy"]
@@ -105,13 +119,23 @@
input_layer, output_layer = self._network.build_network(input_shape, **kwargs)
- output_layer = keras.layers.Dense(units=1, activation="sigmoid")(output_layer)
+ output_layer = keras.layers.Dense(
+ units=1,
+ activation=self.activation,
+ use_bias=self.use_bias,
+ )(output_layer)
+
+ self.optimizer_ = (
+ keras.optimizers.Adam(learning_rate=0.01)
+ if self.optimizer is None
+ else self.optimizer
+ )
model = keras.models.Model(inputs=input_layer, outputs=output_layer)
model.compile(
loss=self.loss,
- optimizer=keras.optimizers.Adam(),
+ optimizer=self.optimizer_,
metrics=metrics,
)
return model
@@ -136,6 +160,7 @@
# Transpose to conform to Keras input style.
X = X.transpose(0, 2, 1)
+ check_random_state(self.random_state)
self.input_shape = X.shape[1:]
self.model_ = self.build_model(self.input_shape)
if self.verbose:
| {"golden_diff": "diff --git a/sktime/regression/deep_learning/cnn.py b/sktime/regression/deep_learning/cnn.py\n--- a/sktime/regression/deep_learning/cnn.py\n+++ b/sktime/regression/deep_learning/cnn.py\n@@ -1,9 +1,11 @@\n # -*- coding: utf-8 -*-\n \"\"\"Time Convolutional Neural Network (CNN) for regression.\"\"\"\n \n-__author__ = [\"AurumnPegasus\"]\n+__author__ = [\"AurumnPegasus\", \"achieveordie\"]\n __all__ = [\"CNNRegressor\"]\n \n+from sklearn.utils import check_random_state\n+\n from sktime.networks.cnn import CNNNetwork\n from sktime.regression.deep_learning.base import BaseDeepRegressor\n from sktime.utils.validation._dependencies import _check_dl_dependencies\n@@ -34,7 +36,12 @@\n whether to output extra information\n loss : string, default=\"mean_squared_error\"\n fit parameter for the keras model\n- optimizer : keras.optimizer, default=keras.optimizers.Adam(),\n+ activation : keras.activations or string, default `linear`\n+ function to use in the output layer.\n+ optimizer : keras.optimizers or string, default `None`.\n+ when `None`, internally uses `keras.optimizers.Adam(0.01)`\n+ use_bias : bool, default=True\n+ whether to use bias in the output layer.\n metrics : list of strings, default=[\"accuracy\"],\n \n Notes\n@@ -58,7 +65,10 @@\n verbose=False,\n loss=\"mean_squared_error\",\n metrics=None,\n- random_seed=0,\n+ random_state=0,\n+ activation=\"linear\",\n+ use_bias=True,\n+ optimizer=None,\n ):\n _check_dl_dependencies(severity=\"error\")\n super(CNNRegressor, self).__init__(\n@@ -73,7 +83,11 @@\n self.verbose = verbose\n self.loss = loss\n self.metrics = metrics\n- self.random_seed = random_seed\n+ self.random_state = random_state\n+ self.activation = activation\n+ self.use_bias = use_bias\n+ self.optimizer = optimizer\n+ self.history = None\n self._network = CNNNetwork()\n \n def build_model(self, input_shape, **kwargs):\n@@ -96,7 +110,7 @@\n import tensorflow as tf\n from tensorflow import keras\n \n- tf.random.set_seed(self.random_seed)\n+ tf.random.set_seed(self.random_state)\n \n if self.metrics is None:\n metrics = [\"accuracy\"]\n@@ -105,13 +119,23 @@\n \n input_layer, output_layer = self._network.build_network(input_shape, **kwargs)\n \n- output_layer = keras.layers.Dense(units=1, activation=\"sigmoid\")(output_layer)\n+ output_layer = keras.layers.Dense(\n+ units=1,\n+ activation=self.activation,\n+ use_bias=self.use_bias,\n+ )(output_layer)\n+\n+ self.optimizer_ = (\n+ keras.optimizers.Adam(learning_rate=0.01)\n+ if self.optimizer is None\n+ else self.optimizer\n+ )\n \n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n \n model.compile(\n loss=self.loss,\n- optimizer=keras.optimizers.Adam(),\n+ optimizer=self.optimizer_,\n metrics=metrics,\n )\n return model\n@@ -136,6 +160,7 @@\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n \n+ check_random_state(self.random_state)\n self.input_shape = X.shape[1:]\n self.model_ = self.build_model(self.input_shape)\n if self.verbose:\n", "issue": "[BUG] `CNNRegressor` missing essential initializing parameters\n**Describe the bug**\r\n<!--\r\nA clear and concise description of what the bug is.\r\n-->\r\n`CNNRegressor` misses parameters like `optimizer` in its constructor. 
There might be more things which don't coincide with `CNNClassifier` which should be added but since I'm not sure what all things might be missing, I'm opening this issue to bring it to notice.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Time Convolutional Neural Network (CNN) for regression.\"\"\"\n\n__author__ = [\"AurumnPegasus\"]\n__all__ = [\"CNNRegressor\"]\n\nfrom sktime.networks.cnn import CNNNetwork\nfrom sktime.regression.deep_learning.base import BaseDeepRegressor\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass CNNRegressor(BaseDeepRegressor):\n \"\"\"Time Convolutional Neural Network (CNN), as described in [1].\n\n Parameters\n ----------\n should inherited fields be listed here?\n n_epochs : int, default = 2000\n the number of epochs to train the model\n batch_size : int, default = 16\n the number of samples per gradient update.\n kernel_size : int, default = 7\n the length of the 1D convolution window\n avg_pool_size : int, default = 3\n size of the average pooling windows\n n_conv_layers : int, default = 2\n the number of convolutional plus average pooling layers\n filter_sizes : array of shape (n_conv_layers) default = [6, 12]\n random_state : int or None, default=None\n Seed for random number generation.\n verbose : boolean, default = False\n whether to output extra information\n loss : string, default=\"mean_squared_error\"\n fit parameter for the keras model\n optimizer : keras.optimizer, default=keras.optimizers.Adam(),\n metrics : list of strings, default=[\"accuracy\"],\n\n Notes\n -----\n .. [1] Zhao et. al, Convolutional neural networks for\n time series classification, Journal of\n Systems Engineering and Electronics, 28(1):2017.\n\n Adapted from the implementation from Fawaz et. al\n https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py\n \"\"\"\n\n def __init__(\n self,\n n_epochs=2000,\n batch_size=16,\n kernel_size=7,\n avg_pool_size=3,\n n_conv_layers=2,\n callbacks=None,\n verbose=False,\n loss=\"mean_squared_error\",\n metrics=None,\n random_seed=0,\n ):\n _check_dl_dependencies(severity=\"error\")\n super(CNNRegressor, self).__init__(\n batch_size=batch_size,\n )\n self.n_conv_layers = n_conv_layers\n self.avg_pool_size = avg_pool_size\n self.kernel_size = kernel_size\n self.callbacks = callbacks\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.verbose = verbose\n self.loss = loss\n self.metrics = metrics\n self.random_seed = random_seed\n self._network = CNNNetwork()\n\n def build_model(self, input_shape, **kwargs):\n \"\"\"Construct a compiled, un-trained, keras model that is ready for training.\n\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\n is the number of dimensions, m is the series length. Keras/tensorflow assume\n data is in shape (m,d). This method also assumes (m,d). 
Transpose should\n happen in fit.\n\n Parameters\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer, should be (m,d)\n\n Returns\n -------\n output : a compiled Keras Model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_seed)\n\n if self.metrics is None:\n metrics = [\"accuracy\"]\n else:\n metrics = self.metrics\n\n input_layer, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(units=1, activation=\"sigmoid\")(output_layer)\n\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n\n model.compile(\n loss=self.loss,\n optimizer=keras.optimizers.Adam(),\n metrics=metrics,\n )\n return model\n\n def _fit(self, X, y):\n \"\"\"Fit the classifier on the training set (X, y).\n\n Parameters\n ----------\n X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))\n The training input samples.\n y : np.ndarray of shape n\n The training data class labels.\n\n Returns\n -------\n self : object\n \"\"\"\n if self.callbacks is None:\n self._callbacks = []\n\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n\n self.input_shape = X.shape[1:]\n self.model_ = self.build_model(self.input_shape)\n if self.verbose:\n self.model.summary()\n\n self.history = self.model_.fit(\n X,\n y,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n return self\n", "path": "sktime/regression/deep_learning/cnn.py"}]} | 2,088 | 828 |
gh_patches_debug_5953 | rasdani/github-patches | git_diff | dask__distributed-1331 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValueError / hang on LocalCluster startup
I'm loading my conda environment off NAS storage, and when usage is heavy, there can be long delays in loading things, which I believe can exceed the default timeout (five seconds) here:
https://github.com/dask/distributed/blob/master/distributed/client.py#L428
As a result I see this on a call to `LocalCluster()`:
Traceback (most recent call last):
File "lib/python3.5/site-packages/tornado/gen.py", line 910, in error_callback
future.result()
File "lib/python3.5/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "<string>", line 4, in raise_exc_info
File "lib/python3.5/site-packages/tornado/gen.py", line 1063, in run
yielded = self.gen.throw(*exc_info)
File "lib/python3.5/site-packages/distributed/nanny.py", line 300, in start
yield self._wait_until_running()
File "lib/python3.5/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "lib/python3.5/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "<string>", line 4, in raise_exc_info
File "lib/python3.5/site-packages/tornado/gen.py", line 1069, in run
yielded = self.gen.send(value)
File "lib/python3.5/site-packages/distributed/nanny.py", line 386, in _wait_until_running
raise ValueError("Worker not started")
ValueError: Worker not started
This `ValueError` does not seem to be passed up to my application; my application seems frozen waiting for cluster startup (not 100% sure where), i.e., it never returns from the `LocalCluster()` call. The multiprocessing pool does actually start up, but in this case too late to matter.
How can I catch / retry this situation?
</issue>
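The question above comes down to `Nanny.start` giving workers a fixed window to come up, with the resulting `ValueError` swallowed by a callback so `LocalCluster()` just hangs. Until that wait is configurable and its failure propagates, one blunt caller-side workaround is to build the cluster in a helper thread and retry attempts that hang. The sketch below is only an illustration under that assumption; `attempts` and `startup_timeout` are knobs of this helper, not `distributed` options.

```python
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeout

from distributed import LocalCluster


def start_local_cluster(attempts=3, startup_timeout=60, **cluster_kwargs):
    """Build a LocalCluster, retrying attempts that hang or fail.

    A hung attempt leaks its worker thread until interpreter exit;
    this is a sketch of a workaround, not a proper fix.
    """
    last_error = None
    for _ in range(attempts):
        pool = ThreadPoolExecutor(max_workers=1)
        future = pool.submit(LocalCluster, **cluster_kwargs)
        try:
            return future.result(timeout=startup_timeout)
        except (FutureTimeout, ValueError) as error:
            # Covers both the silent hang and a propagated
            # ValueError("Worker not started").
            last_error = error
        finally:
            # Do not wait here for a possibly stuck constructor call.
            pool.shutdown(wait=False)
    raise RuntimeError("LocalCluster did not start after retries") from last_error
```

A cleaner long-term fix is to make the wait in `Nanny._wait_until_running` configurable and to surface its failure out of `LocalCluster()`, which is closer to what the report is really asking for.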
<code>
[start of distributed/cli/dask_worker.py]
1 from __future__ import print_function, division, absolute_import
2
3 import atexit
4 from datetime import timedelta
5 from functools import partial
6 import json
7 import logging
8 import os
9 import shutil
10 import signal
11 from sys import exit
12 from time import sleep
13
14 import click
15 from distributed import Nanny, Worker, rpc
16 from distributed.utils import All, get_ip_interface
17 from distributed.worker import _ncores
18 from distributed.http import HTTPWorker
19 from distributed.metrics import time
20 from distributed.security import Security
21 from distributed.cli.utils import check_python_3, uri_from_host_port
22
23 from toolz import valmap
24 from tornado.ioloop import IOLoop, TimeoutError
25 from tornado import gen
26
27 logger = logging.getLogger('distributed.dask_worker')
28
29
30 pem_file_option_type = click.Path(exists=True, resolve_path=True)
31
32 @click.command()
33 @click.argument('scheduler', type=str, required=False)
34 @click.option('--tls-ca-file', type=pem_file_option_type, default=None,
35 help="CA cert(s) file for TLS (in PEM format)")
36 @click.option('--tls-cert', type=pem_file_option_type, default=None,
37 help="certificate file for TLS (in PEM format)")
38 @click.option('--tls-key', type=pem_file_option_type, default=None,
39 help="private key file for TLS (in PEM format)")
40 @click.option('--worker-port', type=int, default=0,
41 help="Serving computation port, defaults to random")
42 @click.option('--http-port', type=int, default=0,
43 help="Serving http port, defaults to random")
44 @click.option('--nanny-port', type=int, default=0,
45 help="Serving nanny port, defaults to random")
46 @click.option('--bokeh-port', type=int, default=8789,
47 help="Bokeh port, defaults to 8789")
48 @click.option('--bokeh/--no-bokeh', 'bokeh', default=True, show_default=True,
49 required=False, help="Launch Bokeh Web UI")
50 @click.option('--host', type=str, default=None,
51 help="Serving host. Should be an ip address that is"
52 " visible to the scheduler and other workers. "
53 "See --interface.")
54 @click.option('--interface', type=str, default=None,
55 help="Network interface like 'eth0' or 'ib0'")
56 @click.option('--nthreads', type=int, default=0,
57 help="Number of threads per process.")
58 @click.option('--nprocs', type=int, default=1,
59 help="Number of worker processes. Defaults to one.")
60 @click.option('--name', type=str, default='',
61 help="A unique name for this worker like 'worker-1'")
62 @click.option('--memory-limit', default='auto',
63 help="Number of bytes before spilling data to disk. "
64 "This can be an integer (nbytes) "
65 "float (fraction of total memory) "
66 "or 'auto'")
67 @click.option('--reconnect/--no-reconnect', default=True,
68 help="Reconnect to scheduler if disconnected")
69 @click.option('--nanny/--no-nanny', default=True,
70 help="Start workers in nanny process for management")
71 @click.option('--pid-file', type=str, default='',
72 help="File to write the process PID")
73 @click.option('--local-directory', default='', type=str,
74 help="Directory to place worker files")
75 @click.option('--resources', type=str, default='',
76 help='Resources for task constraints like "GPU=2 MEM=10e9"')
77 @click.option('--scheduler-file', type=str, default='',
78 help='Filename to JSON encoded scheduler information. '
79 'Use with dask-scheduler --scheduler-file')
80 @click.option('--death-timeout', type=float, default=None,
81 help="Seconds to wait for a scheduler before closing")
82 @click.option('--bokeh-prefix', type=str, default=None,
83 help="Prefix for the bokeh app")
84 @click.option('--preload', type=str, multiple=True,
85 help='Module that should be loaded by each worker process '
86 'like "foo.bar" or "/path/to/foo.py"')
87 def main(scheduler, host, worker_port, http_port, nanny_port, nthreads, nprocs,
88 nanny, name, memory_limit, pid_file, reconnect,
89 resources, bokeh, bokeh_port, local_directory, scheduler_file,
90 interface, death_timeout, preload, bokeh_prefix,
91 tls_ca_file, tls_cert, tls_key):
92 sec = Security(tls_ca_file=tls_ca_file,
93 tls_worker_cert=tls_cert,
94 tls_worker_key=tls_key,
95 )
96
97 if nanny:
98 port = nanny_port
99 else:
100 port = worker_port
101
102 if nprocs > 1 and worker_port != 0:
103 logger.error("Failed to launch worker. You cannot use the --port argument when nprocs > 1.")
104 exit(1)
105
106 if nprocs > 1 and name:
107 logger.error("Failed to launch worker. You cannot use the --name argument when nprocs > 1.")
108 exit(1)
109
110 if nprocs > 1 and not nanny:
111 logger.error("Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1.")
112 exit(1)
113
114 if not nthreads:
115 nthreads = _ncores // nprocs
116
117 if pid_file:
118 with open(pid_file, 'w') as f:
119 f.write(str(os.getpid()))
120
121 def del_pid_file():
122 if os.path.exists(pid_file):
123 os.remove(pid_file)
124 atexit.register(del_pid_file)
125
126 services = {('http', http_port): HTTPWorker}
127
128 if bokeh:
129 try:
130 from distributed.bokeh.worker import BokehWorker
131 except ImportError:
132 pass
133 else:
134 if bokeh_prefix:
135 result = (BokehWorker, {'prefix': bokeh_prefix})
136 else:
137 result = BokehWorker
138 services[('bokeh', bokeh_port)] = result
139
140 if resources:
141 resources = resources.replace(',', ' ').split()
142 resources = dict(pair.split('=') for pair in resources)
143 resources = valmap(float, resources)
144 else:
145 resources = None
146
147 loop = IOLoop.current()
148
149 if nanny:
150 kwargs = {'worker_port': worker_port}
151 t = Nanny
152 else:
153 kwargs = {}
154 if nanny_port:
155 kwargs['service_ports'] = {'nanny': nanny_port}
156 t = Worker
157
158 if scheduler_file:
159 while not os.path.exists(scheduler_file):
160 sleep(0.01)
161 for i in range(10):
162 try:
163 with open(scheduler_file) as f:
164 cfg = json.load(f)
165 scheduler = cfg['address']
166 break
167 except (ValueError, KeyError): # race with scheduler on file
168 sleep(0.01)
169
170 if not scheduler:
171 raise ValueError("Need to provide scheduler address like\n"
172 "dask-worker SCHEDULER_ADDRESS:8786")
173
174 if interface:
175 if host:
176 raise ValueError("Can not specify both interface and host")
177 else:
178 host = get_ip_interface(interface)
179
180 if host or port:
181 addr = uri_from_host_port(host, port, 0)
182 else:
183 # Choose appropriate address for scheduler
184 addr = None
185
186 nannies = [t(scheduler, ncores=nthreads,
187 services=services, name=name, loop=loop, resources=resources,
188 memory_limit=memory_limit, reconnect=reconnect,
189 local_dir=local_directory, death_timeout=death_timeout,
190 preload=preload, security=sec,
191 **kwargs)
192 for i in range(nprocs)]
193
194 @gen.coroutine
195 def close_all():
196 try:
197 if nanny:
198 yield [n._close(timeout=2) for n in nannies]
199 finally:
200 loop.stop()
201
202 def handle_signal(signum, frame):
203 logger.info("Exiting on signal %d", signum)
204 if loop._running:
205 loop.add_callback_from_signal(loop.stop)
206 else:
207 exit(0)
208
209 # NOTE: We can't use the generic install_signal_handlers() function from
210 # distributed.cli.utils because we're handling the signal differently.
211 signal.signal(signal.SIGINT, handle_signal)
212 signal.signal(signal.SIGTERM, handle_signal)
213
214 for n in nannies:
215 n.start(addr)
216
217 @gen.coroutine
218 def run():
219 while all(n.status != 'closed' for n in nannies):
220 yield gen.sleep(0.2)
221
222 try:
223 loop.run_sync(run)
224 except (KeyboardInterrupt, TimeoutError):
225 pass
226 finally:
227 logger.info("End worker")
228
229 # Clean exit: unregister all workers from scheduler
230 loop.run_sync(close_all)
231
232
233 def go():
234 check_python_3()
235 main()
236
237 if __name__ == '__main__':
238 go()
239
[end of distributed/cli/dask_worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/cli/dask_worker.py b/distributed/cli/dask_worker.py
--- a/distributed/cli/dask_worker.py
+++ b/distributed/cli/dask_worker.py
@@ -211,11 +211,9 @@
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
- for n in nannies:
- n.start(addr)
-
@gen.coroutine
def run():
+ yield [n.start(addr) for n in nannies]
while all(n.status != 'closed' for n in nannies):
yield gen.sleep(0.2)
| {"golden_diff": "diff --git a/distributed/cli/dask_worker.py b/distributed/cli/dask_worker.py\n--- a/distributed/cli/dask_worker.py\n+++ b/distributed/cli/dask_worker.py\n@@ -211,11 +211,9 @@\n signal.signal(signal.SIGINT, handle_signal)\n signal.signal(signal.SIGTERM, handle_signal)\n \n- for n in nannies:\n- n.start(addr)\n-\n @gen.coroutine\n def run():\n+ yield [n.start(addr) for n in nannies]\n while all(n.status != 'closed' for n in nannies):\n yield gen.sleep(0.2)\n", "issue": "ValueError / hang on LocalCluster startup\nI'm loading my conda environment off NAS storage, and when usage is heavy, there can be long delays in loading things, which I believe can exceed the default timeout (five seconds) here:\r\n\r\nhttps://github.com/dask/distributed/blob/master/distributed/client.py#L428\r\n\r\nAs a result I see this on a call to `LocalCluster()`:\r\n\r\n\tTraceback (most recent call last): \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 910, in error_callback \r\n\t\tfuture.result() \r\n\t File \"lib/python3.5/site-packages/tornado/concurrent.py\", line 238, in result \r\n\t\traise_exc_info(self._exc_info) \r\n\t File \"<string>\", line 4, in raise_exc_info \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 1063, in run \r\n\t\tyielded = self.gen.throw(*exc_info) \r\n\t File \"lib/python3.5/site-packages/distributed/nanny.py\", line 300, in start\r\n\t\tyield self._wait_until_running() \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 1055, in run \r\n\t\tvalue = future.result() \r\n\t File \"lib/python3.5/site-packages/tornado/concurrent.py\", line 238, in result\r\n\t\traise_exc_info(self._exc_info) \r\n\t File \"<string>\", line 4, in raise_exc_info \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 1069, in run\r\n\t\tyielded = self.gen.send(value) \r\n\t File \"lib/python3.5/site-packages/distributed/nanny.py\", line 386, in _wait_until_running\r\n\t\traise ValueError(\"Worker not started\") \r\n\tValueError: Worker not started \r\n\r\nThis `ValueError` does not seem to be passed up to my application; my application seems frozen waiting for the cluster start up (not 100% sure where) - ie, it never returns from the `LocalCluster()` call. 
Although the multiprocessing pool actually does start up, in this case too late to matter.\r\n\r\nHow can I catch / retry this situation?\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nfrom datetime import timedelta\nfrom functools import partial\nimport json\nimport logging\nimport os\nimport shutil\nimport signal\nfrom sys import exit\nfrom time import sleep\n\nimport click\nfrom distributed import Nanny, Worker, rpc\nfrom distributed.utils import All, get_ip_interface\nfrom distributed.worker import _ncores\nfrom distributed.http import HTTPWorker\nfrom distributed.metrics import time\nfrom distributed.security import Security\nfrom distributed.cli.utils import check_python_3, uri_from_host_port\n\nfrom toolz import valmap\nfrom tornado.ioloop import IOLoop, TimeoutError\nfrom tornado import gen\n\nlogger = logging.getLogger('distributed.dask_worker')\n\n\npem_file_option_type = click.Path(exists=True, resolve_path=True)\n\[email protected]()\[email protected]('scheduler', type=str, required=False)\[email protected]('--tls-ca-file', type=pem_file_option_type, default=None,\n help=\"CA cert(s) file for TLS (in PEM format)\")\[email protected]('--tls-cert', type=pem_file_option_type, default=None,\n help=\"certificate file for TLS (in PEM format)\")\[email protected]('--tls-key', type=pem_file_option_type, default=None,\n help=\"private key file for TLS (in PEM format)\")\[email protected]('--worker-port', type=int, default=0,\n help=\"Serving computation port, defaults to random\")\[email protected]('--http-port', type=int, default=0,\n help=\"Serving http port, defaults to random\")\[email protected]('--nanny-port', type=int, default=0,\n help=\"Serving nanny port, defaults to random\")\[email protected]('--bokeh-port', type=int, default=8789,\n help=\"Bokeh port, defaults to 8789\")\[email protected]('--bokeh/--no-bokeh', 'bokeh', default=True, show_default=True,\n required=False, help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=None,\n help=\"Serving host. Should be an ip address that is\"\n \" visible to the scheduler and other workers. \"\n \"See --interface.\")\[email protected]('--interface', type=str, default=None,\n help=\"Network interface like 'eth0' or 'ib0'\")\[email protected]('--nthreads', type=int, default=0,\n help=\"Number of threads per process.\")\[email protected]('--nprocs', type=int, default=1,\n help=\"Number of worker processes. Defaults to one.\")\[email protected]('--name', type=str, default='',\n help=\"A unique name for this worker like 'worker-1'\")\[email protected]('--memory-limit', default='auto',\n help=\"Number of bytes before spilling data to disk. \"\n \"This can be an integer (nbytes) \"\n \"float (fraction of total memory) \"\n \"or 'auto'\")\[email protected]('--reconnect/--no-reconnect', default=True,\n help=\"Reconnect to scheduler if disconnected\")\[email protected]('--nanny/--no-nanny', default=True,\n help=\"Start workers in nanny process for management\")\[email protected]('--pid-file', type=str, default='',\n help=\"File to write the process PID\")\[email protected]('--local-directory', default='', type=str,\n help=\"Directory to place worker files\")\[email protected]('--resources', type=str, default='',\n help='Resources for task constraints like \"GPU=2 MEM=10e9\"')\[email protected]('--scheduler-file', type=str, default='',\n help='Filename to JSON encoded scheduler information. 
'\n 'Use with dask-scheduler --scheduler-file')\[email protected]('--death-timeout', type=float, default=None,\n help=\"Seconds to wait for a scheduler before closing\")\[email protected]('--bokeh-prefix', type=str, default=None,\n help=\"Prefix for the bokeh app\")\[email protected]('--preload', type=str, multiple=True,\n help='Module that should be loaded by each worker process '\n 'like \"foo.bar\" or \"/path/to/foo.py\"')\ndef main(scheduler, host, worker_port, http_port, nanny_port, nthreads, nprocs,\n nanny, name, memory_limit, pid_file, reconnect,\n resources, bokeh, bokeh_port, local_directory, scheduler_file,\n interface, death_timeout, preload, bokeh_prefix,\n tls_ca_file, tls_cert, tls_key):\n sec = Security(tls_ca_file=tls_ca_file,\n tls_worker_cert=tls_cert,\n tls_worker_key=tls_key,\n )\n\n if nanny:\n port = nanny_port\n else:\n port = worker_port\n\n if nprocs > 1 and worker_port != 0:\n logger.error(\"Failed to launch worker. You cannot use the --port argument when nprocs > 1.\")\n exit(1)\n\n if nprocs > 1 and name:\n logger.error(\"Failed to launch worker. You cannot use the --name argument when nprocs > 1.\")\n exit(1)\n\n if nprocs > 1 and not nanny:\n logger.error(\"Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1.\")\n exit(1)\n\n if not nthreads:\n nthreads = _ncores // nprocs\n\n if pid_file:\n with open(pid_file, 'w') as f:\n f.write(str(os.getpid()))\n\n def del_pid_file():\n if os.path.exists(pid_file):\n os.remove(pid_file)\n atexit.register(del_pid_file)\n\n services = {('http', http_port): HTTPWorker}\n\n if bokeh:\n try:\n from distributed.bokeh.worker import BokehWorker\n except ImportError:\n pass\n else:\n if bokeh_prefix:\n result = (BokehWorker, {'prefix': bokeh_prefix})\n else:\n result = BokehWorker\n services[('bokeh', bokeh_port)] = result\n\n if resources:\n resources = resources.replace(',', ' ').split()\n resources = dict(pair.split('=') for pair in resources)\n resources = valmap(float, resources)\n else:\n resources = None\n\n loop = IOLoop.current()\n\n if nanny:\n kwargs = {'worker_port': worker_port}\n t = Nanny\n else:\n kwargs = {}\n if nanny_port:\n kwargs['service_ports'] = {'nanny': nanny_port}\n t = Worker\n\n if scheduler_file:\n while not os.path.exists(scheduler_file):\n sleep(0.01)\n for i in range(10):\n try:\n with open(scheduler_file) as f:\n cfg = json.load(f)\n scheduler = cfg['address']\n break\n except (ValueError, KeyError): # race with scheduler on file\n sleep(0.01)\n\n if not scheduler:\n raise ValueError(\"Need to provide scheduler address like\\n\"\n \"dask-worker SCHEDULER_ADDRESS:8786\")\n\n if interface:\n if host:\n raise ValueError(\"Can not specify both interface and host\")\n else:\n host = get_ip_interface(interface)\n\n if host or port:\n addr = uri_from_host_port(host, port, 0)\n else:\n # Choose appropriate address for scheduler\n addr = None\n\n nannies = [t(scheduler, ncores=nthreads,\n services=services, name=name, loop=loop, resources=resources,\n memory_limit=memory_limit, reconnect=reconnect,\n local_dir=local_directory, death_timeout=death_timeout,\n preload=preload, security=sec,\n **kwargs)\n for i in range(nprocs)]\n\n @gen.coroutine\n def close_all():\n try:\n if nanny:\n yield [n._close(timeout=2) for n in nannies]\n finally:\n loop.stop()\n\n def handle_signal(signum, frame):\n logger.info(\"Exiting on signal %d\", signum)\n if loop._running:\n loop.add_callback_from_signal(loop.stop)\n else:\n exit(0)\n\n # NOTE: We can't use the generic install_signal_handlers() 
function from\n # distributed.cli.utils because we're handling the signal differently.\n signal.signal(signal.SIGINT, handle_signal)\n signal.signal(signal.SIGTERM, handle_signal)\n\n for n in nannies:\n n.start(addr)\n\n @gen.coroutine\n def run():\n while all(n.status != 'closed' for n in nannies):\n yield gen.sleep(0.2)\n\n try:\n loop.run_sync(run)\n except (KeyboardInterrupt, TimeoutError):\n pass\n finally:\n logger.info(\"End worker\")\n\n # Clean exit: unregister all workers from scheduler\n loop.run_sync(close_all)\n\n\ndef go():\n check_python_3()\n main()\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dask_worker.py"}]} | 3,577 | 139 |
gh_patches_debug_38550 | rasdani/github-patches | git_diff | ethereum__web3.py-2917 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`web3.providers.ipc.get_default_ipc_path()` returned `None` instead of `str`
* Version: 6.0.0
* Python: 3.10
* OS: macOS
* `pip freeze` output
```
will provide if needed
```
### What was wrong?
The method `get_default_ipc_path()` located in `web3.providers.ipc` returned `None` even though its return type is documented as `str`.
I am not sure why it returned `None` in my case; probably I forgot some flag in `geth`.
Edit: it returned None in my case because I was checking it before starting geth, so it didn't exist yet.
### How can it be fixed?
Either change the return type to `Optional[str]` or raise an error instead of returning `None`?
---
**Note:** We prefer to use issues to track our work. If you think you've encountered a bug in web3py or
have a feature request, you're in the right place. If you have implementation or usage questions,
please refer to our [documentation](https://web3py.readthedocs.io/en/latest/) and/or join the conversation
on [discord](https://discord.gg/GHryRvPB84).
</issue>
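Until the annotation question is settled upstream, a caller can treat a missing socket explicitly rather than letting `None` flow into `IPCProvider`. The helper below is a minimal caller-side sketch; the name `require_default_ipc_path` is invented here for illustration.

```python
from web3 import Web3
from web3.providers.ipc import IPCProvider, get_default_ipc_path


def require_default_ipc_path() -> str:
    # Hypothetical guard: fail loudly instead of passing None along.
    ipc_path = get_default_ipc_path()
    if ipc_path is None:
        raise FileNotFoundError(
            "No geth.ipc at the default location; start geth first "
            "or pass an explicit ipc_path to IPCProvider."
        )
    return ipc_path


w3 = Web3(IPCProvider(require_default_ipc_path()))
```

The patch below takes the other route suggested in the report: it annotates both helpers as returning `Optional[str]` and returns `None` explicitly when no socket is found.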
<code>
[start of web3/providers/ipc.py]
1 from json import (
2 JSONDecodeError,
3 )
4 import logging
5 import os
6 from pathlib import (
7 Path,
8 )
9 import socket
10 import sys
11 import threading
12 from types import (
13 TracebackType,
14 )
15 from typing import (
16 Any,
17 Type,
18 Union,
19 )
20
21 from web3._utils.threads import (
22 Timeout,
23 )
24 from web3.types import (
25 RPCEndpoint,
26 RPCResponse,
27 )
28
29 from .base import (
30 JSONBaseProvider,
31 )
32
33
34 def get_ipc_socket(ipc_path: str, timeout: float = 2.0) -> socket.socket:
35 if sys.platform == "win32":
36 # On Windows named pipe is used. Simulate socket with it.
37 from web3._utils.windows import (
38 NamedPipe,
39 )
40
41 return NamedPipe(ipc_path)
42 else:
43 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
44 sock.connect(ipc_path)
45 sock.settimeout(timeout)
46 return sock
47
48
49 class PersistantSocket:
50 sock = None
51
52 def __init__(self, ipc_path: str) -> None:
53 self.ipc_path = ipc_path
54
55 def __enter__(self) -> socket.socket:
56 if not self.ipc_path:
57 raise FileNotFoundError(
58 f"cannot connect to IPC socket at path: {self.ipc_path!r}"
59 )
60
61 if not self.sock:
62 self.sock = self._open()
63 return self.sock
64
65 def __exit__(
66 self,
67 exc_type: Type[BaseException],
68 exc_value: BaseException,
69 traceback: TracebackType,
70 ) -> None:
71 # only close the socket if there was an error
72 if exc_value is not None:
73 try:
74 self.sock.close()
75 except Exception:
76 pass
77 self.sock = None
78
79 def _open(self) -> socket.socket:
80 return get_ipc_socket(self.ipc_path)
81
82 def reset(self) -> socket.socket:
83 self.sock.close()
84 self.sock = self._open()
85 return self.sock
86
87
88 # type ignored b/c missing return statement is by design here
89 def get_default_ipc_path() -> str: # type: ignore
90 if sys.platform == "darwin":
91 ipc_path = os.path.expanduser(
92 os.path.join("~", "Library", "Ethereum", "geth.ipc")
93 )
94 if os.path.exists(ipc_path):
95 return ipc_path
96
97 elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
98 ipc_path = os.path.expanduser(os.path.join("~", ".ethereum", "geth.ipc"))
99 if os.path.exists(ipc_path):
100 return ipc_path
101
102 elif sys.platform == "win32":
103 ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
104 if os.path.exists(ipc_path):
105 return ipc_path
106
107 else:
108 raise ValueError(
109 f"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/"
110 "freebsd are supported. You must specify the ipc_path"
111 )
112
113
114 # type ignored b/c missing return statement is by design here
115 def get_dev_ipc_path() -> str: # type: ignore
116 if os.environ.get("WEB3_PROVIDER_URI", ""):
117 ipc_path = os.environ.get("WEB3_PROVIDER_URI")
118 if os.path.exists(ipc_path):
119 return ipc_path
120 elif sys.platform == "darwin":
121 tmpdir = os.environ.get("TMPDIR", "")
122 ipc_path = os.path.expanduser(os.path.join(tmpdir, "geth.ipc"))
123 if os.path.exists(ipc_path):
124 return ipc_path
125
126 elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
127 ipc_path = os.path.expanduser(os.path.join("/tmp", "geth.ipc"))
128 if os.path.exists(ipc_path):
129 return ipc_path
130
131 elif sys.platform == "win32":
132 ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
133 if os.path.exists(ipc_path):
134 return ipc_path
135
136 ipc_path = os.path.join("\\\\", ".", "pipe", "jsonrpc.ipc")
137 if os.path.exists(ipc_path):
138 return ipc_path
139
140 else:
141 raise ValueError(
142 f"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/"
143 "freebsd are supported. You must specify the ipc_path"
144 )
145
146
147 class IPCProvider(JSONBaseProvider):
148 logger = logging.getLogger("web3.providers.IPCProvider")
149 _socket = None
150
151 def __init__(
152 self,
153 ipc_path: Union[str, Path] = None,
154 timeout: int = 10,
155 *args: Any,
156 **kwargs: Any,
157 ) -> None:
158 if ipc_path is None:
159 self.ipc_path = get_default_ipc_path()
160 elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):
161 self.ipc_path = str(Path(ipc_path).expanduser().resolve())
162 else:
163 raise TypeError("ipc_path must be of type string or pathlib.Path")
164
165 self.timeout = timeout
166 self._lock = threading.Lock()
167 self._socket = PersistantSocket(self.ipc_path)
168 super().__init__()
169
170 def __str__(self) -> str:
171 return f"<{self.__class__.__name__} {self.ipc_path}>"
172
173 def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
174 self.logger.debug(
175 f"Making request IPC. Path: {self.ipc_path}, Method: {method}"
176 )
177 request = self.encode_rpc_request(method, params)
178
179 with self._lock, self._socket as sock:
180 try:
181 sock.sendall(request)
182 except BrokenPipeError:
183 # one extra attempt, then give up
184 sock = self._socket.reset()
185 sock.sendall(request)
186
187 raw_response = b""
188 with Timeout(self.timeout) as timeout:
189 while True:
190 try:
191 raw_response += sock.recv(4096)
192 except socket.timeout:
193 timeout.sleep(0)
194 continue
195 if raw_response == b"":
196 timeout.sleep(0)
197 elif has_valid_json_rpc_ending(raw_response):
198 try:
199 response = self.decode_rpc_response(raw_response)
200 except JSONDecodeError:
201 timeout.sleep(0)
202 continue
203 else:
204 return response
205 else:
206 timeout.sleep(0)
207 continue
208
209
210 # A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification
211 def has_valid_json_rpc_ending(raw_response: bytes) -> bool:
212 stripped_raw_response = raw_response.rstrip()
213 for valid_ending in [b"}", b"]"]:
214 if stripped_raw_response.endswith(valid_ending):
215 return True
216 else:
217 return False
218
[end of web3/providers/ipc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py
--- a/web3/providers/ipc.py
+++ b/web3/providers/ipc.py
@@ -14,6 +14,7 @@
)
from typing import (
Any,
+ Optional,
Type,
Union,
)
@@ -85,24 +86,26 @@
return self.sock
-# type ignored b/c missing return statement is by design here
-def get_default_ipc_path() -> str: # type: ignore
+def get_default_ipc_path() -> Optional[str]:
if sys.platform == "darwin":
ipc_path = os.path.expanduser(
os.path.join("~", "Library", "Ethereum", "geth.ipc")
)
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
ipc_path = os.path.expanduser(os.path.join("~", ".ethereum", "geth.ipc"))
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform == "win32":
ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
if os.path.exists(ipc_path):
return ipc_path
+ return None
else:
raise ValueError(
@@ -111,22 +114,25 @@
)
-# type ignored b/c missing return statement is by design here
-def get_dev_ipc_path() -> str: # type: ignore
+def get_dev_ipc_path() -> Optional[str]:
if os.environ.get("WEB3_PROVIDER_URI", ""):
ipc_path = os.environ.get("WEB3_PROVIDER_URI")
if os.path.exists(ipc_path):
return ipc_path
+ return None
+
elif sys.platform == "darwin":
tmpdir = os.environ.get("TMPDIR", "")
ipc_path = os.path.expanduser(os.path.join(tmpdir, "geth.ipc"))
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
ipc_path = os.path.expanduser(os.path.join("/tmp", "geth.ipc"))
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform == "win32":
ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
| {"golden_diff": "diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py\n--- a/web3/providers/ipc.py\n+++ b/web3/providers/ipc.py\n@@ -14,6 +14,7 @@\n )\n from typing import (\n Any,\n+ Optional,\n Type,\n Union,\n )\n@@ -85,24 +86,26 @@\n return self.sock\n \n \n-# type ignored b/c missing return statement is by design here\n-def get_default_ipc_path() -> str: # type: ignore\n+def get_default_ipc_path() -> Optional[str]:\n if sys.platform == \"darwin\":\n ipc_path = os.path.expanduser(\n os.path.join(\"~\", \"Library\", \"Ethereum\", \"geth.ipc\")\n )\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"~\", \".ethereum\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n else:\n raise ValueError(\n@@ -111,22 +114,25 @@\n )\n \n \n-# type ignored b/c missing return statement is by design here\n-def get_dev_ipc_path() -> str: # type: ignore\n+def get_dev_ipc_path() -> Optional[str]:\n if os.environ.get(\"WEB3_PROVIDER_URI\", \"\"):\n ipc_path = os.environ.get(\"WEB3_PROVIDER_URI\")\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n+\n elif sys.platform == \"darwin\":\n tmpdir = os.environ.get(\"TMPDIR\", \"\")\n ipc_path = os.path.expanduser(os.path.join(tmpdir, \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"/tmp\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n", "issue": "`web3.providers.ipc.get_default_ipc_path()` returned `None` instead of `str`\n* Version: 6.0.0\r\n* Python: 3.10\r\n* OS: macky whacky\r\n* `pip freeze` output\r\n\r\n```\r\nwill provide if needed\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\nThe method `get_deafult_ipc_path()` located in `web3.providers.ipc` returned `None` even though its return type is documented as `str`.\r\n\r\nI am not sure why it returned `None` in my case, probably forgot some flag in `geth`.\r\n\r\nEdit: it returned None in my case because i was checking it before starting geth, so it didnt exist yet.\r\n\r\n### How can it be fixed?\r\n\r\nEither change type to `Optional[str]` or raise an error instead of returning `None` ?\r\n\r\n---\r\n**Note:** We prefer to use issues to track our work. If you think you've encountered a bug in web3py or \r\nhave a feature request, you're in the right place. 
If you have implementation or usage questions, \r\nplease refer to our [documentation](https://web3py.readthedocs.io/en/latest/) and/or join the conversation \r\non [discord](https://discord.gg/GHryRvPB84).\r\n\n", "before_files": [{"content": "from json import (\n JSONDecodeError,\n)\nimport logging\nimport os\nfrom pathlib import (\n Path,\n)\nimport socket\nimport sys\nimport threading\nfrom types import (\n TracebackType,\n)\nfrom typing import (\n Any,\n Type,\n Union,\n)\n\nfrom web3._utils.threads import (\n Timeout,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .base import (\n JSONBaseProvider,\n)\n\n\ndef get_ipc_socket(ipc_path: str, timeout: float = 2.0) -> socket.socket:\n if sys.platform == \"win32\":\n # On Windows named pipe is used. Simulate socket with it.\n from web3._utils.windows import (\n NamedPipe,\n )\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket:\n sock = None\n\n def __init__(self, ipc_path: str) -> None:\n self.ipc_path = ipc_path\n\n def __enter__(self) -> socket.socket:\n if not self.ipc_path:\n raise FileNotFoundError(\n f\"cannot connect to IPC socket at path: {self.ipc_path!r}\"\n )\n\n if not self.sock:\n self.sock = self._open()\n return self.sock\n\n def __exit__(\n self,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: TracebackType,\n ) -> None:\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n def _open(self) -> socket.socket:\n return get_ipc_socket(self.ipc_path)\n\n def reset(self) -> socket.socket:\n self.sock.close()\n self.sock = self._open()\n return self.sock\n\n\n# type ignored b/c missing return statement is by design here\ndef get_default_ipc_path() -> str: # type: ignore\n if sys.platform == \"darwin\":\n ipc_path = os.path.expanduser(\n os.path.join(\"~\", \"Library\", \"Ethereum\", \"geth.ipc\")\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"~\", \".ethereum\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. 
You must specify the ipc_path\"\n )\n\n\n# type ignored b/c missing return statement is by design here\ndef get_dev_ipc_path() -> str: # type: ignore\n if os.environ.get(\"WEB3_PROVIDER_URI\", \"\"):\n ipc_path = os.environ.get(\"WEB3_PROVIDER_URI\")\n if os.path.exists(ipc_path):\n return ipc_path\n elif sys.platform == \"darwin\":\n tmpdir = os.environ.get(\"TMPDIR\", \"\")\n ipc_path = os.path.expanduser(os.path.join(tmpdir, \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"/tmp\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"jsonrpc.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. You must specify the ipc_path\"\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.IPCProvider\")\n _socket = None\n\n def __init__(\n self,\n ipc_path: Union[str, Path] = None,\n timeout: int = 10,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path()\n elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):\n self.ipc_path = str(Path(ipc_path).expanduser().resolve())\n else:\n raise TypeError(\"ipc_path must be of type string or pathlib.Path\")\n\n self.timeout = timeout\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super().__init__()\n\n def __str__(self) -> str:\n return f\"<{self.__class__.__name__} {self.ipc_path}>\"\n\n def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request IPC. Path: {self.ipc_path}, Method: {method}\"\n )\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n try:\n sock.sendall(request)\n except BrokenPipeError:\n # one extra attempt, then give up\n sock = self._socket.reset()\n sock.sendall(request)\n\n raw_response = b\"\"\n with Timeout(self.timeout) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n elif has_valid_json_rpc_ending(raw_response):\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n else:\n timeout.sleep(0)\n continue\n\n\n# A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification\ndef has_valid_json_rpc_ending(raw_response: bytes) -> bool:\n stripped_raw_response = raw_response.rstrip()\n for valid_ending in [b\"}\", b\"]\"]:\n if stripped_raw_response.endswith(valid_ending):\n return True\n else:\n return False\n", "path": "web3/providers/ipc.py"}]} | 2,838 | 547 |
gh_patches_debug_28692 | rasdani/github-patches | git_diff | spyder-ide__spyder-12435 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check handling of str responses in Kite client signals
Sorry for taking so long to respond. I've included everything that I thought might be relevant. I think @metalogical has it right; however, it would be pretty difficult for me to convince the security guys at work to change anything. Would it be possible to change the error handling so it is able to continue after an error like this? Especially since I have explicitly disabled Kite, and the software works fine if it encounters the error when already running.
2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Start completion server for C:\Users\[removed]\.spyder-py3\temp.py [Python]
2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Python completion server is ready
2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.widgets.codeeditor] -> Completions services available for: C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.plugin] -> python completion server request: 'textDocument/didOpen'
2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from lsp
2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Gather responses for textDocument/didOpen
2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from kite
2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Set focus to: C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.completion.fallback.actor] -> Got request id 0: textDocument/didOpen for file C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,167 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Current changed: 0 - C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,170 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Added thread <spyder.plugins.editor.widgets.editor.AnalysisThread object at 0x0000019B03720A68> to queue
2020-04-20 12:05:42,189 [INFO] [spyder.app.mainwindow] -> Launching code completion client for Python...
2020-04-20 12:05:42,203 [DEBUG] [spyder.plugins.completion.kite.client] -> Starting Kite HTTP session...
2020-04-20 12:05:42,208 [DEBUG] [urllib3.connectionpool] -> Starting new HTTP connection (1): [vpn server name].[company name].com:80
2020-04-20 12:05:42,512 [DEBUG] [urllib3.connectionpool] -> http://[vpn server name].[company name].com:80 "GET http://127.0.0.1:46624/clientapi/languages HTTP/1.1" 200 7277
Traceback (most recent call last):
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\app\mainwindow.py", line 3718, in main
mainwindow = run_spyder(app, options, args)
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\app\mainwindow.py", line 3559, in run_spyder
main.setup()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\app\mainwindow.py", line 960, in setup
self.completions.start()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\plugins\completion\plugin.py", line 292, in start
client_info['plugin'].start()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\plugins\completion\kite\plugin.py", line 144, in start
self.client.start()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\plugins\completion\kite\client.py", line 62, in start
self.sig_client_started.emit(self.languages)
TypeError: KiteClient.sig_client_started[list].emit(): argument 1 has unexpected type 'str'
_Originally posted by @nsluhrs in https://github.com/spyder-ide/spyder/issues/12357#issuecomment-616665440_
</issue>
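The traceback above boils down to `KiteClient.get_languages` handing whatever the `/clientapi/languages` request produced straight to `sig_client_started`, which is declared as `Signal(list)`; when a proxy or VPN answers with plain text, that value is a `str` and the emit raises. A defensive check before the emit is one way to keep startup alive. The snippet below is only a sketch of that idea; the helper name `_as_language_list` is made up here.

```python
def _as_language_list(response):
    """Coerce the languages response to something safe to emit.

    Anything that is not a list of language names (None, or an HTML
    error page returned by a proxy) falls back to the default.
    """
    if not isinstance(response, list):
        return ['python']
    return response


# Inside KiteClient.start(), roughly:
#     self.languages = _as_language_list(self.get_languages())
#     self.sig_client_started.emit(self.languages)
```

The patch below applies the same idea directly inside `get_languages`, treating a text response like a missing one and falling back to `['python']`.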
<code>
[start of spyder/plugins/completion/kite/client.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """Kite completions HTTP client."""
8
9 # Standard library imports
10 import logging
11 try:
12 from urllib import quote # Python 2
13 except ImportError:
14 from urllib.parse import quote # Python 3
15
16 # Third party imports
17 from qtpy.QtCore import QObject, QThread, Signal, QMutex
18 import requests
19
20 # Local imports
21 from spyder.config.base import _
22 from spyder.plugins.completion.kite import KITE_ENDPOINTS, KITE_REQUEST_MAPPING
23 from spyder.plugins.completion.kite.decorators import class_register
24 from spyder.plugins.completion.kite.providers import KiteMethodProviderMixIn
25 from spyder.plugins.completion.kite.utils.status import (
26 status, check_if_kite_running)
27 from spyder.py3compat import (
28 ConnectionError, ConnectionRefusedError, TEXT_TYPES)
29
30
31 logger = logging.getLogger(__name__)
32
33
34 @class_register
35 class KiteClient(QObject, KiteMethodProviderMixIn):
36 sig_response_ready = Signal(int, dict)
37 sig_client_started = Signal(list)
38 sig_client_not_responding = Signal()
39 sig_perform_request = Signal(int, str, object)
40 sig_perform_status_request = Signal(str)
41 sig_status_response_ready = Signal((str,), (dict,))
42 sig_perform_onboarding_request = Signal()
43 sig_onboarding_response_ready = Signal(str)
44
45 def __init__(self, parent, enable_code_snippets=True):
46 QObject.__init__(self, parent)
47 self.endpoint = None
48 self.requests = {}
49 self.languages = []
50 self.mutex = QMutex()
51 self.opened_files = {}
52 self.opened_files_status = {}
53 self.thread_started = False
54 self.enable_code_snippets = enable_code_snippets
55 self.thread = QThread()
56 self.moveToThread(self.thread)
57 self.thread.started.connect(self.started)
58 self.sig_perform_request.connect(self.perform_request)
59 self.sig_perform_status_request.connect(self.get_status)
60 self.sig_perform_onboarding_request.connect(self.get_onboarding_file)
61
62 def start(self):
63 if not self.thread_started:
64 self.thread.start()
65 logger.debug('Starting Kite HTTP session...')
66 self.endpoint = requests.Session()
67 self.languages = self.get_languages()
68 self.sig_client_started.emit(self.languages)
69
70 def started(self):
71 self.thread_started = True
72
73 def stop(self):
74 if self.thread_started:
75 logger.debug('Closing Kite HTTP session...')
76 self.endpoint.close()
77 self.thread.quit()
78
79 def get_languages(self):
80 verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT
81 success, response = self.perform_http_request(verb, url)
82 if response is None:
83 response = ['python']
84 return response
85
86 def _get_onboarding_file(self):
87 """Perform a request to get kite's onboarding file."""
88 verb, url = KITE_ENDPOINTS.ONBOARDING_ENDPOINT
89 success, response = self.perform_http_request(verb, url)
90 return response
91
92 def get_onboarding_file(self):
93 """Get onboarding file."""
94 onboarding_file = self._get_onboarding_file()
95 self.sig_onboarding_response_ready.emit(onboarding_file)
96
97 def _get_status(self, filename):
98 """Perform a request to get kite status for a file."""
99 verb, url = KITE_ENDPOINTS.STATUS_ENDPOINT
100 if filename:
101 url_params = {'filename': filename}
102 else:
103 url_params = {'filetype': 'python'}
104 success, response = self.perform_http_request(
105 verb, url, url_params=url_params)
106 return success, response
107
108 def get_status(self, filename):
109 """Get kite status for a given filename."""
110 success_status, kite_status = self._get_status(filename)
111 if not filename or kite_status is None:
112 kite_status = status()
113 self.sig_status_response_ready[str].emit(kite_status)
114 elif isinstance(kite_status, TEXT_TYPES):
115 if not success_status:
116 status_str = status(extra_status=' with errors')
117 long_str = _("<code>{error}</code><br><br>"
118 "Note: If you are using a VPN, "
119 "please don't route requests to "
120 "localhost/127.0.0.1 with it").format(
121 error=kite_status)
122 else:
123 status_str = status()
124 long_str = kite_status
125 kite_status_dict = {
126 'status': status_str,
127 'short': status_str,
128 'long': long_str}
129 self.sig_status_response_ready[dict].emit(kite_status_dict)
130 else:
131 self.sig_status_response_ready[dict].emit(kite_status)
132
133 def perform_http_request(self, verb, url, url_params=None, params=None):
134 response = None
135 http_method = getattr(self.endpoint, verb)
136 try:
137 http_response = http_method(url, params=url_params, json=params)
138 except Exception as error:
139 return False, None
140 success = http_response.status_code == 200
141 if success:
142 try:
143 response = http_response.json()
144 except Exception:
145 response = http_response.text
146 response = None if response == '' else response
147 return success, response
148
149 def send(self, method, params, url_params):
150 response = None
151 if self.endpoint is not None and method in KITE_REQUEST_MAPPING:
152 http_verb, path = KITE_REQUEST_MAPPING[method]
153 encoded_url_params = {
154 key: quote(value) if isinstance(value, TEXT_TYPES) else value
155 for (key, value) in url_params.items()}
156 path = path.format(**encoded_url_params)
157 try:
158 success, response = self.perform_http_request(
159 http_verb, path, params=params)
160 except (ConnectionRefusedError, ConnectionError):
161 return response
162 return response
163
164 def perform_request(self, req_id, method, params):
165 response = None
166 if method in self.sender_registry:
167 logger.debug('Perform {0} request with id {1}'.format(
168 method, req_id))
169 handler_name = self.sender_registry[method]
170 handler = getattr(self, handler_name)
171 response = handler(params)
172 if method in self.handler_registry:
173 converter_name = self.handler_registry[method]
174 converter = getattr(self, converter_name)
175 if response is not None:
176 response = converter(response)
177 self.sig_response_ready.emit(req_id, response or {})
178
[end of spyder/plugins/completion/kite/client.py]
</code>
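As a quick, standalone illustration of the failure mode reported for this file (this sketch is not part of the repository; it only assumes qtpy/PyQt5 is available, and the `Demo` class name is hypothetical): a Qt signal declared as `Signal(list)` rejects a plain string payload at emit time, which is exactly what happens when `get_languages()` returns error text instead of a list.

# Minimal sketch, assuming qtpy with a PyQt5 backend; not repository code.
from qtpy.QtCore import QObject, Signal

class Demo(QObject):
    sig_client_started = Signal(list)  # same signature as KiteClient.sig_client_started

demo = Demo()
demo.sig_client_started.emit(['python'])       # fine: payload matches the declared list type
try:
    demo.sig_client_started.emit('<html>...')  # text payload (e.g. a proxy/VPN error page)
except TypeError as error:
    print(error)  # "argument 1 has unexpected type 'str'" -- the error seen in the traceback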
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spyder/plugins/completion/kite/client.py b/spyder/plugins/completion/kite/client.py
--- a/spyder/plugins/completion/kite/client.py
+++ b/spyder/plugins/completion/kite/client.py
@@ -79,7 +79,7 @@
def get_languages(self):
verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT
success, response = self.perform_http_request(verb, url)
- if response is None:
+ if response is None or isinstance(response, TEXT_TYPES):
response = ['python']
return response
@@ -112,16 +112,12 @@
kite_status = status()
self.sig_status_response_ready[str].emit(kite_status)
elif isinstance(kite_status, TEXT_TYPES):
- if not success_status:
- status_str = status(extra_status=' with errors')
- long_str = _("<code>{error}</code><br><br>"
- "Note: If you are using a VPN, "
- "please don't route requests to "
- "localhost/127.0.0.1 with it").format(
- error=kite_status)
- else:
- status_str = status()
- long_str = kite_status
+ status_str = status(extra_status=' with errors')
+ long_str = _("<code>{error}</code><br><br>"
+ "Note: If you are using a VPN, "
+ "please don't route requests to "
+ "localhost/127.0.0.1 with it").format(
+ error=kite_status)
kite_status_dict = {
'status': status_str,
'short': status_str,
| {"golden_diff": "diff --git a/spyder/plugins/completion/kite/client.py b/spyder/plugins/completion/kite/client.py\n--- a/spyder/plugins/completion/kite/client.py\n+++ b/spyder/plugins/completion/kite/client.py\n@@ -79,7 +79,7 @@\n def get_languages(self):\n verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n- if response is None:\n+ if response is None or isinstance(response, TEXT_TYPES):\n response = ['python']\n return response\n \n@@ -112,16 +112,12 @@\n kite_status = status()\n self.sig_status_response_ready[str].emit(kite_status)\n elif isinstance(kite_status, TEXT_TYPES):\n- if not success_status:\n- status_str = status(extra_status=' with errors')\n- long_str = _(\"<code>{error}</code><br><br>\"\n- \"Note: If you are using a VPN, \"\n- \"please don't route requests to \"\n- \"localhost/127.0.0.1 with it\").format(\n- error=kite_status)\n- else:\n- status_str = status()\n- long_str = kite_status\n+ status_str = status(extra_status=' with errors')\n+ long_str = _(\"<code>{error}</code><br><br>\"\n+ \"Note: If you are using a VPN, \"\n+ \"please don't route requests to \"\n+ \"localhost/127.0.0.1 with it\").format(\n+ error=kite_status)\n kite_status_dict = {\n 'status': status_str,\n 'short': status_str,\n", "issue": "Check handling of str responses in Kite client signals\nSorry for taking so long to respond. I've included everything that I thought might be relevant. I think @metalogical has it right, however I it would be pretty difficult for me to convince the security guys at work to change anything. Would it be possible to change the error handling so it is able to continue after an error like this? Especially since I have explicitly disabled Kite and the software works fine if it encounters the error when already running. 
\r\n\r\n\t2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Start completion server for C:\\Users\\[removed]\\.spyder-py3\\temp.py [Python]\r\n\t2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Python completion server is ready\r\n\t2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.widgets.codeeditor] -> Completions services available for: C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.plugin] -> python completion server request: 'textDocument/didOpen'\r\n\t2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from lsp\r\n\t2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Gather responses for textDocument/didOpen\r\n\t2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from kite\r\n\t2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Set focus to: C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.completion.fallback.actor] -> Got request id 0: textDocument/didOpen for file C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,167 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Current changed: 0 - C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,170 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Added thread <spyder.plugins.editor.widgets.editor.AnalysisThread object at 0x0000019B03720A68> to queue\r\n\t2020-04-20 12:05:42,189 [INFO] [spyder.app.mainwindow] -> Launching code completion client for Python...\r\n\t2020-04-20 12:05:42,203 [DEBUG] [spyder.plugins.completion.kite.client] -> Starting Kite HTTP session...\r\n\t2020-04-20 12:05:42,208 [DEBUG] [urllib3.connectionpool] -> Starting new HTTP connection (1): [vpn server name].[company name].com:80\r\n\t2020-04-20 12:05:42,512 [DEBUG] [urllib3.connectionpool] -> http://[vpn server name].[company name].com:80 \"GET http://127.0.0.1:46624/clientapi/languages HTTP/1.1\" 200 7277\r\n\tTraceback (most recent call last):\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\app\\mainwindow.py\", line 3718, in main\r\n\t\tmainwindow = run_spyder(app, options, args)\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\app\\mainwindow.py\", line 3559, in run_spyder\r\n\t\tmain.setup()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\app\\mainwindow.py\", line 960, in setup\r\n\t\tself.completions.start()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\plugins\\completion\\plugin.py\", line 292, in start\r\n\t\tclient_info['plugin'].start()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\plugins\\completion\\kite\\plugin.py\", line 144, in start\r\n\t\tself.client.start()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\plugins\\completion\\kite\\client.py\", line 62, in start\r\n\t\tself.sig_client_started.emit(self.languages)\r\n\tTypeError: KiteClient.sig_client_started[list].emit(): argument 1 has unexpected type 'str'\r\n\r\n_Originally posted by @nsluhrs in https://github.com/spyder-ide/spyder/issues/12357#issuecomment-616665440_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Kite completions HTTP client.\"\"\"\n\n# Standard library imports\nimport 
logging\ntry:\n from urllib import quote # Python 2\nexcept ImportError:\n from urllib.parse import quote # Python 3\n\n# Third party imports\nfrom qtpy.QtCore import QObject, QThread, Signal, QMutex\nimport requests\n\n# Local imports\nfrom spyder.config.base import _\nfrom spyder.plugins.completion.kite import KITE_ENDPOINTS, KITE_REQUEST_MAPPING\nfrom spyder.plugins.completion.kite.decorators import class_register\nfrom spyder.plugins.completion.kite.providers import KiteMethodProviderMixIn\nfrom spyder.plugins.completion.kite.utils.status import (\n status, check_if_kite_running)\nfrom spyder.py3compat import (\n ConnectionError, ConnectionRefusedError, TEXT_TYPES)\n\n\nlogger = logging.getLogger(__name__)\n\n\n@class_register\nclass KiteClient(QObject, KiteMethodProviderMixIn):\n sig_response_ready = Signal(int, dict)\n sig_client_started = Signal(list)\n sig_client_not_responding = Signal()\n sig_perform_request = Signal(int, str, object)\n sig_perform_status_request = Signal(str)\n sig_status_response_ready = Signal((str,), (dict,))\n sig_perform_onboarding_request = Signal()\n sig_onboarding_response_ready = Signal(str)\n\n def __init__(self, parent, enable_code_snippets=True):\n QObject.__init__(self, parent)\n self.endpoint = None\n self.requests = {}\n self.languages = []\n self.mutex = QMutex()\n self.opened_files = {}\n self.opened_files_status = {}\n self.thread_started = False\n self.enable_code_snippets = enable_code_snippets\n self.thread = QThread()\n self.moveToThread(self.thread)\n self.thread.started.connect(self.started)\n self.sig_perform_request.connect(self.perform_request)\n self.sig_perform_status_request.connect(self.get_status)\n self.sig_perform_onboarding_request.connect(self.get_onboarding_file)\n\n def start(self):\n if not self.thread_started:\n self.thread.start()\n logger.debug('Starting Kite HTTP session...')\n self.endpoint = requests.Session()\n self.languages = self.get_languages()\n self.sig_client_started.emit(self.languages)\n\n def started(self):\n self.thread_started = True\n\n def stop(self):\n if self.thread_started:\n logger.debug('Closing Kite HTTP session...')\n self.endpoint.close()\n self.thread.quit()\n\n def get_languages(self):\n verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n if response is None:\n response = ['python']\n return response\n\n def _get_onboarding_file(self):\n \"\"\"Perform a request to get kite's onboarding file.\"\"\"\n verb, url = KITE_ENDPOINTS.ONBOARDING_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n return response\n\n def get_onboarding_file(self):\n \"\"\"Get onboarding file.\"\"\"\n onboarding_file = self._get_onboarding_file()\n self.sig_onboarding_response_ready.emit(onboarding_file)\n\n def _get_status(self, filename):\n \"\"\"Perform a request to get kite status for a file.\"\"\"\n verb, url = KITE_ENDPOINTS.STATUS_ENDPOINT\n if filename:\n url_params = {'filename': filename}\n else:\n url_params = {'filetype': 'python'}\n success, response = self.perform_http_request(\n verb, url, url_params=url_params)\n return success, response\n\n def get_status(self, filename):\n \"\"\"Get kite status for a given filename.\"\"\"\n success_status, kite_status = self._get_status(filename)\n if not filename or kite_status is None:\n kite_status = status()\n self.sig_status_response_ready[str].emit(kite_status)\n elif isinstance(kite_status, TEXT_TYPES):\n if not success_status:\n status_str = status(extra_status=' with errors')\n long_str = 
_(\"<code>{error}</code><br><br>\"\n \"Note: If you are using a VPN, \"\n \"please don't route requests to \"\n \"localhost/127.0.0.1 with it\").format(\n error=kite_status)\n else:\n status_str = status()\n long_str = kite_status\n kite_status_dict = {\n 'status': status_str,\n 'short': status_str,\n 'long': long_str}\n self.sig_status_response_ready[dict].emit(kite_status_dict)\n else:\n self.sig_status_response_ready[dict].emit(kite_status)\n\n def perform_http_request(self, verb, url, url_params=None, params=None):\n response = None\n http_method = getattr(self.endpoint, verb)\n try:\n http_response = http_method(url, params=url_params, json=params)\n except Exception as error:\n return False, None\n success = http_response.status_code == 200\n if success:\n try:\n response = http_response.json()\n except Exception:\n response = http_response.text\n response = None if response == '' else response\n return success, response\n\n def send(self, method, params, url_params):\n response = None\n if self.endpoint is not None and method in KITE_REQUEST_MAPPING:\n http_verb, path = KITE_REQUEST_MAPPING[method]\n encoded_url_params = {\n key: quote(value) if isinstance(value, TEXT_TYPES) else value\n for (key, value) in url_params.items()}\n path = path.format(**encoded_url_params)\n try:\n success, response = self.perform_http_request(\n http_verb, path, params=params)\n except (ConnectionRefusedError, ConnectionError):\n return response\n return response\n\n def perform_request(self, req_id, method, params):\n response = None\n if method in self.sender_registry:\n logger.debug('Perform {0} request with id {1}'.format(\n method, req_id))\n handler_name = self.sender_registry[method]\n handler = getattr(self, handler_name)\n response = handler(params)\n if method in self.handler_registry:\n converter_name = self.handler_registry[method]\n converter = getattr(self, converter_name)\n if response is not None:\n response = converter(response)\n self.sig_response_ready.emit(req_id, response or {})\n", "path": "spyder/plugins/completion/kite/client.py"}]} | 3,652 | 376 |