problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_38325
|
rasdani/github-patches
|
git_diff
|
encode__starlette-8
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request should present a scope-like interface
The `Request` class should present a dict-like interface so that it can be used in the same way as `scope`. Should also allow it to be instantiated without a `receive` channel being set initially.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/request.py`
Content:
```
1 from starlette.datastructures import URL, Headers, QueryParams
2 import json
3
4
5 class Request:
6 def __init__(self, scope, receive):
7 self._scope = scope
8 self._receive = receive
9 self._stream_consumed = False
10
11 @property
12 def method(self):
13 return self._scope["method"]
14
15 @property
16 def url(self):
17 if not hasattr(self, "_url"):
18 scheme = self._scope["scheme"]
19 host, port = self._scope["server"]
20 path = self._scope["path"]
21 query_string = self._scope["query_string"]
22
23 if (scheme == "http" and port != 80) or (scheme == "https" and port != 443):
24 url = "%s://%s:%s%s" % (scheme, host, port, path)
25 else:
26 url = "%s://%s%s" % (scheme, host, path)
27
28 if query_string:
29 url += "?" + query_string.decode()
30
31 self._url = URL(url)
32 return self._url
33
34 @property
35 def headers(self):
36 if not hasattr(self, "_headers"):
37 self._headers = Headers(
38 [
39 (key.decode(), value.decode())
40 for key, value in self._scope["headers"]
41 ]
42 )
43 return self._headers
44
45 @property
46 def query_params(self):
47 if not hasattr(self, "_query_params"):
48 query_string = self._scope["query_string"].decode()
49 self._query_params = QueryParams(query_string)
50 return self._query_params
51
52 async def stream(self):
53 if hasattr(self, "_body"):
54 yield self._body
55 return
56
57 if self._stream_consumed:
58 raise RuntimeError("Stream consumed")
59
60 self._stream_consumed = True
61 while True:
62 message = await self._receive()
63 if message["type"] == "http.request":
64 yield message.get("body", b"")
65 if not message.get("more_body", False):
66 break
67
68 async def body(self):
69 if not hasattr(self, "_body"):
70 body = b""
71 async for chunk in self.stream():
72 body += chunk
73 self._body = body
74 return self._body
75
76 async def json(self):
77 if not hasattr(self, "_json"):
78 body = await self.body()
79 self._json = json.loads(body)
80 return self._json
81
```
Path: `starlette/decorators.py`
Content:
```
1 from starlette.request import Request
2 from starlette.response import Response
3 from starlette.types import ASGIInstance, Receive, Send, Scope
4
5
6 def asgi_application(func):
7 def app(scope: Scope) -> ASGIInstance:
8 async def awaitable(receive: Receive, send: Send) -> None:
9 request = Request(scope, receive)
10 response = func(request)
11 await response(receive, send)
12
13 return awaitable
14
15 return app
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/decorators.py b/starlette/decorators.py
--- a/starlette/decorators.py
+++ b/starlette/decorators.py
@@ -5,8 +5,10 @@
def asgi_application(func):
def app(scope: Scope) -> ASGIInstance:
+ request = Request(scope)
+
async def awaitable(receive: Receive, send: Send) -> None:
- request = Request(scope, receive)
+ request.set_receive_channel(receive)
response = func(request)
await response(receive, send)
diff --git a/starlette/request.py b/starlette/request.py
--- a/starlette/request.py
+++ b/starlette/request.py
@@ -1,19 +1,33 @@
from starlette.datastructures import URL, Headers, QueryParams
+from collections.abc import Mapping
import json
+import typing
-class Request:
- def __init__(self, scope, receive):
+class Request(Mapping):
+ def __init__(self, scope, receive=None):
self._scope = scope
self._receive = receive
self._stream_consumed = False
+ def __getitem__(self, key):
+ return self._scope[key]
+
+ def __iter__(self):
+ return iter(self._scope)
+
+ def __len__(self):
+ return len(self._scope)
+
+ def set_receive_channel(self, receive):
+ self._receive = receive
+
@property
- def method(self):
+ def method(self) -> str:
return self._scope["method"]
@property
- def url(self):
+ def url(self) -> URL:
if not hasattr(self, "_url"):
scheme = self._scope["scheme"]
host, port = self._scope["server"]
@@ -32,7 +46,7 @@
return self._url
@property
- def headers(self):
+ def headers(self) -> Headers:
if not hasattr(self, "_headers"):
self._headers = Headers(
[
@@ -43,7 +57,7 @@
return self._headers
@property
- def query_params(self):
+ def query_params(self) -> QueryParams:
if not hasattr(self, "_query_params"):
query_string = self._scope["query_string"].decode()
self._query_params = QueryParams(query_string)
@@ -57,6 +71,9 @@
if self._stream_consumed:
raise RuntimeError("Stream consumed")
+ if self._receive is None:
+ raise RuntimeError("Receive channel has not been made available")
+
self._stream_consumed = True
while True:
message = await self._receive()
|
{"golden_diff": "diff --git a/starlette/decorators.py b/starlette/decorators.py\n--- a/starlette/decorators.py\n+++ b/starlette/decorators.py\n@@ -5,8 +5,10 @@\n \n def asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n+ request = Request(scope)\n+\n async def awaitable(receive: Receive, send: Send) -> None:\n- request = Request(scope, receive)\n+ request.set_receive_channel(receive)\n response = func(request)\n await response(receive, send)\n \ndiff --git a/starlette/request.py b/starlette/request.py\n--- a/starlette/request.py\n+++ b/starlette/request.py\n@@ -1,19 +1,33 @@\n from starlette.datastructures import URL, Headers, QueryParams\n+from collections.abc import Mapping\n import json\n+import typing\n \n \n-class Request:\n- def __init__(self, scope, receive):\n+class Request(Mapping):\n+ def __init__(self, scope, receive=None):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n \n+ def __getitem__(self, key):\n+ return self._scope[key]\n+\n+ def __iter__(self):\n+ return iter(self._scope)\n+\n+ def __len__(self):\n+ return len(self._scope)\n+\n+ def set_receive_channel(self, receive):\n+ self._receive = receive\n+\n @property\n- def method(self):\n+ def method(self) -> str:\n return self._scope[\"method\"]\n \n @property\n- def url(self):\n+ def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n@@ -32,7 +46,7 @@\n return self._url\n \n @property\n- def headers(self):\n+ def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n@@ -43,7 +57,7 @@\n return self._headers\n \n @property\n- def query_params(self):\n+ def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n@@ -57,6 +71,9 @@\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n \n+ if self._receive is None:\n+ raise RuntimeError(\"Receive channel has not been made available\")\n+\n self._stream_consumed = True\n while True:\n message = await self._receive()\n", "issue": "Request should present a scope-like interface\nThe `Request` class should present a dict-like interface so that it can be used in the same way as `scope`. 
Should also allow it to be instantiated without a `receive` channel being set initially.\n", "before_files": [{"content": "from starlette.datastructures import URL, Headers, QueryParams\nimport json\n\n\nclass Request:\n def __init__(self, scope, receive):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n\n @property\n def method(self):\n return self._scope[\"method\"]\n\n @property\n def url(self):\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n path = self._scope[\"path\"]\n query_string = self._scope[\"query_string\"]\n\n if (scheme == \"http\" and port != 80) or (scheme == \"https\" and port != 443):\n url = \"%s://%s:%s%s\" % (scheme, host, port, path)\n else:\n url = \"%s://%s%s\" % (scheme, host, path)\n\n if query_string:\n url += \"?\" + query_string.decode()\n\n self._url = URL(url)\n return self._url\n\n @property\n def headers(self):\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n (key.decode(), value.decode())\n for key, value in self._scope[\"headers\"]\n ]\n )\n return self._headers\n\n @property\n def query_params(self):\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n return self._query_params\n\n async def stream(self):\n if hasattr(self, \"_body\"):\n yield self._body\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n yield message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n break\n\n async def body(self):\n if not hasattr(self, \"_body\"):\n body = b\"\"\n async for chunk in self.stream():\n body += chunk\n self._body = body\n return self._body\n\n async def json(self):\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n", "path": "starlette/request.py"}, {"content": "from starlette.request import Request\nfrom starlette.response import Response\nfrom starlette.types import ASGIInstance, Receive, Send, Scope\n\n\ndef asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n async def awaitable(receive: Receive, send: Send) -> None:\n request = Request(scope, receive)\n response = func(request)\n await response(receive, send)\n\n return awaitable\n\n return app\n", "path": "starlette/decorators.py"}], "after_files": [{"content": "from starlette.datastructures import URL, Headers, QueryParams\nfrom collections.abc import Mapping\nimport json\nimport typing\n\n\nclass Request(Mapping):\n def __init__(self, scope, receive=None):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n\n def __getitem__(self, key):\n return self._scope[key]\n\n def __iter__(self):\n return iter(self._scope)\n\n def __len__(self):\n return len(self._scope)\n\n def set_receive_channel(self, receive):\n self._receive = receive\n\n @property\n def method(self) -> str:\n return self._scope[\"method\"]\n\n @property\n def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n path = self._scope[\"path\"]\n query_string = self._scope[\"query_string\"]\n\n if (scheme == \"http\" and port != 80) or (scheme == \"https\" and port != 443):\n url = \"%s://%s:%s%s\" % (scheme, host, port, path)\n else:\n url = \"%s://%s%s\" % (scheme, host, 
path)\n\n if query_string:\n url += \"?\" + query_string.decode()\n\n self._url = URL(url)\n return self._url\n\n @property\n def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n (key.decode(), value.decode())\n for key, value in self._scope[\"headers\"]\n ]\n )\n return self._headers\n\n @property\n def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n return self._query_params\n\n async def stream(self):\n if hasattr(self, \"_body\"):\n yield self._body\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n if self._receive is None:\n raise RuntimeError(\"Receive channel has not been made available\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n yield message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n break\n\n async def body(self):\n if not hasattr(self, \"_body\"):\n body = b\"\"\n async for chunk in self.stream():\n body += chunk\n self._body = body\n return self._body\n\n async def json(self):\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n", "path": "starlette/request.py"}, {"content": "from starlette.request import Request\nfrom starlette.response import Response\nfrom starlette.types import ASGIInstance, Receive, Send, Scope\n\n\ndef asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n request = Request(scope)\n\n async def awaitable(receive: Receive, send: Send) -> None:\n request.set_receive_channel(receive)\n response = func(request)\n await response(receive, send)\n\n return awaitable\n\n return app\n", "path": "starlette/decorators.py"}]}
| 1,126 | 599 |
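
The golden diff in the record above turns `Request` into a `collections.abc.Mapping` over its ASGI scope and lets the receive channel be attached after construction. As a minimal standalone sketch of that pattern (the class name `ScopeRequest` is invented for illustration and is not the Starlette class):

```python
from collections.abc import Mapping


class ScopeRequest(Mapping):
    """Dict-like request: key lookups pass straight through to the ASGI scope."""

    def __init__(self, scope, receive=None):
        self._scope = scope
        self._receive = receive  # may be attached later via set_receive_channel()

    def __getitem__(self, key):
        return self._scope[key]

    def __iter__(self):
        return iter(self._scope)

    def __len__(self):
        return len(self._scope)

    def set_receive_channel(self, receive):
        self._receive = receive


request = ScopeRequest({"method": "GET", "path": "/"})
assert request["method"] == "GET"                # dict-style access
assert "path" in request and len(request) == 2   # Mapping protocol comes for free
```

Subclassing `Mapping` is the design choice that makes the scope-like interface cheap: only `__getitem__`, `__iter__`, and `__len__` are implemented, and membership tests, `.keys()`, and `.items()` follow automatically.
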
gh_patches_debug_16899
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmdetection-1099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling'
I have successful install the mmdetection by the command "pip install -v -e .". But I have the problem in the test. Would anyone help me ?
(lab) gpuserver@ubuntu:~/ht/labs/mmdetection-master$ python
Python 3.7.3 (default, Mar 27 2019, 22:11:17)
[GCC 7.3.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from mmdet.apis import init_detector
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/__init__.py", line 2, in <module>
from .inference import inference_detector, init_detector, show_result
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/inference.py", line 9, in <module>
from mmdet.core import get_classes
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/__init__.py", line 1, in <module>
from .anchor import * # noqa: F401, F403
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/__init__.py", line 2, in <module>
from .anchor_target import anchor_inside_flags, anchor_target
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/anchor_target.py", line 3, in <module>
from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/__init__.py", line 1, in <module>
from .assign_sampling import assign_and_sample, build_assigner, build_sampler
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py", line 3, in <module>
from . import assigners, samplers
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/__init__.py", line 2, in <module>
from .combined_sampler import CombinedSampler
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/combined_sampler.py", line 1, in <module>
from ..assign_sampling import build_sampler
ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' (/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/core/bbox/__init__.py`
Content:
```
1 from .assign_sampling import assign_and_sample, build_assigner, build_sampler
2 from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner
3 from .bbox_target import bbox_target
4 from .geometry import bbox_overlaps
5 from .samplers import (BaseSampler, CombinedSampler,
6 InstanceBalancedPosSampler, IoUBalancedNegSampler,
7 PseudoSampler, RandomSampler, SamplingResult)
8 from .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,
9 bbox_mapping, bbox_mapping_back, delta2bbox,
10 distance2bbox, roi2bbox)
11
12 __all__ = [
13 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
14 'BaseSampler', 'PseudoSampler', 'RandomSampler',
15 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
16 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',
17 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',
18 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',
19 'distance2bbox', 'bbox_target'
20 ]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmdet/core/bbox/__init__.py b/mmdet/core/bbox/__init__.py
--- a/mmdet/core/bbox/__init__.py
+++ b/mmdet/core/bbox/__init__.py
@@ -1,4 +1,3 @@
-from .assign_sampling import assign_and_sample, build_assigner, build_sampler
from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner
from .bbox_target import bbox_target
from .geometry import bbox_overlaps
@@ -9,6 +8,9 @@
bbox_mapping, bbox_mapping_back, delta2bbox,
distance2bbox, roi2bbox)
+from .assign_sampling import ( # isort:skip, avoid recursive imports
+ assign_and_sample, build_assigner, build_sampler)
+
__all__ = [
'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
'BaseSampler', 'PseudoSampler', 'RandomSampler',
|
{"golden_diff": "diff --git a/mmdet/core/bbox/__init__.py b/mmdet/core/bbox/__init__.py\n--- a/mmdet/core/bbox/__init__.py\n+++ b/mmdet/core/bbox/__init__.py\n@@ -1,4 +1,3 @@\n-from .assign_sampling import assign_and_sample, build_assigner, build_sampler\n from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\n from .bbox_target import bbox_target\n from .geometry import bbox_overlaps\n@@ -9,6 +8,9 @@\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n \n+from .assign_sampling import ( # isort:skip, avoid recursive imports\n+ assign_and_sample, build_assigner, build_sampler)\n+\n __all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n", "issue": "ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' \nI have successful install the mmdetection by the command \"pip install -v -e .\". But I have the problem in the test. Would anyone help me ?\r\n\r\n(lab) gpuserver@ubuntu:~/ht/labs/mmdetection-master$ python\r\nPython 3.7.3 (default, Mar 27 2019, 22:11:17) \r\n[GCC 7.3.0] :: Anaconda, Inc. on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from mmdet.apis import init_detector\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/__init__.py\", line 2, in <module>\r\n from .inference import inference_detector, init_detector, show_result\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/inference.py\", line 9, in <module>\r\n from mmdet.core import get_classes\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/__init__.py\", line 1, in <module>\r\n from .anchor import * # noqa: F401, F403\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/__init__.py\", line 2, in <module>\r\n from .anchor_target import anchor_inside_flags, anchor_target\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/anchor_target.py\", line 3, in <module>\r\n from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/__init__.py\", line 1, in <module>\r\n from .assign_sampling import assign_and_sample, build_assigner, build_sampler\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py\", line 3, in <module>\r\n from . 
import assigners, samplers\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/__init__.py\", line 2, in <module>\r\n from .combined_sampler import CombinedSampler\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/combined_sampler.py\", line 1, in <module>\r\n from ..assign_sampling import build_sampler\r\nImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' (/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py)\n", "before_files": [{"content": "from .assign_sampling import assign_and_sample, build_assigner, build_sampler\nfrom .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\nfrom .bbox_target import bbox_target\nfrom .geometry import bbox_overlaps\nfrom .samplers import (BaseSampler, CombinedSampler,\n InstanceBalancedPosSampler, IoUBalancedNegSampler,\n PseudoSampler, RandomSampler, SamplingResult)\nfrom .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n\n__all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',\n 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',\n 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',\n 'distance2bbox', 'bbox_target'\n]\n", "path": "mmdet/core/bbox/__init__.py"}], "after_files": [{"content": "from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\nfrom .bbox_target import bbox_target\nfrom .geometry import bbox_overlaps\nfrom .samplers import (BaseSampler, CombinedSampler,\n InstanceBalancedPosSampler, IoUBalancedNegSampler,\n PseudoSampler, RandomSampler, SamplingResult)\nfrom .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n\nfrom .assign_sampling import ( # isort:skip, avoid recursive imports\n assign_and_sample, build_assigner, build_sampler)\n\n__all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',\n 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',\n 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',\n 'distance2bbox', 'bbox_target'\n]\n", "path": "mmdet/core/bbox/__init__.py"}]}
| 1,217 | 217 |
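
The record above fixes an `ImportError` caused by a circular import: a sampler module pulls `build_sampler` from a factory module that is still executing its own imports. The self-contained sketch below reproduces that failure mode with hypothetical module names (`pkg`, `factory.py`, `samplers.py`); the cure in the golden diff is the same idea of importing the factory only after the modules it depends on have finished loading.

```python
import os
import sys
import tempfile

# Write a tiny throwaway package to disk so the circular import can be triggered.
root = tempfile.mkdtemp()
pkg_dir = os.path.join(root, "pkg")
os.makedirs(pkg_dir)

modules = {
    "__init__.py": "from .factory import build_sampler\nfrom .samplers import Sampler\n",
    "factory.py": (
        "from . import samplers\n"
        "\n"
        "def build_sampler():\n"
        "    return samplers.Sampler()\n"
    ),
    "samplers.py": (
        "from .factory import build_sampler  # factory is still half-initialised here\n"
        "\n"
        "class Sampler:\n"
        "    pass\n"
    ),
}
for name, body in modules.items():
    with open(os.path.join(pkg_dir, name), "w") as handle:
        handle.write(body)

sys.path.insert(0, root)
try:
    import pkg  # noqa: F401
except ImportError as exc:
    # Mirrors the traceback in the issue: cannot import name 'build_sampler' ...
    print("reproduced:", exc)
```
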
gh_patches_debug_2125
|
rasdani/github-patches
|
git_diff
|
PyGithub__PyGithub-946
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PaginatedList reversed property loses http headers
In reversed(), 'headers' parameter is not passed to PaginatedList(). It makes some APIs not reversible. For example, get_stargazers_with_dates() which requires "Accept: application/vnd.github.v3.star+json" header in the API call.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `github/PaginatedList.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 AKFish <[email protected]> #
8 # Copyright 2013 Bill Mill <[email protected]> #
9 # Copyright 2013 Vincent Jacques <[email protected]> #
10 # Copyright 2013 davidbrai <[email protected]> #
11 # Copyright 2014 Thialfihar <[email protected]> #
12 # Copyright 2014 Vincent Jacques <[email protected]> #
13 # Copyright 2015 Dan Vanderkam <[email protected]> #
14 # Copyright 2015 Eliot Walker <[email protected]> #
15 # Copyright 2016 Peter Buckley <[email protected]> #
16 # Copyright 2017 Jannis Gebauer <[email protected]> #
17 # Copyright 2018 Gilad Shefer <[email protected]> #
18 # Copyright 2018 Joel Koglin <[email protected]> #
19 # Copyright 2018 Wan Liuyang <[email protected]> #
20 # Copyright 2018 sfdye <[email protected]> #
21 # #
22 # This file is part of PyGithub. #
23 # http://pygithub.readthedocs.io/ #
24 # #
25 # PyGithub is free software: you can redistribute it and/or modify it under #
26 # the terms of the GNU Lesser General Public License as published by the Free #
27 # Software Foundation, either version 3 of the License, or (at your option) #
28 # any later version. #
29 # #
30 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
31 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
32 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
33 # details. #
34 # #
35 # You should have received a copy of the GNU Lesser General Public License #
36 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
37 # #
38 ################################################################################
39
40 try:
41 from urllib.parse import parse_qs
42 except ImportError:
43 from urlparse import parse_qs
44
45 import github.GithubObject
46
47
48 class PaginatedListBase:
49 def __init__(self):
50 self.__elements = list()
51
52 def __getitem__(self, index):
53 assert isinstance(index, (int, slice))
54 if isinstance(index, (int, long)):
55 self.__fetchToIndex(index)
56 return self.__elements[index]
57 else:
58 return self._Slice(self, index)
59
60 def __iter__(self):
61 for element in self.__elements:
62 yield element
63 while self._couldGrow():
64 newElements = self._grow()
65 for element in newElements:
66 yield element
67
68 def _isBiggerThan(self, index):
69 return len(self.__elements) > index or self._couldGrow()
70
71 def __fetchToIndex(self, index):
72 while len(self.__elements) <= index and self._couldGrow():
73 self._grow()
74
75 def _grow(self):
76 newElements = self._fetchNextPage()
77 self.__elements += newElements
78 return newElements
79
80 class _Slice:
81 def __init__(self, theList, theSlice):
82 self.__list = theList
83 self.__start = theSlice.start or 0
84 self.__stop = theSlice.stop
85 self.__step = theSlice.step or 1
86
87 def __iter__(self):
88 index = self.__start
89 while not self.__finished(index):
90 if self.__list._isBiggerThan(index):
91 yield self.__list[index]
92 index += self.__step
93 else:
94 return
95
96 def __finished(self, index):
97 return self.__stop is not None and index >= self.__stop
98
99
100 class PaginatedList(PaginatedListBase):
101 """
102 This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
103
104 You can simply enumerate through instances of this class::
105
106 for repo in user.get_repos():
107 print(repo.name)
108
109 If you want to know the total number of items in the list::
110
111 print(user.get_repos().totalCount)
112 print(len(user.get_repos()))
113
114 You can also index them or take slices::
115
116 second_repo = user.get_repos()[1]
117 first_repos = user.get_repos()[:10]
118
119 If you want to iterate in reversed order, just do::
120
121 for repo in user.get_repos().reversed:
122 print(repo.name)
123
124 And if you really need it, you can explicitly access a specific page::
125
126 some_repos = user.get_repos().get_page(0)
127 some_other_repos = user.get_repos().get_page(3)
128 """
129
130 def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item="items"):
131 PaginatedListBase.__init__(self)
132 self.__requester = requester
133 self.__contentClass = contentClass
134 self.__firstUrl = firstUrl
135 self.__firstParams = firstParams or ()
136 self.__nextUrl = firstUrl
137 self.__nextParams = firstParams or {}
138 self.__headers = headers
139 self.__list_item = list_item
140 if self.__requester.per_page != 30:
141 self.__nextParams["per_page"] = self.__requester.per_page
142 self._reversed = False
143 self.__totalCount = None
144
145 @property
146 def totalCount(self):
147 if not self.__totalCount:
148 params = {} if self.__nextParams is None else self.__nextParams.copy()
149 # set per_page = 1 so the totalCount is just the number of pages
150 params.update({"per_page": 1})
151 headers, data = self.__requester.requestJsonAndCheck(
152 "GET",
153 self.__firstUrl,
154 parameters=params,
155 headers=self.__headers
156 )
157 if 'link' not in headers:
158 self.__totalCount = len(data) if data else 0
159 else:
160 links = self.__parseLinkHeader(headers)
161 lastUrl = links.get("last")
162 self.__totalCount = int(parse_qs(lastUrl)['page'][0])
163 return self.__totalCount
164
165 def _getLastPageUrl(self):
166 headers, data = self.__requester.requestJsonAndCheck(
167 "GET",
168 self.__firstUrl,
169 parameters=self.__nextParams,
170 headers=self.__headers
171 )
172 links = self.__parseLinkHeader(headers)
173 lastUrl = links.get("last")
174 return lastUrl
175
176 @property
177 def reversed(self):
178 r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
179 r.__reverse()
180 return r
181
182 def __reverse(self):
183 self._reversed = True
184 lastUrl = self._getLastPageUrl()
185 if lastUrl:
186 self.__nextUrl = lastUrl
187
188 def _couldGrow(self):
189 return self.__nextUrl is not None
190
191 def _fetchNextPage(self):
192 headers, data = self.__requester.requestJsonAndCheck(
193 "GET",
194 self.__nextUrl,
195 parameters=self.__nextParams,
196 headers=self.__headers
197 )
198 data = data if data else []
199
200 self.__nextUrl = None
201 if len(data) > 0:
202 links = self.__parseLinkHeader(headers)
203 if self._reversed:
204 if "prev" in links:
205 self.__nextUrl = links["prev"]
206 elif "next" in links:
207 self.__nextUrl = links["next"]
208 self.__nextParams = None
209
210 if self.__list_item in data:
211 self.__totalCount = data.get('total_count')
212 data = data[self.__list_item]
213
214 content = [
215 self.__contentClass(self.__requester, headers, element, completed=False)
216 for element in data if element is not None
217 ]
218 if self._reversed:
219 return content[::-1]
220 return content
221
222 def __parseLinkHeader(self, headers):
223 links = {}
224 if "link" in headers:
225 linkHeaders = headers["link"].split(", ")
226 for linkHeader in linkHeaders:
227 (url, rel) = linkHeader.split("; ")
228 url = url[1:-1]
229 rel = rel[5:-1]
230 links[rel] = url
231 return links
232
233 def get_page(self, page):
234 params = dict(self.__firstParams)
235 if page != 0:
236 params["page"] = page + 1
237 if self.__requester.per_page != 30:
238 params["per_page"] = self.__requester.per_page
239 headers, data = self.__requester.requestJsonAndCheck(
240 "GET",
241 self.__firstUrl,
242 parameters=params,
243 headers=self.__headers
244 )
245
246 if self.__list_item in data:
247 self.__totalCount = data.get('total_count')
248 data = data[self.__list_item]
249
250 return [
251 self.__contentClass(self.__requester, headers, element, completed=False)
252 for element in data
253 ]
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/github/PaginatedList.py b/github/PaginatedList.py
--- a/github/PaginatedList.py
+++ b/github/PaginatedList.py
@@ -175,7 +175,7 @@
@property
def reversed(self):
- r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
+ r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams, self.__headers, self.__list_item)
r.__reverse()
return r
|
{"golden_diff": "diff --git a/github/PaginatedList.py b/github/PaginatedList.py\n--- a/github/PaginatedList.py\n+++ b/github/PaginatedList.py\n@@ -175,7 +175,7 @@\n \n @property\n def reversed(self):\n- r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)\n+ r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams, self.__headers, self.__list_item)\n r.__reverse()\n return r\n", "issue": "PaginatedList reversed property loses http headers\nIn reversed(), 'headers' parameter is not passed to PaginatedList(). It makes some APIs not reversible. For example, get_stargazers_with_dates() which requires \"Accept: application/vnd.github.v3.star+json\" header in the API call.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Bill Mill <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 davidbrai <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Dan Vanderkam <[email protected]> #\n# Copyright 2015 Eliot Walker <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2018 Gilad Shefer <[email protected]> #\n# Copyright 2018 Joel Koglin <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\ntry:\n from urllib.parse import parse_qs\nexcept ImportError:\n from urlparse import parse_qs\n\nimport github.GithubObject\n\n\nclass PaginatedListBase:\n def __init__(self):\n self.__elements = list()\n\n def __getitem__(self, index):\n assert isinstance(index, (int, slice))\n if isinstance(index, (int, long)):\n self.__fetchToIndex(index)\n return self.__elements[index]\n else:\n return self._Slice(self, index)\n\n def __iter__(self):\n for element in self.__elements:\n yield element\n while self._couldGrow():\n newElements = self._grow()\n for element in newElements:\n yield element\n\n def _isBiggerThan(self, index):\n return len(self.__elements) > index or self._couldGrow()\n\n def __fetchToIndex(self, index):\n while len(self.__elements) <= index and self._couldGrow():\n self._grow()\n\n def _grow(self):\n newElements = self._fetchNextPage()\n self.__elements += newElements\n return newElements\n\n class _Slice:\n def __init__(self, theList, theSlice):\n self.__list = theList\n self.__start = theSlice.start or 0\n self.__stop = theSlice.stop\n self.__step = theSlice.step or 1\n\n def __iter__(self):\n index = self.__start\n while not self.__finished(index):\n if self.__list._isBiggerThan(index):\n yield self.__list[index]\n index += self.__step\n else:\n return\n\n def __finished(self, index):\n return self.__stop is not None and index >= self.__stop\n\n\nclass PaginatedList(PaginatedListBase):\n \"\"\"\n This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.\n\n You can simply enumerate through instances of this class::\n\n for repo in user.get_repos():\n print(repo.name)\n\n If you want to know the total number of items in the list::\n\n print(user.get_repos().totalCount)\n print(len(user.get_repos()))\n\n You can also index them or take slices::\n\n second_repo = user.get_repos()[1]\n first_repos = user.get_repos()[:10]\n\n If you want to iterate in reversed order, just do::\n\n for repo in user.get_repos().reversed:\n print(repo.name)\n\n And if you really need it, you can explicitly access a specific page::\n\n some_repos = user.get_repos().get_page(0)\n some_other_repos = user.get_repos().get_page(3)\n \"\"\"\n\n def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item=\"items\"):\n PaginatedListBase.__init__(self)\n self.__requester = requester\n self.__contentClass = contentClass\n self.__firstUrl = firstUrl\n self.__firstParams = firstParams or ()\n self.__nextUrl = firstUrl\n self.__nextParams = firstParams or {}\n self.__headers = headers\n self.__list_item = list_item\n if self.__requester.per_page != 30:\n self.__nextParams[\"per_page\"] = self.__requester.per_page\n self._reversed = False\n self.__totalCount = None\n\n @property\n def totalCount(self):\n if not self.__totalCount:\n params = {} if self.__nextParams is None else self.__nextParams.copy()\n # set per_page = 1 so the totalCount is just the number of pages\n params.update({\"per_page\": 1})\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n if 'link' not in headers:\n self.__totalCount = len(data) if data else 0\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n self.__totalCount = int(parse_qs(lastUrl)['page'][0])\n return self.__totalCount\n\n def _getLastPageUrl(self):\n headers, data = 
self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n return lastUrl\n\n @property\n def reversed(self):\n r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)\n r.__reverse()\n return r\n\n def __reverse(self):\n self._reversed = True\n lastUrl = self._getLastPageUrl()\n if lastUrl:\n self.__nextUrl = lastUrl\n\n def _couldGrow(self):\n return self.__nextUrl is not None\n\n def _fetchNextPage(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__nextUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n data = data if data else []\n\n self.__nextUrl = None\n if len(data) > 0:\n links = self.__parseLinkHeader(headers)\n if self._reversed:\n if \"prev\" in links:\n self.__nextUrl = links[\"prev\"]\n elif \"next\" in links:\n self.__nextUrl = links[\"next\"]\n self.__nextParams = None\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n content = [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data if element is not None\n ]\n if self._reversed:\n return content[::-1]\n return content\n\n def __parseLinkHeader(self, headers):\n links = {}\n if \"link\" in headers:\n linkHeaders = headers[\"link\"].split(\", \")\n for linkHeader in linkHeaders:\n (url, rel) = linkHeader.split(\"; \")\n url = url[1:-1]\n rel = rel[5:-1]\n links[rel] = url\n return links\n\n def get_page(self, page):\n params = dict(self.__firstParams)\n if page != 0:\n params[\"page\"] = page + 1\n if self.__requester.per_page != 30:\n params[\"per_page\"] = self.__requester.per_page\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n return [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n ]\n", "path": "github/PaginatedList.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Bill Mill <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 davidbrai <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Dan Vanderkam <[email protected]> #\n# Copyright 2015 Eliot Walker <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2018 Gilad Shefer <[email protected]> #\n# Copyright 2018 Joel Koglin <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. 
#\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\ntry:\n from urllib.parse import parse_qs\nexcept ImportError:\n from urlparse import parse_qs\n\nimport github.GithubObject\n\n\nclass PaginatedListBase:\n def __init__(self):\n self.__elements = list()\n\n def __getitem__(self, index):\n assert isinstance(index, (int, slice))\n if isinstance(index, (int, long)):\n self.__fetchToIndex(index)\n return self.__elements[index]\n else:\n return self._Slice(self, index)\n\n def __iter__(self):\n for element in self.__elements:\n yield element\n while self._couldGrow():\n newElements = self._grow()\n for element in newElements:\n yield element\n\n def _isBiggerThan(self, index):\n return len(self.__elements) > index or self._couldGrow()\n\n def __fetchToIndex(self, index):\n while len(self.__elements) <= index and self._couldGrow():\n self._grow()\n\n def _grow(self):\n newElements = self._fetchNextPage()\n self.__elements += newElements\n return newElements\n\n class _Slice:\n def __init__(self, theList, theSlice):\n self.__list = theList\n self.__start = theSlice.start or 0\n self.__stop = theSlice.stop\n self.__step = theSlice.step or 1\n\n def __iter__(self):\n index = self.__start\n while not self.__finished(index):\n if self.__list._isBiggerThan(index):\n yield self.__list[index]\n index += self.__step\n else:\n return\n\n def __finished(self, index):\n return self.__stop is not None and index >= self.__stop\n\n\nclass PaginatedList(PaginatedListBase):\n \"\"\"\n This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.\n\n You can simply enumerate through instances of this class::\n\n for repo in user.get_repos():\n print(repo.name)\n\n If you want to know the total number of items in the list::\n\n print(user.get_repos().totalCount)\n print(len(user.get_repos()))\n\n You can also index them or take slices::\n\n second_repo = user.get_repos()[1]\n first_repos = user.get_repos()[:10]\n\n If you want to iterate in reversed order, just do::\n\n for repo in user.get_repos().reversed:\n print(repo.name)\n\n And if you really need it, you can explicitly access a specific page::\n\n some_repos = user.get_repos().get_page(0)\n some_other_repos = user.get_repos().get_page(3)\n \"\"\"\n\n def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item=\"items\"):\n PaginatedListBase.__init__(self)\n self.__requester = requester\n self.__contentClass = contentClass\n self.__firstUrl = firstUrl\n self.__firstParams = firstParams or ()\n self.__nextUrl = firstUrl\n self.__nextParams = firstParams or {}\n self.__headers = headers\n self.__list_item = list_item\n if self.__requester.per_page != 30:\n self.__nextParams[\"per_page\"] = self.__requester.per_page\n self._reversed = False\n self.__totalCount = None\n\n @property\n def totalCount(self):\n if not self.__totalCount:\n params = {} if self.__nextParams is None else self.__nextParams.copy()\n # set per_page = 1 so the totalCount is just the number of pages\n params.update({\"per_page\": 1})\n headers, data = 
self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n if 'link' not in headers:\n self.__totalCount = len(data) if data else 0\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n self.__totalCount = int(parse_qs(lastUrl)['page'][0])\n return self.__totalCount\n\n def _getLastPageUrl(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n return lastUrl\n\n @property\n def reversed(self):\n r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams, self.__headers, self.__list_item)\n r.__reverse()\n return r\n\n def __reverse(self):\n self._reversed = True\n lastUrl = self._getLastPageUrl()\n if lastUrl:\n self.__nextUrl = lastUrl\n\n def _couldGrow(self):\n return self.__nextUrl is not None\n\n def _fetchNextPage(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__nextUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n data = data if data else []\n\n self.__nextUrl = None\n if len(data) > 0:\n links = self.__parseLinkHeader(headers)\n if self._reversed:\n if \"prev\" in links:\n self.__nextUrl = links[\"prev\"]\n elif \"next\" in links:\n self.__nextUrl = links[\"next\"]\n self.__nextParams = None\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n content = [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data if element is not None\n ]\n if self._reversed:\n return content[::-1]\n return content\n\n def __parseLinkHeader(self, headers):\n links = {}\n if \"link\" in headers:\n linkHeaders = headers[\"link\"].split(\", \")\n for linkHeader in linkHeaders:\n (url, rel) = linkHeader.split(\"; \")\n url = url[1:-1]\n rel = rel[5:-1]\n links[rel] = url\n return links\n\n def get_page(self, page):\n params = dict(self.__firstParams)\n if page != 0:\n params[\"page\"] = page + 1\n if self.__requester.per_page != 30:\n params[\"per_page\"] = self.__requester.per_page\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n return [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n ]\n", "path": "github/PaginatedList.py"}]}
| 3,072 | 132 |
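
The one-line fix in the record above forwards `headers` and `list_item` when the `reversed` property rebuilds the `PaginatedList`. The sketch below shows the general rebuild pattern with invented names (`PagedFetcher` is not the PyGithub class): any constructor state that is not forwarded is silently dropped from the copy.

```python
class PagedFetcher:
    """Stand-in for a paginated API wrapper; only the rebuild pattern matters."""

    def __init__(self, url, params, headers=None):
        self.url = url
        self.params = params
        self.headers = headers or {}

    @property
    def reversed(self):
        # Bug class from the record: PagedFetcher(self.url, self.params) would
        # lose self.headers. Forwarding every argument keeps the copy equivalent.
        return PagedFetcher(self.url, self.params, self.headers)


stars = PagedFetcher(
    "/repos/org/repo/stargazers",
    {"per_page": 30},
    {"Accept": "application/vnd.github.v3.star+json"},
)
assert stars.reversed.headers == stars.headers  # the media-type header survives
```
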
gh_patches_debug_2947
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-878
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't create Invites
**Describe the bug**
When creating a new invite, the following appears:

Rest of the page is blank.
It appeared since the last update I did a few days ago (don't know at which commit exactly, sorry) and didn't change with the last one.
**Additional context**
It doesn't matter what I set for Expiry and Use limit.
Also, there's an invite in the list that has "Max uses: None" that I'm not sure where it comes from.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/forms.py`
Content:
```
1 """ using django model forms """
2 import datetime
3 from collections import defaultdict
4
5 from django import forms
6 from django.forms import ModelForm, PasswordInput, widgets
7 from django.forms.widgets import Textarea
8 from django.utils import timezone
9 from django.utils.translation import gettext_lazy as _
10
11 from bookwyrm import models
12
13
14 class CustomForm(ModelForm):
15 """ add css classes to the forms """
16
17 def __init__(self, *args, **kwargs):
18 css_classes = defaultdict(lambda: "")
19 css_classes["text"] = "input"
20 css_classes["password"] = "input"
21 css_classes["email"] = "input"
22 css_classes["number"] = "input"
23 css_classes["checkbox"] = "checkbox"
24 css_classes["textarea"] = "textarea"
25 super(CustomForm, self).__init__(*args, **kwargs)
26 for visible in self.visible_fields():
27 if hasattr(visible.field.widget, "input_type"):
28 input_type = visible.field.widget.input_type
29 if isinstance(visible.field.widget, Textarea):
30 input_type = "textarea"
31 visible.field.widget.attrs["cols"] = None
32 visible.field.widget.attrs["rows"] = None
33 visible.field.widget.attrs["class"] = css_classes[input_type]
34
35
36 # pylint: disable=missing-class-docstring
37 class LoginForm(CustomForm):
38 class Meta:
39 model = models.User
40 fields = ["localname", "password"]
41 help_texts = {f: None for f in fields}
42 widgets = {
43 "password": PasswordInput(),
44 }
45
46
47 class RegisterForm(CustomForm):
48 class Meta:
49 model = models.User
50 fields = ["localname", "email", "password"]
51 help_texts = {f: None for f in fields}
52 widgets = {"password": PasswordInput()}
53
54
55 class RatingForm(CustomForm):
56 class Meta:
57 model = models.ReviewRating
58 fields = ["user", "book", "rating", "privacy"]
59
60
61 class ReviewForm(CustomForm):
62 class Meta:
63 model = models.Review
64 fields = [
65 "user",
66 "book",
67 "name",
68 "content",
69 "rating",
70 "content_warning",
71 "sensitive",
72 "privacy",
73 ]
74
75
76 class CommentForm(CustomForm):
77 class Meta:
78 model = models.Comment
79 fields = [
80 "user",
81 "book",
82 "content",
83 "content_warning",
84 "sensitive",
85 "privacy",
86 "progress",
87 "progress_mode",
88 ]
89
90
91 class QuotationForm(CustomForm):
92 class Meta:
93 model = models.Quotation
94 fields = [
95 "user",
96 "book",
97 "quote",
98 "content",
99 "content_warning",
100 "sensitive",
101 "privacy",
102 ]
103
104
105 class ReplyForm(CustomForm):
106 class Meta:
107 model = models.Status
108 fields = [
109 "user",
110 "content",
111 "content_warning",
112 "sensitive",
113 "reply_parent",
114 "privacy",
115 ]
116
117
118 class StatusForm(CustomForm):
119 class Meta:
120 model = models.Status
121 fields = ["user", "content", "content_warning", "sensitive", "privacy"]
122
123
124 class EditUserForm(CustomForm):
125 class Meta:
126 model = models.User
127 fields = [
128 "avatar",
129 "name",
130 "email",
131 "summary",
132 "show_goal",
133 "manually_approves_followers",
134 "discoverable",
135 "preferred_timezone",
136 ]
137 help_texts = {f: None for f in fields}
138
139
140 class LimitedEditUserForm(CustomForm):
141 class Meta:
142 model = models.User
143 fields = [
144 "avatar",
145 "name",
146 "summary",
147 "manually_approves_followers",
148 "discoverable",
149 ]
150 help_texts = {f: None for f in fields}
151
152
153 class TagForm(CustomForm):
154 class Meta:
155 model = models.Tag
156 fields = ["name"]
157 help_texts = {f: None for f in fields}
158 labels = {"name": "Add a tag"}
159
160
161 class CoverForm(CustomForm):
162 class Meta:
163 model = models.Book
164 fields = ["cover"]
165 help_texts = {f: None for f in fields}
166
167
168 class EditionForm(CustomForm):
169 class Meta:
170 model = models.Edition
171 exclude = [
172 "remote_id",
173 "origin_id",
174 "created_date",
175 "updated_date",
176 "edition_rank",
177 "authors",
178 "parent_work",
179 "shelves",
180 "subjects", # TODO
181 "subject_places", # TODO
182 "connector",
183 ]
184
185
186 class AuthorForm(CustomForm):
187 class Meta:
188 model = models.Author
189 exclude = [
190 "remote_id",
191 "origin_id",
192 "created_date",
193 "updated_date",
194 ]
195
196
197 class ImportForm(forms.Form):
198 csv_file = forms.FileField()
199
200
201 class ExpiryWidget(widgets.Select):
202 def value_from_datadict(self, data, files, name):
203 """ human-readable exiration time buckets """
204 selected_string = super().value_from_datadict(data, files, name)
205
206 if selected_string == "day":
207 interval = datetime.timedelta(days=1)
208 elif selected_string == "week":
209 interval = datetime.timedelta(days=7)
210 elif selected_string == "month":
211 interval = datetime.timedelta(days=31) # Close enough?
212 elif selected_string == "forever":
213 return None
214 else:
215 return selected_string # "This will raise
216
217 return timezone.now() + interval
218
219
220 class InviteRequestForm(CustomForm):
221 def clean(self):
222 """ make sure the email isn't in use by a registered user """
223 cleaned_data = super().clean()
224 email = cleaned_data.get("email")
225 if email and models.User.objects.filter(email=email).exists():
226 self.add_error("email", _("A user with this email already exists."))
227
228 class Meta:
229 model = models.InviteRequest
230 fields = ["email"]
231
232
233 class CreateInviteForm(CustomForm):
234 class Meta:
235 model = models.SiteInvite
236 exclude = ["code", "user", "times_used"]
237 widgets = {
238 "expiry": ExpiryWidget(
239 choices=[
240 ("day", _("One Day")),
241 ("week", _("One Week")),
242 ("month", _("One Month")),
243 ("forever", _("Does Not Expire")),
244 ]
245 ),
246 "use_limit": widgets.Select(
247 choices=[
248 (i, _("%(count)d uses" % {"count": i}))
249 for i in [1, 5, 10, 25, 50, 100]
250 ]
251 + [(None, _("Unlimited"))]
252 ),
253 }
254
255
256 class ShelfForm(CustomForm):
257 class Meta:
258 model = models.Shelf
259 fields = ["user", "name", "privacy"]
260
261
262 class GoalForm(CustomForm):
263 class Meta:
264 model = models.AnnualGoal
265 fields = ["user", "year", "goal", "privacy"]
266
267
268 class SiteForm(CustomForm):
269 class Meta:
270 model = models.SiteSettings
271 exclude = []
272
273
274 class ListForm(CustomForm):
275 class Meta:
276 model = models.List
277 fields = ["user", "name", "description", "curation", "privacy"]
278
279
280 class ReportForm(CustomForm):
281 class Meta:
282 model = models.Report
283 fields = ["user", "reporter", "statuses", "note"]
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/forms.py b/bookwyrm/forms.py
--- a/bookwyrm/forms.py
+++ b/bookwyrm/forms.py
@@ -233,7 +233,7 @@
class CreateInviteForm(CustomForm):
class Meta:
model = models.SiteInvite
- exclude = ["code", "user", "times_used"]
+ exclude = ["code", "user", "times_used", "invitees"]
widgets = {
"expiry": ExpiryWidget(
choices=[
|
{"golden_diff": "diff --git a/bookwyrm/forms.py b/bookwyrm/forms.py\n--- a/bookwyrm/forms.py\n+++ b/bookwyrm/forms.py\n@@ -233,7 +233,7 @@\n class CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n- exclude = [\"code\", \"user\", \"times_used\"]\n+ exclude = [\"code\", \"user\", \"times_used\", \"invitees\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n", "issue": "Can't create Invites\n**Describe the bug**\r\nWhen creating a new invite, the following appears:\r\n\r\nRest of the page is blank.\r\n\r\nIt appeared since the last update I did a few days ago (don't know at which commit exactly, sorry) and didn't change with the last one.\r\n\r\n**Additional context**\r\nIt doesn't matter what I set for Expiry and Use limit.\r\nAlso, there's an invite in the list that has \"Max uses: None\" that I'm not sure where it comes from.\r\n\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nimport datetime\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.forms import ModelForm, PasswordInput, widgets\nfrom django.forms.widgets import Textarea\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\n\n\nclass CustomForm(ModelForm):\n \"\"\" add css classes to the forms \"\"\"\n\n def __init__(self, *args, **kwargs):\n css_classes = defaultdict(lambda: \"\")\n css_classes[\"text\"] = \"input\"\n css_classes[\"password\"] = \"input\"\n css_classes[\"email\"] = \"input\"\n css_classes[\"number\"] = \"input\"\n css_classes[\"checkbox\"] = \"checkbox\"\n css_classes[\"textarea\"] = \"textarea\"\n super(CustomForm, self).__init__(*args, **kwargs)\n for visible in self.visible_fields():\n if hasattr(visible.field.widget, \"input_type\"):\n input_type = visible.field.widget.input_type\n if isinstance(visible.field.widget, Textarea):\n input_type = \"textarea\"\n visible.field.widget.attrs[\"cols\"] = None\n visible.field.widget.attrs[\"rows\"] = None\n visible.field.widget.attrs[\"class\"] = css_classes[input_type]\n\n\n# pylint: disable=missing-class-docstring\nclass LoginForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"password\": PasswordInput(),\n }\n\n\nclass RegisterForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"email\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\"password\": PasswordInput()}\n\n\nclass RatingForm(CustomForm):\n class Meta:\n model = models.ReviewRating\n fields = [\"user\", \"book\", \"rating\", \"privacy\"]\n\n\nclass ReviewForm(CustomForm):\n class Meta:\n model = models.Review\n fields = [\n \"user\",\n \"book\",\n \"name\",\n \"content\",\n \"rating\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass CommentForm(CustomForm):\n class Meta:\n model = models.Comment\n fields = [\n \"user\",\n \"book\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n \"progress\",\n \"progress_mode\",\n ]\n\n\nclass QuotationForm(CustomForm):\n class Meta:\n model = models.Quotation\n fields = [\n \"user\",\n \"book\",\n \"quote\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass ReplyForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\n \"user\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"reply_parent\",\n \"privacy\",\n ]\n\n\nclass StatusForm(CustomForm):\n class Meta:\n model 
= models.Status\n fields = [\"user\", \"content\", \"content_warning\", \"sensitive\", \"privacy\"]\n\n\nclass EditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"email\",\n \"summary\",\n \"show_goal\",\n \"manually_approves_followers\",\n \"discoverable\",\n \"preferred_timezone\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass LimitedEditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"summary\",\n \"manually_approves_followers\",\n \"discoverable\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass TagForm(CustomForm):\n class Meta:\n model = models.Tag\n fields = [\"name\"]\n help_texts = {f: None for f in fields}\n labels = {\"name\": \"Add a tag\"}\n\n\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n \"edition_rank\",\n \"authors\",\n \"parent_work\",\n \"shelves\",\n \"subjects\", # TODO\n \"subject_places\", # TODO\n \"connector\",\n ]\n\n\nclass AuthorForm(CustomForm):\n class Meta:\n model = models.Author\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n ]\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ExpiryWidget(widgets.Select):\n def value_from_datadict(self, data, files, name):\n \"\"\" human-readable exiration time buckets \"\"\"\n selected_string = super().value_from_datadict(data, files, name)\n\n if selected_string == \"day\":\n interval = datetime.timedelta(days=1)\n elif selected_string == \"week\":\n interval = datetime.timedelta(days=7)\n elif selected_string == \"month\":\n interval = datetime.timedelta(days=31) # Close enough?\n elif selected_string == \"forever\":\n return None\n else:\n return selected_string # \"This will raise\n\n return timezone.now() + interval\n\n\nclass InviteRequestForm(CustomForm):\n def clean(self):\n \"\"\" make sure the email isn't in use by a registered user \"\"\"\n cleaned_data = super().clean()\n email = cleaned_data.get(\"email\")\n if email and models.User.objects.filter(email=email).exists():\n self.add_error(\"email\", _(\"A user with this email already exists.\"))\n\n class Meta:\n model = models.InviteRequest\n fields = [\"email\"]\n\n\nclass CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n exclude = [\"code\", \"user\", \"times_used\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n (\"day\", _(\"One Day\")),\n (\"week\", _(\"One Week\")),\n (\"month\", _(\"One Month\")),\n (\"forever\", _(\"Does Not Expire\")),\n ]\n ),\n \"use_limit\": widgets.Select(\n choices=[\n (i, _(\"%(count)d uses\" % {\"count\": i}))\n for i in [1, 5, 10, 25, 50, 100]\n ]\n + [(None, _(\"Unlimited\"))]\n ),\n }\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass SiteForm(CustomForm):\n class Meta:\n model = models.SiteSettings\n exclude = []\n\n\nclass ListForm(CustomForm):\n class Meta:\n model = models.List\n fields = [\"user\", \"name\", \"description\", \"curation\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", 
\"reporter\", \"statuses\", \"note\"]\n", "path": "bookwyrm/forms.py"}], "after_files": [{"content": "\"\"\" using django model forms \"\"\"\nimport datetime\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.forms import ModelForm, PasswordInput, widgets\nfrom django.forms.widgets import Textarea\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\n\n\nclass CustomForm(ModelForm):\n \"\"\" add css classes to the forms \"\"\"\n\n def __init__(self, *args, **kwargs):\n css_classes = defaultdict(lambda: \"\")\n css_classes[\"text\"] = \"input\"\n css_classes[\"password\"] = \"input\"\n css_classes[\"email\"] = \"input\"\n css_classes[\"number\"] = \"input\"\n css_classes[\"checkbox\"] = \"checkbox\"\n css_classes[\"textarea\"] = \"textarea\"\n super(CustomForm, self).__init__(*args, **kwargs)\n for visible in self.visible_fields():\n if hasattr(visible.field.widget, \"input_type\"):\n input_type = visible.field.widget.input_type\n if isinstance(visible.field.widget, Textarea):\n input_type = \"textarea\"\n visible.field.widget.attrs[\"cols\"] = None\n visible.field.widget.attrs[\"rows\"] = None\n visible.field.widget.attrs[\"class\"] = css_classes[input_type]\n\n\n# pylint: disable=missing-class-docstring\nclass LoginForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"password\": PasswordInput(),\n }\n\n\nclass RegisterForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"email\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\"password\": PasswordInput()}\n\n\nclass RatingForm(CustomForm):\n class Meta:\n model = models.ReviewRating\n fields = [\"user\", \"book\", \"rating\", \"privacy\"]\n\n\nclass ReviewForm(CustomForm):\n class Meta:\n model = models.Review\n fields = [\n \"user\",\n \"book\",\n \"name\",\n \"content\",\n \"rating\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass CommentForm(CustomForm):\n class Meta:\n model = models.Comment\n fields = [\n \"user\",\n \"book\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n \"progress\",\n \"progress_mode\",\n ]\n\n\nclass QuotationForm(CustomForm):\n class Meta:\n model = models.Quotation\n fields = [\n \"user\",\n \"book\",\n \"quote\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass ReplyForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\n \"user\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"reply_parent\",\n \"privacy\",\n ]\n\n\nclass StatusForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\"user\", \"content\", \"content_warning\", \"sensitive\", \"privacy\"]\n\n\nclass EditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"email\",\n \"summary\",\n \"show_goal\",\n \"manually_approves_followers\",\n \"discoverable\",\n \"preferred_timezone\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass LimitedEditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"summary\",\n \"manually_approves_followers\",\n \"discoverable\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass TagForm(CustomForm):\n class Meta:\n model = models.Tag\n fields = [\"name\"]\n help_texts = {f: None for f in fields}\n labels = {\"name\": \"Add a tag\"}\n\n\nclass 
CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n \"edition_rank\",\n \"authors\",\n \"parent_work\",\n \"shelves\",\n \"subjects\", # TODO\n \"subject_places\", # TODO\n \"connector\",\n ]\n\n\nclass AuthorForm(CustomForm):\n class Meta:\n model = models.Author\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n ]\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ExpiryWidget(widgets.Select):\n def value_from_datadict(self, data, files, name):\n \"\"\" human-readable exiration time buckets \"\"\"\n selected_string = super().value_from_datadict(data, files, name)\n\n if selected_string == \"day\":\n interval = datetime.timedelta(days=1)\n elif selected_string == \"week\":\n interval = datetime.timedelta(days=7)\n elif selected_string == \"month\":\n interval = datetime.timedelta(days=31) # Close enough?\n elif selected_string == \"forever\":\n return None\n else:\n return selected_string # \"This will raise\n\n return timezone.now() + interval\n\n\nclass InviteRequestForm(CustomForm):\n def clean(self):\n \"\"\" make sure the email isn't in use by a registered user \"\"\"\n cleaned_data = super().clean()\n email = cleaned_data.get(\"email\")\n if email and models.User.objects.filter(email=email).exists():\n self.add_error(\"email\", _(\"A user with this email already exists.\"))\n\n class Meta:\n model = models.InviteRequest\n fields = [\"email\"]\n\n\nclass CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n exclude = [\"code\", \"user\", \"times_used\", \"invitees\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n (\"day\", _(\"One Day\")),\n (\"week\", _(\"One Week\")),\n (\"month\", _(\"One Month\")),\n (\"forever\", _(\"Does Not Expire\")),\n ]\n ),\n \"use_limit\": widgets.Select(\n choices=[\n (i, _(\"%(count)d uses\" % {\"count\": i}))\n for i in [1, 5, 10, 25, 50, 100]\n ]\n + [(None, _(\"Unlimited\"))]\n ),\n }\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass SiteForm(CustomForm):\n class Meta:\n model = models.SiteSettings\n exclude = []\n\n\nclass ListForm(CustomForm):\n class Meta:\n model = models.List\n fields = [\"user\", \"name\", \"description\", \"curation\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"statuses\", \"note\"]\n", "path": "bookwyrm/forms.py"}]}
| 2,813 | 112 |
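A quick way to sanity-check the `CreateInviteForm` fix above is a small form-level test. The sketch below is illustrative only: it assumes a configured Django test environment in which the `bookwyrm` app is importable, the test class and method names are invented, and the field names are taken from the listing and patch above.

```python
# Hedged sketch: regression check for the "invitees" exclusion shown above.
from django.test import TestCase

from bookwyrm import forms


class CreateInviteFormFieldsTest(TestCase):
    def test_invitees_is_not_rendered(self):
        form = forms.CreateInviteForm()
        # The relation added to `exclude` must not surface as a form field,
        # while the existing expiry/use-limit widgets keep working.
        self.assertNotIn("invitees", form.fields)
        self.assertIn("expiry", form.fields)
        self.assertIn("use_limit", form.fields)
```

The check only inspects the form class itself, so it needs no fixtures beyond a working test settings module.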
gh_patches_debug_35992
|
rasdani/github-patches
|
git_diff
|
safe-global__safe-config-service-8
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Safe App Provider information
The service should (optionally) include information about the provider of the application.
The provider information should have at least the following fields:
```json
{
"name" : <string>,
"url": <string>
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/safe_apps/admin.py`
Content:
```
1 from django.contrib import admin
2
3 from .models import SafeApp
4
5 admin.site.register(SafeApp)
6
```
Path: `src/safe_apps/models.py`
Content:
```
1 from django.contrib.postgres.fields import ArrayField
2 from django.db import models
3
4
5 class SafeApp(models.Model):
6 url = models.URLField(primary_key=True)
7 name = models.CharField(max_length=200)
8 icon_url = models.URLField()
9 description = models.CharField(max_length=200)
10 networks = ArrayField(models.IntegerField())
11
```
Path: `src/safe_apps/migrations/0001_initial.py`
Content:
```
1 # Generated by Django 3.2 on 2021-04-28 13:39
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 initial = True
10
11 dependencies = [
12 ]
13
14 operations = [
15 migrations.CreateModel(
16 name='SafeApp',
17 fields=[
18 ('url', models.URLField(primary_key=True, serialize=False)),
19 ('name', models.CharField(max_length=200)),
20 ('icon_url', models.URLField()),
21 ('description', models.CharField(max_length=200)),
22 ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
23 ],
24 ),
25 ]
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py
--- a/src/safe_apps/admin.py
+++ b/src/safe_apps/admin.py
@@ -1,5 +1,6 @@
from django.contrib import admin
-from .models import SafeApp
+from .models import SafeApp, Provider
-admin.site.register(SafeApp)
+models = [SafeApp, Provider]
+admin.site.register(models)
diff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py
--- a/src/safe_apps/migrations/0001_initial.py
+++ b/src/safe_apps/migrations/0001_initial.py
@@ -1,7 +1,8 @@
-# Generated by Django 3.2 on 2021-04-28 13:39
+# Generated by Django 3.2 on 2021-04-29 08:22
import django.contrib.postgres.fields
from django.db import migrations, models
+import django.db.models.deletion
class Migration(migrations.Migration):
@@ -12,6 +13,13 @@
]
operations = [
+ migrations.CreateModel(
+ name='Provider',
+ fields=[
+ ('url', models.URLField(primary_key=True, serialize=False)),
+ ('name', models.CharField(max_length=200)),
+ ],
+ ),
migrations.CreateModel(
name='SafeApp',
fields=[
@@ -20,6 +28,7 @@
('icon_url', models.URLField()),
('description', models.CharField(max_length=200)),
('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
+ ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),
],
),
]
diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -2,9 +2,21 @@
from django.db import models
+class Provider(models.Model):
+ url = models.URLField(primary_key=True)
+ name = models.CharField(max_length=200)
+
+ def __str__(self):
+ return f'{self.name} | {self.url}'
+
+
class SafeApp(models.Model):
url = models.URLField(primary_key=True)
name = models.CharField(max_length=200)
icon_url = models.URLField()
description = models.CharField(max_length=200)
networks = ArrayField(models.IntegerField())
+ provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)
+
+ def __str__(self):
+ return f'{self.name} | {self.url} | networks={self.networks}'
|
{"golden_diff": "diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py\n--- a/src/safe_apps/admin.py\n+++ b/src/safe_apps/admin.py\n@@ -1,5 +1,6 @@\n from django.contrib import admin\n \n-from .models import SafeApp\n+from .models import SafeApp, Provider\n \n-admin.site.register(SafeApp)\n+models = [SafeApp, Provider]\n+admin.site.register(models)\ndiff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py\n--- a/src/safe_apps/migrations/0001_initial.py\n+++ b/src/safe_apps/migrations/0001_initial.py\n@@ -1,7 +1,8 @@\n-# Generated by Django 3.2 on 2021-04-28 13:39\n+# Generated by Django 3.2 on 2021-04-29 08:22\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n+import django.db.models.deletion\n \n \n class Migration(migrations.Migration):\n@@ -12,6 +13,13 @@\n ]\n \n operations = [\n+ migrations.CreateModel(\n+ name='Provider',\n+ fields=[\n+ ('url', models.URLField(primary_key=True, serialize=False)),\n+ ('name', models.CharField(max_length=200)),\n+ ],\n+ ),\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n@@ -20,6 +28,7 @@\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n+ ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),\n ],\n ),\n ]\ndiff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -2,9 +2,21 @@\n from django.db import models\n \n \n+class Provider(models.Model):\n+ url = models.URLField(primary_key=True)\n+ name = models.CharField(max_length=200)\n+\n+ def __str__(self):\n+ return f'{self.name} | {self.url}'\n+\n+\n class SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n+ provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)\n+\n+ def __str__(self):\n+ return f'{self.name} | {self.url} | networks={self.networks}'\n", "issue": "Add Safe App Provider information\nThe service should (optionally) include information about the provider of the application.\r\n\r\nThe provider information should have at least the following fields:\r\n\r\n```json\r\n{\r\n \"name\" : <string>,\r\n \"url\": <string>\r\n}\r\n```\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import SafeApp\n\nadmin.site.register(SafeApp)\n", "path": "src/safe_apps/admin.py"}, {"content": "from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\n\nclass SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n", "path": "src/safe_apps/models.py"}, {"content": "# Generated by Django 3.2 on 2021-04-28 13:39\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', 
models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n ],\n ),\n ]\n", "path": "src/safe_apps/migrations/0001_initial.py"}], "after_files": [{"content": "from django.contrib import admin\n\nfrom .models import SafeApp, Provider\n\nmodels = [SafeApp, Provider]\nadmin.site.register(models)\n", "path": "src/safe_apps/admin.py"}, {"content": "from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\n\nclass Provider(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n\n def __str__(self):\n return f'{self.name} | {self.url}'\n\n\nclass SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)\n\n def __str__(self):\n return f'{self.name} | {self.url} | networks={self.networks}'\n", "path": "src/safe_apps/models.py"}, {"content": "# Generated by Django 3.2 on 2021-04-29 08:22\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Provider',\n fields=[\n ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),\n ],\n ),\n ]\n", "path": "src/safe_apps/migrations/0001_initial.py"}]}
| 674 | 634 |
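The second record's patch introduces a nullable `Provider` relation on `SafeApp`. The snippet below walks through the intended behaviour at the ORM level; it is a sketch only, assuming a migrated Django project with the `safe_apps` app on a PostgreSQL database (required by `ArrayField`), and every concrete URL and value in it is invented for illustration.

```python
# Hedged sketch: exercising the Provider/SafeApp relation added above
# (run inside a Django shell or test case; all values are made up).
from safe_apps.models import Provider, SafeApp

provider = Provider.objects.create(url="https://provider.example", name="Example Org")
app = SafeApp.objects.create(
    url="https://apps.example/swap",
    name="Swap",
    icon_url="https://apps.example/swap/icon.png",
    description="Token swap Safe App",
    networks=[1, 4],
    provider=provider,  # optional: the field is nullable
)

assert app.provider.name == "Example Org"

# SET_NULL keeps the app row but clears the reference when the provider goes away.
provider.delete()
app.refresh_from_db()
assert app.provider is None
```

This mirrors the `null=True, on_delete=models.SET_NULL` choice in the patched model, which keeps provider information optional as the issue requests.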
gh_patches_debug_35623
|
rasdani/github-patches
|
git_diff
|
pfnet__pytorch-pfn-extras-395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Merge `_Engine` with `Trainer`
Now that the only class inheriting `ppe.engine._Engine` is `ppe.training.Trainer`, we can merge these classes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/training/_trainer.py`
Content:
```
1 # mypy: ignore-errors
2
3 import queue
4 import time
5 import warnings
6 from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, TYPE_CHECKING
7
8 import torch
9
10 from pytorch_pfn_extras import training
11 from pytorch_pfn_extras.training import extension as extension
12 from pytorch_pfn_extras.training import trigger as trigger_module
13 import pytorch_pfn_extras.reporting as reporting
14 from pytorch_pfn_extras.profiler import record
15
16 from pytorch_pfn_extras.training._manager_protocol import ExtensionsManagerProtocol
17 from pytorch_pfn_extras.training.trigger import Trigger, TriggerLike
18
19 if TYPE_CHECKING:
20 from pytorch_pfn_extras import handler as handler_module
21 from pytorch_pfn_extras.training._evaluator import Evaluator
22 from pytorch_pfn_extras.profiler._time_summary import _ReportNotification
23
24
25 class _Engine:
26 def __init__(
27 self,
28 handler: 'handler_module.BaseHandler',
29 models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],
30 **kwargs: Any,
31 ) -> None:
32 self.handler = handler
33 self._manager: Optional['training.ExtensionsManager'] = None
34
35 # The followings are used when setting up a manager instance
36 if not isinstance(models, dict):
37 if not isinstance(models, torch.nn.Module):
38 raise ValueError(
39 'model must be an instance of dict or toch.nn.Module')
40 self._models = {'main': models}
41 else:
42 self._models = models
43 self._kwargs = kwargs
44 self._extensions: List[ # list of (args, kwargs)
45 Tuple[Tuple['training.Extension', Optional[str],
46 'TriggerLike', Optional[int]],
47 Dict[str, Any]]] = []
48 self._manager_state: Optional[Dict[str, Any]] = None
49
50 def extend(
51 self,
52 extension: 'training.Extension',
53 name: Optional[str] = None,
54 trigger: 'TriggerLike' = None,
55 priority: Optional[int] = None,
56 *,
57 call_before_training: bool = False,
58 **kwargs: Any,
59 ) -> None:
60 if self._manager is not None:
61 raise RuntimeError('cannot extend after starting the engine')
62 self._extensions.append(
63 ((extension, name, trigger, priority),
64 dict(call_before_training=call_before_training, **kwargs)))
65
66 def _setup_manager(self, iters_per_epoch: int) -> 'training.ExtensionsManager':
67 from pytorch_pfn_extras.training import ExtensionsManager
68 self._manager = ExtensionsManager(
69 self._models, iters_per_epoch=iters_per_epoch, **self._kwargs)
70 for ex_args, ex_kwargs in self._extensions:
71 self._manager.extend(*ex_args, **ex_kwargs)
72 if self._manager_state is not None:
73 self.manager.load_state_dict(self._manager_state)
74 return self._manager
75
76 @property
77 def manager(self) -> 'training.ExtensionsManager':
78 if self._manager is None:
79 raise RuntimeError('the engine is not started yet')
80 return self._manager
81
82 @property
83 def models(self) -> Dict[str, torch.nn.Module]:
84 # TODO(kmaehashi): do we need this convenient interface for handlers?
85 return self.manager.raw_models
86
87 @property
88 def optimizers(self) -> Dict[str, torch.optim.Optimizer]:
89 return self.manager.optimizers
90
91 def state_dict(self) -> Dict[str, Any]:
92 return self.manager.state_dict()
93
94 def load_state_dict(self, to_load: Dict[str, Any]) -> None:
95 if self._manager is None:
96 self._manager_state = to_load
97 return
98 self.manager.load_state_dict(to_load)
99
100 def run(self, *args: Any, **kwargs: Any) -> None:
101 raise NotImplementedError
102
103
104 class Trainer(_Engine):
105 def __init__(
106 self,
107 handler: 'handler_module.BaseHandler',
108 *,
109 evaluator: Optional[Union['Evaluator', Tuple['Evaluator', TriggerLike]]],
110 **kwargs: Any,
111 ):
112 super().__init__(handler, **kwargs)
113 if isinstance(evaluator, tuple):
114 self.evaluator: Optional['Evaluator'] = None
115 self.evaluator, trigger = evaluator
116 self.evaluator_trigger = trigger_module.get_trigger(trigger)
117 else:
118 self.evaluator = evaluator
119 self.evaluator_trigger = trigger_module.get_trigger((1, 'epoch'))
120 self.val_loader = None
121
122 @property
123 def epoch(self) -> int:
124 return self.manager.epoch
125
126 @property
127 def epoch_detail(self) -> float:
128 return self.manager.epoch_detail
129
130 @property
131 def iteration(self) -> int:
132 return self.manager.iteration
133
134 @property
135 def is_before_training(self) -> bool:
136 return self.manager.iteration == 0
137
138 @property
139 def stop_trigger(self) -> Trigger:
140 return self._stop_trigger
141
142 @stop_trigger.setter
143 def stop_trigger(self, trigger: Trigger) -> None:
144 self._stop_trigger = trigger
145
146 def get_optimizer(self, name: str) -> torch.optim.Optimizer:
147 return self.manager.optimizers[name]
148
149 def set_optimizer(self, name: str, optimizer: torch.optim.Optimizer) -> None:
150 self.manager.optimizers[name] = optimizer # type: ignore[index]
151
152 def is_epoch_last_iter(self, idx: int) -> bool:
153 return (idx + 1) == (self.manager._iters_per_epoch)
154
155 def _complete_step(
156 self,
157 idx: int,
158 outs: Any,
159 *,
160 is_deferred: bool = False,
161 ) -> None:
162 self._deferred = False # notify that the function was called
163 c_idx = self._idxs.get()
164 # Asure that iterations complete in order
165 if c_idx != idx:
166 raise RuntimeError(
167 'Completed a not expected iteration. '
168 '{} was expected but completion of {} happened'.format(
169 c_idx, idx)
170 )
171 x = self._inputs.get()
172 begin = self._times.get()
173 observed = self._observed.get()
174 (
175 record_iteration,
176 record_run_iteration,
177 record_train_step,
178 ) = self._profile_records.get()
179 # If the iteration was not deferred this is still under the
180 # `manager.run_iteration` scope
181 # Change the current reporter observation
182 # To be the one to be completed
183 if is_deferred:
184 # Complete profiler record of `train_step`
185 record_train_step.complete()
186 # We want to report the previously obtained values in `train_step`
187 cm_iter = self.manager.complete_iteration(observation=observed)
188 cm_iter.__enter__()
189 else:
190 reporting.get_current_reporter().observation = observed
191 self.manager.observation = observed
192 self.handler.train_post_step(self, idx, x, outs)
193 reporting.report({"elapsed_time": time.time() - begin})
194 if is_deferred:
195 cm_iter.__exit__(None, None, None)
196 # Complete profiler record of `run_iteration` and iteration
197 record_run_iteration.complete()
198 record_iteration.complete()
199
200 def _run_evaluator(self) -> None:
201 assert self.evaluator is not None
202 if self._val_loader is None:
203 raise ValueError('"val_loader" is not given.')
204 self.evaluator.handler.train_validation_begin(self, self.evaluator)
205 self.evaluator.run(self._val_loader, eval_len=self._eval_len)
206 self.evaluator.handler.train_validation_end(self, self.evaluator)
207
208 def run(self, # type: ignore[override]
209 train_loader: Iterable[Any],
210 val_loader: Optional[Iterable[Any]] = None,
211 *,
212 train_len: Optional[int] = None,
213 eval_len: Optional[int] = None) -> None:
214 """Executes the training loop.
215
216 Args:
217 train_loader (torch.utils.data.DataLoader):
218 A data loader for training.
219 val_loader (torch.utils.data.DataLoader, optional):
220 A data loader passed to ``Evaluator.run()``.
221 train_len (int, optional):
222 The number of iterations per one training epoch. The default
223 value is inferred from the size of training data loader.
224 eval_len (int, optional):
225 The number of iterations per one evaluation epoch, passed
226 to ``Evaluator.run()``
227
228 .. seealso::
229 - :meth:`pytorch_pfn_extras.training._evaluator.Evaluator`
230 """
231 if train_len is None:
232 train_len = len(train_loader) # type: ignore[arg-type]
233
234 self._val_loader = val_loader
235 self._eval_len = eval_len
236
237 class _EvaluatorExt:
238 def __init__(self, trainer: 'Trainer') -> None:
239 self.name = 'Evaluator'
240 self.needs_model_state = True
241 self._trainer = trainer
242
243 def __call__(self, manager: ExtensionsManagerProtocol) -> None:
244 self._trainer._run_evaluator()
245
246 if self._manager is None:
247 self._manager = self._setup_manager(train_len)
248 if self.evaluator is not None:
249 # Register the evaluator as an extension to the manager
250 # To be triggered with the correct timing
251 self._manager.extend(
252 _EvaluatorExt(self),
253 trigger=self.evaluator_trigger,
254 priority=extension.PRIORITY_WRITER,
255 )
256 self.handler.train_setup(self, train_loader)
257 if self.evaluator is None:
258 if val_loader is not None:
259 warnings.warn(
260 '`val_loader` is given whereas the evaluator is missing.',
261 UserWarning)
262 else:
263 if val_loader is None:
264 raise ValueError('`val_loader` is required')
265 self.evaluator.handler.eval_setup(self.evaluator, val_loader)
266
267 while not self.manager.stop_trigger:
268 self.handler.train_epoch_begin(self, train_loader)
269
270 # When iterations are completed in the callback
271 # This is needed to avoid being constantly passing parameters
272 self._idxs: 'queue.Queue[int]' = queue.Queue()
273 self._inputs: 'queue.Queue[Any]' = queue.Queue()
274 self._times: 'queue.Queue[float]' = queue.Queue()
275 self._observed: 'queue.Queue[reporting.Observation]' = queue.Queue()
276 # Iterator must be created after `train_epoch_begin` as it may be
277 # using a DistributedSampler.
278 loader_iter = iter(train_loader)
279 self._profile_records: 'queue.Queue[List[_ReportNotification]]' \
280 = queue.Queue()
281 for idx in range(train_len):
282 with record(
283 "pytorch_pfn_extras.training.Trainer:iteration",
284 use_cuda=torch.cuda.is_available()
285 ) as ntf0:
286 try:
287 with record(
288 "pytorch_pfn_extras.training.Trainer:get_data"
289 ):
290 x = next(loader_iter)
291 except StopIteration:
292 loader_iter = iter(train_loader)
293 with record(
294 "pytorch_pfn_extras.training.Trainer:get_data"
295 ):
296 x = next(loader_iter)
297 begin = time.time()
298 self._idxs.put(idx)
299 self._inputs.put(x)
300 self._times.put(begin)
301 self._deferred = True
302 with record(
303 "pytorch_pfn_extras.training.Trainer:run_iteration",
304 use_cuda=torch.cuda.is_available()
305 ) as ntf1, \
306 self.manager.run_iteration() as iter_notifier:
307 self._observed.put(self.manager.observation)
308 with record(
309 "pytorch_pfn_extras.training.Trainer:train_step",
310 use_cuda=torch.cuda.is_available(),
311 ) as ntf2:
312 self._profile_records.put([ntf0, ntf1, ntf2])
313 self.handler.train_step(
314 self, idx, x, complete_fn=self._complete_step)
315 # Check if the callback was called
316 if self._deferred:
317 # The iteration will be completed later
318 ntf0.defer()
319 ntf1.defer()
320 ntf2.defer()
321 iter_notifier.defer()
322 # In some cases, DataLoaders are continuos
323 # And will keep yielding results even if the epoch
324 # is completed. We forcefully exit at the end of
325 # every epoch
326 if (
327 self.is_epoch_last_iter(idx)
328 or self.manager.stop_trigger
329 ):
330 break
331 # In handlers that support a completely Async model train_epoch_end
332 # Will take care of completing pending work
333 self.handler.train_epoch_end(self)
334
335
336 # For backward compatibility
337 _Trainer = Trainer
338
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_pfn_extras/training/_trainer.py b/pytorch_pfn_extras/training/_trainer.py
--- a/pytorch_pfn_extras/training/_trainer.py
+++ b/pytorch_pfn_extras/training/_trainer.py
@@ -22,13 +22,15 @@
from pytorch_pfn_extras.profiler._time_summary import _ReportNotification
-class _Engine:
+class Trainer:
def __init__(
self,
handler: 'handler_module.BaseHandler',
+ *,
+ evaluator: Optional[Union['Evaluator', Tuple['Evaluator', TriggerLike]]],
models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],
**kwargs: Any,
- ) -> None:
+ ):
self.handler = handler
self._manager: Optional['training.ExtensionsManager'] = None
@@ -47,6 +49,15 @@
Dict[str, Any]]] = []
self._manager_state: Optional[Dict[str, Any]] = None
+ if isinstance(evaluator, tuple):
+ self.evaluator: Optional['Evaluator'] = None
+ self.evaluator, trigger = evaluator
+ self.evaluator_trigger = trigger_module.get_trigger(trigger)
+ else:
+ self.evaluator = evaluator
+ self.evaluator_trigger = trigger_module.get_trigger((1, 'epoch'))
+ self.val_loader = None
+
def extend(
self,
extension: 'training.Extension',
@@ -97,28 +108,6 @@
return
self.manager.load_state_dict(to_load)
- def run(self, *args: Any, **kwargs: Any) -> None:
- raise NotImplementedError
-
-
-class Trainer(_Engine):
- def __init__(
- self,
- handler: 'handler_module.BaseHandler',
- *,
- evaluator: Optional[Union['Evaluator', Tuple['Evaluator', TriggerLike]]],
- **kwargs: Any,
- ):
- super().__init__(handler, **kwargs)
- if isinstance(evaluator, tuple):
- self.evaluator: Optional['Evaluator'] = None
- self.evaluator, trigger = evaluator
- self.evaluator_trigger = trigger_module.get_trigger(trigger)
- else:
- self.evaluator = evaluator
- self.evaluator_trigger = trigger_module.get_trigger((1, 'epoch'))
- self.val_loader = None
-
@property
def epoch(self) -> int:
return self.manager.epoch
|
{"golden_diff": "diff --git a/pytorch_pfn_extras/training/_trainer.py b/pytorch_pfn_extras/training/_trainer.py\n--- a/pytorch_pfn_extras/training/_trainer.py\n+++ b/pytorch_pfn_extras/training/_trainer.py\n@@ -22,13 +22,15 @@\n from pytorch_pfn_extras.profiler._time_summary import _ReportNotification\n \n \n-class _Engine:\n+class Trainer:\n def __init__(\n self,\n handler: 'handler_module.BaseHandler',\n+ *,\n+ evaluator: Optional[Union['Evaluator', Tuple['Evaluator', TriggerLike]]],\n models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],\n **kwargs: Any,\n- ) -> None:\n+ ):\n self.handler = handler\n self._manager: Optional['training.ExtensionsManager'] = None\n \n@@ -47,6 +49,15 @@\n Dict[str, Any]]] = []\n self._manager_state: Optional[Dict[str, Any]] = None\n \n+ if isinstance(evaluator, tuple):\n+ self.evaluator: Optional['Evaluator'] = None\n+ self.evaluator, trigger = evaluator\n+ self.evaluator_trigger = trigger_module.get_trigger(trigger)\n+ else:\n+ self.evaluator = evaluator\n+ self.evaluator_trigger = trigger_module.get_trigger((1, 'epoch'))\n+ self.val_loader = None\n+\n def extend(\n self,\n extension: 'training.Extension',\n@@ -97,28 +108,6 @@\n return\n self.manager.load_state_dict(to_load)\n \n- def run(self, *args: Any, **kwargs: Any) -> None:\n- raise NotImplementedError\n-\n-\n-class Trainer(_Engine):\n- def __init__(\n- self,\n- handler: 'handler_module.BaseHandler',\n- *,\n- evaluator: Optional[Union['Evaluator', Tuple['Evaluator', TriggerLike]]],\n- **kwargs: Any,\n- ):\n- super().__init__(handler, **kwargs)\n- if isinstance(evaluator, tuple):\n- self.evaluator: Optional['Evaluator'] = None\n- self.evaluator, trigger = evaluator\n- self.evaluator_trigger = trigger_module.get_trigger(trigger)\n- else:\n- self.evaluator = evaluator\n- self.evaluator_trigger = trigger_module.get_trigger((1, 'epoch'))\n- self.val_loader = None\n-\n @property\n def epoch(self) -> int:\n return self.manager.epoch\n", "issue": "Merge `_Engine` with `Trainer`\nNow that only class inheriting `ppe.engine._Engine` is `ppe.training.Trainer`, we can merge these classes.\n", "before_files": [{"content": "# mypy: ignore-errors\n\nimport queue\nimport time\nimport warnings\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple, Union, TYPE_CHECKING\n\nimport torch\n\nfrom pytorch_pfn_extras import training\nfrom pytorch_pfn_extras.training import extension as extension\nfrom pytorch_pfn_extras.training import trigger as trigger_module\nimport pytorch_pfn_extras.reporting as reporting\nfrom pytorch_pfn_extras.profiler import record\n\nfrom pytorch_pfn_extras.training._manager_protocol import ExtensionsManagerProtocol\nfrom pytorch_pfn_extras.training.trigger import Trigger, TriggerLike\n\nif TYPE_CHECKING:\n from pytorch_pfn_extras import handler as handler_module\n from pytorch_pfn_extras.training._evaluator import Evaluator\n from pytorch_pfn_extras.profiler._time_summary import _ReportNotification\n\n\nclass _Engine:\n def __init__(\n self,\n handler: 'handler_module.BaseHandler',\n models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],\n **kwargs: Any,\n ) -> None:\n self.handler = handler\n self._manager: Optional['training.ExtensionsManager'] = None\n\n # The followings are used when setting up a manager instance\n if not isinstance(models, dict):\n if not isinstance(models, torch.nn.Module):\n raise ValueError(\n 'model must be an instance of dict or toch.nn.Module')\n self._models = {'main': models}\n else:\n self._models = models\n self._kwargs = kwargs\n 
self._extensions: List[ # list of (args, kwargs)\n Tuple[Tuple['training.Extension', Optional[str],\n 'TriggerLike', Optional[int]],\n Dict[str, Any]]] = []\n self._manager_state: Optional[Dict[str, Any]] = None\n\n def extend(\n self,\n extension: 'training.Extension',\n name: Optional[str] = None,\n trigger: 'TriggerLike' = None,\n priority: Optional[int] = None,\n *,\n call_before_training: bool = False,\n **kwargs: Any,\n ) -> None:\n if self._manager is not None:\n raise RuntimeError('cannot extend after starting the engine')\n self._extensions.append(\n ((extension, name, trigger, priority),\n dict(call_before_training=call_before_training, **kwargs)))\n\n def _setup_manager(self, iters_per_epoch: int) -> 'training.ExtensionsManager':\n from pytorch_pfn_extras.training import ExtensionsManager\n self._manager = ExtensionsManager(\n self._models, iters_per_epoch=iters_per_epoch, **self._kwargs)\n for ex_args, ex_kwargs in self._extensions:\n self._manager.extend(*ex_args, **ex_kwargs)\n if self._manager_state is not None:\n self.manager.load_state_dict(self._manager_state)\n return self._manager\n\n @property\n def manager(self) -> 'training.ExtensionsManager':\n if self._manager is None:\n raise RuntimeError('the engine is not started yet')\n return self._manager\n\n @property\n def models(self) -> Dict[str, torch.nn.Module]:\n # TODO(kmaehashi): do we need this convenient interface for handlers?\n return self.manager.raw_models\n\n @property\n def optimizers(self) -> Dict[str, torch.optim.Optimizer]:\n return self.manager.optimizers\n\n def state_dict(self) -> Dict[str, Any]:\n return self.manager.state_dict()\n\n def load_state_dict(self, to_load: Dict[str, Any]) -> None:\n if self._manager is None:\n self._manager_state = to_load\n return\n self.manager.load_state_dict(to_load)\n\n def run(self, *args: Any, **kwargs: Any) -> None:\n raise NotImplementedError\n\n\nclass Trainer(_Engine):\n def __init__(\n self,\n handler: 'handler_module.BaseHandler',\n *,\n evaluator: Optional[Union['Evaluator', Tuple['Evaluator', TriggerLike]]],\n **kwargs: Any,\n ):\n super().__init__(handler, **kwargs)\n if isinstance(evaluator, tuple):\n self.evaluator: Optional['Evaluator'] = None\n self.evaluator, trigger = evaluator\n self.evaluator_trigger = trigger_module.get_trigger(trigger)\n else:\n self.evaluator = evaluator\n self.evaluator_trigger = trigger_module.get_trigger((1, 'epoch'))\n self.val_loader = None\n\n @property\n def epoch(self) -> int:\n return self.manager.epoch\n\n @property\n def epoch_detail(self) -> float:\n return self.manager.epoch_detail\n\n @property\n def iteration(self) -> int:\n return self.manager.iteration\n\n @property\n def is_before_training(self) -> bool:\n return self.manager.iteration == 0\n\n @property\n def stop_trigger(self) -> Trigger:\n return self._stop_trigger\n\n @stop_trigger.setter\n def stop_trigger(self, trigger: Trigger) -> None:\n self._stop_trigger = trigger\n\n def get_optimizer(self, name: str) -> torch.optim.Optimizer:\n return self.manager.optimizers[name]\n\n def set_optimizer(self, name: str, optimizer: torch.optim.Optimizer) -> None:\n self.manager.optimizers[name] = optimizer # type: ignore[index]\n\n def is_epoch_last_iter(self, idx: int) -> bool:\n return (idx + 1) == (self.manager._iters_per_epoch)\n\n def _complete_step(\n self,\n idx: int,\n outs: Any,\n *,\n is_deferred: bool = False,\n ) -> None:\n self._deferred = False # notify that the function was called\n c_idx = self._idxs.get()\n # Asure that iterations complete in order\n if 
c_idx != idx:\n raise RuntimeError(\n 'Completed a not expected iteration. '\n '{} was expected but completion of {} happened'.format(\n c_idx, idx)\n )\n x = self._inputs.get()\n begin = self._times.get()\n observed = self._observed.get()\n (\n record_iteration,\n record_run_iteration,\n record_train_step,\n ) = self._profile_records.get()\n # If the iteration was not deferred this is still under the\n # `manager.run_iteration` scope\n # Change the current reporter observation\n # To be the one to be completed\n if is_deferred:\n # Complete profiler record of `train_step`\n record_train_step.complete()\n # We want to report the previously obtained values in `train_step`\n cm_iter = self.manager.complete_iteration(observation=observed)\n cm_iter.__enter__()\n else:\n reporting.get_current_reporter().observation = observed\n self.manager.observation = observed\n self.handler.train_post_step(self, idx, x, outs)\n reporting.report({\"elapsed_time\": time.time() - begin})\n if is_deferred:\n cm_iter.__exit__(None, None, None)\n # Complete profiler record of `run_iteration` and iteration\n record_run_iteration.complete()\n record_iteration.complete()\n\n def _run_evaluator(self) -> None:\n assert self.evaluator is not None\n if self._val_loader is None:\n raise ValueError('\"val_loader\" is not given.')\n self.evaluator.handler.train_validation_begin(self, self.evaluator)\n self.evaluator.run(self._val_loader, eval_len=self._eval_len)\n self.evaluator.handler.train_validation_end(self, self.evaluator)\n\n def run(self, # type: ignore[override]\n train_loader: Iterable[Any],\n val_loader: Optional[Iterable[Any]] = None,\n *,\n train_len: Optional[int] = None,\n eval_len: Optional[int] = None) -> None:\n \"\"\"Executes the training loop.\n\n Args:\n train_loader (torch.utils.data.DataLoader):\n A data loader for training.\n val_loader (torch.utils.data.DataLoader, optional):\n A data loader passed to ``Evaluator.run()``.\n train_len (int, optional):\n The number of iterations per one training epoch. The default\n value is inferred from the size of training data loader.\n eval_len (int, optional):\n The number of iterations per one evaluation epoch, passed\n to ``Evaluator.run()``\n\n .. 
seealso::\n - :meth:`pytorch_pfn_extras.training._evaluator.Evaluator`\n \"\"\"\n if train_len is None:\n train_len = len(train_loader) # type: ignore[arg-type]\n\n self._val_loader = val_loader\n self._eval_len = eval_len\n\n class _EvaluatorExt:\n def __init__(self, trainer: 'Trainer') -> None:\n self.name = 'Evaluator'\n self.needs_model_state = True\n self._trainer = trainer\n\n def __call__(self, manager: ExtensionsManagerProtocol) -> None:\n self._trainer._run_evaluator()\n\n if self._manager is None:\n self._manager = self._setup_manager(train_len)\n if self.evaluator is not None:\n # Register the evaluator as an extension to the manager\n # To be triggered with the correct timing\n self._manager.extend(\n _EvaluatorExt(self),\n trigger=self.evaluator_trigger,\n priority=extension.PRIORITY_WRITER,\n )\n self.handler.train_setup(self, train_loader)\n if self.evaluator is None:\n if val_loader is not None:\n warnings.warn(\n '`val_loader` is given whereas the evaluator is missing.',\n UserWarning)\n else:\n if val_loader is None:\n raise ValueError('`val_loader` is required')\n self.evaluator.handler.eval_setup(self.evaluator, val_loader)\n\n while not self.manager.stop_trigger:\n self.handler.train_epoch_begin(self, train_loader)\n\n # When iterations are completed in the callback\n # This is needed to avoid being constantly passing parameters\n self._idxs: 'queue.Queue[int]' = queue.Queue()\n self._inputs: 'queue.Queue[Any]' = queue.Queue()\n self._times: 'queue.Queue[float]' = queue.Queue()\n self._observed: 'queue.Queue[reporting.Observation]' = queue.Queue()\n # Iterator must be created after `train_epoch_begin` as it may be\n # using a DistributedSampler.\n loader_iter = iter(train_loader)\n self._profile_records: 'queue.Queue[List[_ReportNotification]]' \\\n = queue.Queue()\n for idx in range(train_len):\n with record(\n \"pytorch_pfn_extras.training.Trainer:iteration\",\n use_cuda=torch.cuda.is_available()\n ) as ntf0:\n try:\n with record(\n \"pytorch_pfn_extras.training.Trainer:get_data\"\n ):\n x = next(loader_iter)\n except StopIteration:\n loader_iter = iter(train_loader)\n with record(\n \"pytorch_pfn_extras.training.Trainer:get_data\"\n ):\n x = next(loader_iter)\n begin = time.time()\n self._idxs.put(idx)\n self._inputs.put(x)\n self._times.put(begin)\n self._deferred = True\n with record(\n \"pytorch_pfn_extras.training.Trainer:run_iteration\",\n use_cuda=torch.cuda.is_available()\n ) as ntf1, \\\n self.manager.run_iteration() as iter_notifier:\n self._observed.put(self.manager.observation)\n with record(\n \"pytorch_pfn_extras.training.Trainer:train_step\",\n use_cuda=torch.cuda.is_available(),\n ) as ntf2:\n self._profile_records.put([ntf0, ntf1, ntf2])\n self.handler.train_step(\n self, idx, x, complete_fn=self._complete_step)\n # Check if the callback was called\n if self._deferred:\n # The iteration will be completed later\n ntf0.defer()\n ntf1.defer()\n ntf2.defer()\n iter_notifier.defer()\n # In some cases, DataLoaders are continuos\n # And will keep yielding results even if the epoch\n # is completed. 
We forcefully exit at the end of\n # every epoch\n if (\n self.is_epoch_last_iter(idx)\n or self.manager.stop_trigger\n ):\n break\n # In handlers that support a completely Async model train_epoch_end\n # Will take care of completing pending work\n self.handler.train_epoch_end(self)\n\n\n# For backward compatibility\n_Trainer = Trainer\n", "path": "pytorch_pfn_extras/training/_trainer.py"}], "after_files": [{"content": "# mypy: ignore-errors\n\nimport queue\nimport time\nimport warnings\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple, Union, TYPE_CHECKING\n\nimport torch\n\nfrom pytorch_pfn_extras import training\nfrom pytorch_pfn_extras.training import extension as extension\nfrom pytorch_pfn_extras.training import trigger as trigger_module\nimport pytorch_pfn_extras.reporting as reporting\nfrom pytorch_pfn_extras.profiler import record\n\nfrom pytorch_pfn_extras.training._manager_protocol import ExtensionsManagerProtocol\nfrom pytorch_pfn_extras.training.trigger import Trigger, TriggerLike\n\nif TYPE_CHECKING:\n from pytorch_pfn_extras import handler as handler_module\n from pytorch_pfn_extras.training._evaluator import Evaluator\n from pytorch_pfn_extras.profiler._time_summary import _ReportNotification\n\n\nclass Trainer:\n def __init__(\n self,\n handler: 'handler_module.BaseHandler',\n *,\n evaluator: Optional[Union['Evaluator', Tuple['Evaluator', TriggerLike]]],\n models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],\n **kwargs: Any,\n ):\n self.handler = handler\n self._manager: Optional['training.ExtensionsManager'] = None\n\n # The followings are used when setting up a manager instance\n if not isinstance(models, dict):\n if not isinstance(models, torch.nn.Module):\n raise ValueError(\n 'model must be an instance of dict or toch.nn.Module')\n self._models = {'main': models}\n else:\n self._models = models\n self._kwargs = kwargs\n self._extensions: List[ # list of (args, kwargs)\n Tuple[Tuple['training.Extension', Optional[str],\n 'TriggerLike', Optional[int]],\n Dict[str, Any]]] = []\n self._manager_state: Optional[Dict[str, Any]] = None\n\n if isinstance(evaluator, tuple):\n self.evaluator: Optional['Evaluator'] = None\n self.evaluator, trigger = evaluator\n self.evaluator_trigger = trigger_module.get_trigger(trigger)\n else:\n self.evaluator = evaluator\n self.evaluator_trigger = trigger_module.get_trigger((1, 'epoch'))\n self.val_loader = None\n\n def extend(\n self,\n extension: 'training.Extension',\n name: Optional[str] = None,\n trigger: 'TriggerLike' = None,\n priority: Optional[int] = None,\n *,\n call_before_training: bool = False,\n **kwargs: Any,\n ) -> None:\n if self._manager is not None:\n raise RuntimeError('cannot extend after starting the engine')\n self._extensions.append(\n ((extension, name, trigger, priority),\n dict(call_before_training=call_before_training, **kwargs)))\n\n def _setup_manager(self, iters_per_epoch: int) -> 'training.ExtensionsManager':\n from pytorch_pfn_extras.training import ExtensionsManager\n self._manager = ExtensionsManager(\n self._models, iters_per_epoch=iters_per_epoch, **self._kwargs)\n for ex_args, ex_kwargs in self._extensions:\n self._manager.extend(*ex_args, **ex_kwargs)\n if self._manager_state is not None:\n self.manager.load_state_dict(self._manager_state)\n return self._manager\n\n @property\n def manager(self) -> 'training.ExtensionsManager':\n if self._manager is None:\n raise RuntimeError('the engine is not started yet')\n return self._manager\n\n @property\n def models(self) -> Dict[str, 
torch.nn.Module]:\n # TODO(kmaehashi): do we need this convenient interface for handlers?\n return self.manager.raw_models\n\n @property\n def optimizers(self) -> Dict[str, torch.optim.Optimizer]:\n return self.manager.optimizers\n\n def state_dict(self) -> Dict[str, Any]:\n return self.manager.state_dict()\n\n def load_state_dict(self, to_load: Dict[str, Any]) -> None:\n if self._manager is None:\n self._manager_state = to_load\n return\n self.manager.load_state_dict(to_load)\n\n @property\n def epoch(self) -> int:\n return self.manager.epoch\n\n @property\n def epoch_detail(self) -> float:\n return self.manager.epoch_detail\n\n @property\n def iteration(self) -> int:\n return self.manager.iteration\n\n @property\n def is_before_training(self) -> bool:\n return self.manager.iteration == 0\n\n @property\n def stop_trigger(self) -> Trigger:\n return self._stop_trigger\n\n @stop_trigger.setter\n def stop_trigger(self, trigger: Trigger) -> None:\n self._stop_trigger = trigger\n\n def get_optimizer(self, name: str) -> torch.optim.Optimizer:\n return self.manager.optimizers[name]\n\n def set_optimizer(self, name: str, optimizer: torch.optim.Optimizer) -> None:\n self.manager.optimizers[name] = optimizer # type: ignore[index]\n\n def is_epoch_last_iter(self, idx: int) -> bool:\n return (idx + 1) == (self.manager._iters_per_epoch)\n\n def _complete_step(\n self,\n idx: int,\n outs: Any,\n *,\n is_deferred: bool = False,\n ) -> None:\n self._deferred = False # notify that the function was called\n c_idx = self._idxs.get()\n # Asure that iterations complete in order\n if c_idx != idx:\n raise RuntimeError(\n 'Completed a not expected iteration. '\n '{} was expected but completion of {} happened'.format(\n c_idx, idx)\n )\n x = self._inputs.get()\n begin = self._times.get()\n observed = self._observed.get()\n (\n record_iteration,\n record_run_iteration,\n record_train_step,\n ) = self._profile_records.get()\n # If the iteration was not deferred this is still under the\n # `manager.run_iteration` scope\n # Change the current reporter observation\n # To be the one to be completed\n if is_deferred:\n # Complete profiler record of `train_step`\n record_train_step.complete()\n # We want to report the previously obtained values in `train_step`\n cm_iter = self.manager.complete_iteration(observation=observed)\n cm_iter.__enter__()\n else:\n reporting.get_current_reporter().observation = observed\n self.manager.observation = observed\n self.handler.train_post_step(self, idx, x, outs)\n reporting.report({\"elapsed_time\": time.time() - begin})\n if is_deferred:\n cm_iter.__exit__(None, None, None)\n # Complete profiler record of `run_iteration` and iteration\n record_run_iteration.complete()\n record_iteration.complete()\n\n def _run_evaluator(self) -> None:\n assert self.evaluator is not None\n if self._val_loader is None:\n raise ValueError('\"val_loader\" is not given.')\n self.evaluator.handler.train_validation_begin(self, self.evaluator)\n self.evaluator.run(self._val_loader, eval_len=self._eval_len)\n self.evaluator.handler.train_validation_end(self, self.evaluator)\n\n def run(self, # type: ignore[override]\n train_loader: Iterable[Any],\n val_loader: Optional[Iterable[Any]] = None,\n *,\n train_len: Optional[int] = None,\n eval_len: Optional[int] = None) -> None:\n \"\"\"Executes the training loop.\n\n Args:\n train_loader (torch.utils.data.DataLoader):\n A data loader for training.\n val_loader (torch.utils.data.DataLoader, optional):\n A data loader passed to ``Evaluator.run()``.\n train_len (int, 
optional):\n The number of iterations per one training epoch. The default\n value is inferred from the size of training data loader.\n eval_len (int, optional):\n The number of iterations per one evaluation epoch, passed\n to ``Evaluator.run()``\n\n .. seealso::\n - :meth:`pytorch_pfn_extras.training._evaluator.Evaluator`\n \"\"\"\n if train_len is None:\n train_len = len(train_loader) # type: ignore[arg-type]\n\n self._val_loader = val_loader\n self._eval_len = eval_len\n\n class _EvaluatorExt:\n def __init__(self, trainer: 'Trainer') -> None:\n self.name = 'Evaluator'\n self.needs_model_state = True\n self._trainer = trainer\n\n def __call__(self, manager: ExtensionsManagerProtocol) -> None:\n self._trainer._run_evaluator()\n\n if self._manager is None:\n self._manager = self._setup_manager(train_len)\n if self.evaluator is not None:\n # Register the evaluator as an extension to the manager\n # To be triggered with the correct timing\n self._manager.extend(\n _EvaluatorExt(self),\n trigger=self.evaluator_trigger,\n priority=extension.PRIORITY_WRITER,\n )\n self.handler.train_setup(self, train_loader)\n if self.evaluator is None:\n if val_loader is not None:\n warnings.warn(\n '`val_loader` is given whereas the evaluator is missing.',\n UserWarning)\n else:\n if val_loader is None:\n raise ValueError('`val_loader` is required')\n self.evaluator.handler.eval_setup(self.evaluator, val_loader)\n\n while not self.manager.stop_trigger:\n self.handler.train_epoch_begin(self, train_loader)\n\n # When iterations are completed in the callback\n # This is needed to avoid being constantly passing parameters\n self._idxs: 'queue.Queue[int]' = queue.Queue()\n self._inputs: 'queue.Queue[Any]' = queue.Queue()\n self._times: 'queue.Queue[float]' = queue.Queue()\n self._observed: 'queue.Queue[reporting.Observation]' = queue.Queue()\n # Iterator must be created after `train_epoch_begin` as it may be\n # using a DistributedSampler.\n loader_iter = iter(train_loader)\n self._profile_records: 'queue.Queue[List[_ReportNotification]]' \\\n = queue.Queue()\n for idx in range(train_len):\n with record(\n \"pytorch_pfn_extras.training.Trainer:iteration\",\n use_cuda=torch.cuda.is_available()\n ) as ntf0:\n try:\n with record(\n \"pytorch_pfn_extras.training.Trainer:get_data\"\n ):\n x = next(loader_iter)\n except StopIteration:\n loader_iter = iter(train_loader)\n with record(\n \"pytorch_pfn_extras.training.Trainer:get_data\"\n ):\n x = next(loader_iter)\n begin = time.time()\n self._idxs.put(idx)\n self._inputs.put(x)\n self._times.put(begin)\n self._deferred = True\n with record(\n \"pytorch_pfn_extras.training.Trainer:run_iteration\",\n use_cuda=torch.cuda.is_available()\n ) as ntf1, \\\n self.manager.run_iteration() as iter_notifier:\n self._observed.put(self.manager.observation)\n with record(\n \"pytorch_pfn_extras.training.Trainer:train_step\",\n use_cuda=torch.cuda.is_available(),\n ) as ntf2:\n self._profile_records.put([ntf0, ntf1, ntf2])\n self.handler.train_step(\n self, idx, x, complete_fn=self._complete_step)\n # Check if the callback was called\n if self._deferred:\n # The iteration will be completed later\n ntf0.defer()\n ntf1.defer()\n ntf2.defer()\n iter_notifier.defer()\n # In some cases, DataLoaders are continuos\n # And will keep yielding results even if the epoch\n # is completed. 
We forcefully exit at the end of\n # every epoch\n if (\n self.is_epoch_last_iter(idx)\n or self.manager.stop_trigger\n ):\n break\n # In handlers that support a completely Async model train_epoch_end\n # Will take care of completing pending work\n self.handler.train_epoch_end(self)\n\n\n# For backward compatibility\n_Trainer = Trainer\n", "path": "pytorch_pfn_extras/training/_trainer.py"}]}
| 3,909 | 542 |
gh_patches_debug_17476
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-840
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not possible to install Ray from git
I want to install Ray for Python 3.
```
$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-all-dev unzip
$ pip3 install git+https://github.com/ray-project/ray.git@37282330c0ea687fd1b983176dce85731fcf189d#subdirectory=python
```
But this tries to install it for python2.7, and not python3, failing with:
```
CMake Error at cmake_modules/FindNumPy.cmake:62 (message):
NumPy import failure:
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: No module named numpy
```
Because numpy is installed only for Python 3.
--- END ISSUE ---
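The traceback in the issue comes from CMake's FindNumPy probe, which imports numpy with whatever interpreter ends up driving the build; if pip resolves to Python 2.7, the numpy that is installed only for Python 3 is invisible to it. As a purely hypothetical illustration (this helper is not part of the Ray codebase, and the module names are assumptions), one way to surface that mismatch early is to check the build-time imports against the invoking interpreter:
```python
# Hypothetical pre-build check, not from the Ray repository: verify that the
# interpreter running setup.py can import the modules the native build expects.
import sys


def check_build_imports(modules=("numpy", "Cython", "six")):
    # Note: PyPI distribution names ("cython") can differ from import names ("Cython").
    missing = []
    for name in modules:
        try:
            __import__(name)
        except ImportError:
            missing.append(name)
    if missing:
        raise SystemExit(
            "%s cannot import: %s" % (sys.executable, ", ".join(missing))
        )


if __name__ == "__main__":
    check_build_imports()
    print("build-time imports resolve for %s" % sys.executable)
```
Running it with the same interpreter that pip will use makes the ImportError above reproducible outside of the CMake step.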
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/setup.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import shutil
7 import subprocess
8 import sys
9
10 from setuptools import setup, find_packages, Distribution
11 import setuptools.command.build_ext as _build_ext
12
13 # Ideally, we could include these files by putting them in a
14 # MANIFEST.in or using the package_data argument to setup, but the
15 # MANIFEST.in gets applied at the very beginning when setup.py runs
16 # before these files have been created, so we have to move the files
17 # manually.
18 ray_files = [
19 "ray/core/src/common/thirdparty/redis/src/redis-server",
20 "ray/core/src/common/redis_module/libray_redis_module.so",
21 "ray/core/src/plasma/plasma_store",
22 "ray/core/src/plasma/plasma_manager",
23 "ray/core/src/local_scheduler/local_scheduler",
24 "ray/core/src/local_scheduler/liblocal_scheduler_library.so",
25 "ray/core/src/numbuf/libnumbuf.so",
26 "ray/core/src/global_scheduler/global_scheduler",
27 "ray/WebUI.ipynb"
28 ]
29
30
31 class build_ext(_build_ext.build_ext):
32 def run(self):
33 # Note: We are passing in sys.executable so that we use the same
34 # version of Python to build pyarrow inside the build.sh script. Note
35 # that certain flags will not be passed along such as --user or sudo.
36 # TODO(rkn): Fix this.
37 subprocess.check_call(["../build.sh", sys.executable])
38
39 # We also need to install pyarrow along with Ray, so make sure that the
40 # relevant non-Python pyarrow files get copied.
41 pyarrow_files = [
42 os.path.join("ray/pyarrow_files/pyarrow", filename)
43 for filename in os.listdir("./ray/pyarrow_files/pyarrow")
44 if not os.path.isdir(os.path.join("ray/pyarrow_files/pyarrow",
45 filename))]
46
47 files_to_include = ray_files + pyarrow_files
48
49 for filename in files_to_include:
50 self.move_file(filename)
51 # Copy over the autogenerated flatbuffer Python bindings.
52 generated_python_directory = "ray/core/generated"
53 for filename in os.listdir(generated_python_directory):
54 if filename[-3:] == ".py":
55 self.move_file(os.path.join(generated_python_directory,
56 filename))
57
58 def move_file(self, filename):
59 # TODO(rkn): This feels very brittle. It may not handle all cases. See
60 # https://github.com/apache/arrow/blob/master/python/setup.py for an
61 # example.
62 source = filename
63 destination = os.path.join(self.build_lib, filename)
64 # Create the target directory if it doesn't already exist.
65 parent_directory = os.path.dirname(destination)
66 if not os.path.exists(parent_directory):
67 os.makedirs(parent_directory)
68 print("Copying {} to {}.".format(source, destination))
69 shutil.copy(source, destination)
70
71
72 class BinaryDistribution(Distribution):
73 def has_ext_modules(self):
74 return True
75
76
77 setup(name="ray",
78 version="0.1.2",
79 packages=find_packages(),
80 cmdclass={"build_ext": build_ext},
81 # The BinaryDistribution argument triggers build_ext.
82 distclass=BinaryDistribution,
83 install_requires=["numpy",
84 "funcsigs",
85 "click",
86 "colorama",
87 "psutil",
88 "redis",
89 "cloudpickle >= 0.2.2",
90 "flatbuffers"],
91 entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
92 include_package_data=True,
93 zip_safe=False,
94 license="Apache 2.0")
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -81,12 +81,15 @@
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=["numpy",
+ "cython",
"funcsigs",
"click",
"colorama",
"psutil",
"redis",
"cloudpickle >= 0.2.2",
+ # The six module is required by pyarrow.
+ "six >= 1.0.0",
"flatbuffers"],
entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
include_package_data=True,
|
{"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -81,12 +81,15 @@\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n+ \"cython\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n+ # The six module is required by pyarrow.\n+ \"six >= 1.0.0\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n", "issue": "Not possible to install Ray from git\nI want to install Ray for Python 3.\r\n\r\n```\r\n$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-all-dev unzip\r\n$ pip3 install git+https://github.com/ray-project/ray.git@37282330c0ea687fd1b983176dce85731fcf189d#subdirectory=python\r\n```\r\n\r\nBut this tries to install it for python2.7, and not python3, failing with:\r\n\r\n```\r\n CMake Error at cmake_modules/FindNumPy.cmake:62 (message):\r\n NumPy import failure:\r\n \r\n Traceback (most recent call last):\r\n \r\n File \"<string>\", line 1, in <module>\r\n \r\n ImportError: No module named numpy\r\n```\r\n\r\nBecause numpy is installed only for Python 3.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\nray_files = [\n \"ray/core/src/common/thirdparty/redis/src/redis-server\",\n \"ray/core/src/common/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store\",\n \"ray/core/src/plasma/plasma_manager\",\n \"ray/core/src/local_scheduler/local_scheduler\",\n \"ray/core/src/local_scheduler/liblocal_scheduler_library.so\",\n \"ray/core/src/numbuf/libnumbuf.so\",\n \"ray/core/src/global_scheduler/global_scheduler\",\n \"ray/WebUI.ipynb\"\n]\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = [\n os.path.join(\"ray/pyarrow_files/pyarrow\", filename)\n for filename in os.listdir(\"./ray/pyarrow_files/pyarrow\")\n if not os.path.isdir(os.path.join(\"ray/pyarrow_files/pyarrow\",\n filename))]\n\n files_to_include = ray_files + pyarrow_files\n\n for filename in files_to_include:\n self.move_file(filename)\n # Copy over the autogenerated flatbuffer Python bindings.\n generated_python_directory = \"ray/core/generated\"\n for filename in os.listdir(generated_python_directory):\n if filename[-3:] == \".py\":\n self.move_file(os.path.join(generated_python_directory,\n filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. 
See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nsetup(name=\"ray\",\n version=\"0.1.2\",\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\nray_files = [\n \"ray/core/src/common/thirdparty/redis/src/redis-server\",\n \"ray/core/src/common/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store\",\n \"ray/core/src/plasma/plasma_manager\",\n \"ray/core/src/local_scheduler/local_scheduler\",\n \"ray/core/src/local_scheduler/liblocal_scheduler_library.so\",\n \"ray/core/src/numbuf/libnumbuf.so\",\n \"ray/core/src/global_scheduler/global_scheduler\",\n \"ray/WebUI.ipynb\"\n]\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = [\n os.path.join(\"ray/pyarrow_files/pyarrow\", filename)\n for filename in os.listdir(\"./ray/pyarrow_files/pyarrow\")\n if not os.path.isdir(os.path.join(\"ray/pyarrow_files/pyarrow\",\n filename))]\n\n files_to_include = ray_files + pyarrow_files\n\n for filename in files_to_include:\n self.move_file(filename)\n # Copy over the autogenerated flatbuffer Python bindings.\n generated_python_directory = \"ray/core/generated\"\n for filename in os.listdir(generated_python_directory):\n if filename[-3:] == \".py\":\n self.move_file(os.path.join(generated_python_directory,\n filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. 
See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nsetup(name=\"ray\",\n version=\"0.1.2\",\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n \"cython\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n # The six module is required by pyarrow.\n \"six >= 1.0.0\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]}
| 1,409 | 156 |
gh_patches_debug_9371
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-688
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Engineering: Front-end - Redesign the `Get started with Login.gov` button
Redesign the `Get started with Login.gov` button to new specs:
- The text `Get started with` is moved into the button
- New font required: Public Sans https://github.com/uswds/public-sans
- Button should elegantly and responsively resize itself in 1-line mode and 2-line mode
- The fallback `Login.gov` text should also be there for screenreaders
- Desktop composition: 289px x 41px
<img width="334" alt="image" src="https://user-images.githubusercontent.com/3673236/172735124-93f631bc-d655-4aff-a6d9-faf07dcef0e2.png">
- Mobile composition: 2-line mobile size, 289px x 72px
<img width="363" alt="image" src="https://user-images.githubusercontent.com/3673236/172735067-3e785d8f-235b-4e32-af0a-bcf17eed6ca5.png">
https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(IAL2-Login.gov)?node-id=3830%3A9882
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/settings.py`
Content:
```
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6
7 def _filter_empty(ls):
8 return [s for s in ls if s]
9
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
14 # SECURITY WARNING: keep the secret key used in production secret!
15 SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "secret")
16
17 # SECURITY WARNING: don't run with debug turned on in production!
18 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
19
20 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
21
22 ALLOWED_HOSTS = _filter_empty(os.environ.get("DJANGO_ALLOWED_HOSTS", "localhost,127.0.0.1").split(","))
23
24 # Application definition
25
26 INSTALLED_APPS = [
27 "django.contrib.messages",
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 "benefits.oauth",
34 ]
35
36 if ADMIN:
37 INSTALLED_APPS.extend(
38 [
39 "django.contrib.admin",
40 "django.contrib.auth",
41 "django.contrib.contenttypes",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.contrib.messages.middleware.MessageMiddleware",
49 "django.middleware.locale.LocaleMiddleware",
50 "benefits.core.middleware.Healthcheck",
51 "django.middleware.common.CommonMiddleware",
52 "django.middleware.csrf.CsrfViewMiddleware",
53 "django.middleware.clickjacking.XFrameOptionsMiddleware",
54 "csp.middleware.CSPMiddleware",
55 "benefits.core.middleware.ChangedLanguageEvent",
56 ]
57
58 if ADMIN:
59 MIDDLEWARE.extend(
60 [
61 "django.contrib.auth.middleware.AuthenticationMiddleware",
62 "django.contrib.messages.middleware.MessageMiddleware",
63 ]
64 )
65
66 if DEBUG:
67 MIDDLEWARE.extend(["benefits.core.middleware.DebugSession"])
68
69 CSRF_COOKIE_AGE = None
70 CSRF_COOKIE_SAMESITE = "Strict"
71 CSRF_COOKIE_HTTPONLY = True
72 CSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get("DJANGO_TRUSTED_ORIGINS", "http://localhost,http://127.0.0.1").split(","))
73
74 # With `Strict`, the user loses their Django session between leaving our app to
75 # sign in with OAuth, and coming back into our app from the OAuth redirect.
76 # This is because `Strict` disallows our cookie being sent from an external
77 # domain and so the session cookie is lost.
78 #
79 # `Lax` allows the cookie to travel with the user and be sent back to us by the
80 # OAuth server, as long as the request is "safe" i.e. GET
81 SESSION_COOKIE_SAMESITE = "Lax"
82 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
83 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
84 SESSION_COOKIE_NAME = "_benefitssessionid"
85
86 if not DEBUG:
87 CSRF_COOKIE_SECURE = True
88 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
89 SESSION_COOKIE_SECURE = True
90
91 SECURE_BROWSER_XSS_FILTER = True
92
93 # the NGINX reverse proxy sits in front of the application in deployed environments
94 # SSL terminates before getting to Django, and NGINX adds this header to indicate
95 # if the original request was secure or not
96 #
97 # See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header
98 if not DEBUG:
99 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
100
101 ROOT_URLCONF = "benefits.urls"
102
103 template_ctx_processors = [
104 "django.template.context_processors.request",
105 "django.contrib.messages.context_processors.messages",
106 "benefits.core.context_processors.analytics",
107 "benefits.core.context_processors.authentication",
108 "benefits.core.context_processors.recaptcha",
109 ]
110
111 if DEBUG:
112 template_ctx_processors.extend(
113 [
114 "django.template.context_processors.debug",
115 "benefits.core.context_processors.debug",
116 ]
117 )
118
119 if ADMIN:
120 template_ctx_processors.extend(
121 [
122 "django.contrib.auth.context_processors.auth",
123 "django.contrib.messages.context_processors.messages",
124 ]
125 )
126
127 TEMPLATES = [
128 {
129 "BACKEND": "django.template.backends.django.DjangoTemplates",
130 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
131 "APP_DIRS": True,
132 "OPTIONS": {
133 "context_processors": template_ctx_processors,
134 },
135 },
136 ]
137
138 WSGI_APPLICATION = "benefits.wsgi.application"
139
140 DATABASES = {
141 "default": {
142 "ENGINE": "django.db.backends.sqlite3",
143 "NAME": "django.db",
144 }
145 }
146
147 # Password validation
148
149 AUTH_PASSWORD_VALIDATORS = []
150
151 if ADMIN:
152 AUTH_PASSWORD_VALIDATORS.extend(
153 [
154 {
155 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
156 },
157 {
158 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
159 },
160 {
161 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
162 },
163 {
164 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
165 },
166 ]
167 )
168
169 # OAuth configuration
170
171 OAUTH_AUTHORITY = os.environ.get("DJANGO_OAUTH_AUTHORITY", "http://example.com")
172 OAUTH_CLIENT_NAME = os.environ.get("DJANGO_OAUTH_CLIENT_NAME", "benefits-oauth-client-name")
173 OAUTH_CLIENT_ID = os.environ.get("DJANGO_OAUTH_CLIENT_ID", "benefits-oauth-client-id")
174
175 if OAUTH_CLIENT_NAME:
176 AUTHLIB_OAUTH_CLIENTS = {
177 OAUTH_CLIENT_NAME: {
178 "client_id": OAUTH_CLIENT_ID,
179 "server_metadata_url": f"{OAUTH_AUTHORITY}/.well-known/openid-configuration",
180 "client_kwargs": {"code_challenge_method": "S256", "scope": "openid"},
181 }
182 }
183
184 # Internationalization
185
186 LANGUAGE_CODE = "en"
187
188 LANGUAGE_COOKIE_HTTPONLY = True
189 LANGUAGE_COOKIE_SAMESITE = "Strict"
190 LANGUAGE_COOKIE_SECURE = True
191
192 LANGUAGES = [("en", "English"), ("es", "Español")]
193
194 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
195
196 USE_I18N = True
197 USE_L10N = True
198
199 TIME_ZONE = "UTC"
200 USE_TZ = True
201
202 # Static files (CSS, JavaScript, Images)
203
204 STATIC_URL = "/static/"
205 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
206 STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
207 STATIC_ROOT = os.path.join(BASE_DIR, "static")
208
209 # Logging configuration
210
211 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
212 LOGGING = {
213 "version": 1,
214 "disable_existing_loggers": False,
215 "formatters": {
216 "default": {
217 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
218 "datefmt": "%d/%b/%Y %H:%M:%S",
219 "style": "{",
220 },
221 },
222 "handlers": {
223 "default": {"class": "logging.StreamHandler", "formatter": "default"},
224 },
225 "root": {
226 "handlers": ["default"],
227 "level": LOG_LEVEL,
228 },
229 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
230 }
231
232 # Analytics configuration
233
234 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
235
236 # rate limit configuration
237
238 # number of requests allowed in the given period
239 RATE_LIMIT = int(os.environ.get("DJANGO_RATE_LIMIT", 5))
240
241 # HTTP request methods to rate limit
242 RATE_LIMIT_METHODS = os.environ.get("DJANGO_RATE_LIMIT_METHODS", "POST").upper().split(",")
243
244 # number of seconds before additional requests are denied
245 RATE_LIMIT_PERIOD = int(os.environ.get("DJANGO_RATE_LIMIT_PERIOD", 60))
246
247 # Rate Limit feature flag
248 RATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))
249
250 # reCAPTCHA configuration
251
252 RECAPTCHA_API_URL = os.environ.get("DJANGO_RECAPTCHA_API_URL", "https://www.google.com/recaptcha/api.js")
253 RECAPTCHA_SITE_KEY = os.environ.get("DJANGO_RECAPTCHA_SITE_KEY")
254 RECAPTCHA_SECRET_KEY = os.environ.get("DJANGO_RECAPTCHA_SECRET_KEY")
255 RECAPTCHA_VERIFY_URL = os.environ.get("DJANGO_RECAPTCHA_VERIFY_URL", "https://www.google.com/recaptcha/api/siteverify")
256 RECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))
257
258 # Content Security Policy
259 # Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html
260
261 # In particular, note that the inner single-quotes are required!
262 # https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings
263
264 CSP_DEFAULT_SRC = ["'self'"]
265
266 CSP_CONNECT_SRC = ["'self'", "https://api.amplitude.com/"]
267 env_connect_src = _filter_empty(os.environ.get("DJANGO_CSP_CONNECT_SRC", "").split(","))
268 CSP_CONNECT_SRC.extend(env_connect_src)
269
270 CSP_FONT_SRC = ["https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
271 env_font_src = _filter_empty(os.environ.get("DJANGO_CSP_FONT_SRC", "").split(","))
272 CSP_FONT_SRC.extend(env_font_src)
273
274 CSP_FRAME_ANCESTORS = ["'none'"]
275
276 CSP_FRAME_SRC = ["'none'"]
277 env_frame_src = _filter_empty(os.environ.get("DJANGO_CSP_FRAME_SRC", "").split(","))
278 CSP_FRAME_SRC.extend(env_frame_src)
279 if RECAPTCHA_ENABLED:
280 CSP_FRAME_SRC.append("https://www.google.com")
281
282
283 CSP_SCRIPT_SRC = [
284 "'unsafe-inline'",
285 "https://california.azureedge.net/cdt/statetemplate/",
286 "https://cdn.amplitude.com/libs/",
287 "https://code.jquery.com/",
288 "*.littlepay.com",
289 ]
290 env_script_src = _filter_empty(os.environ.get("DJANGO_CSP_SCRIPT_SRC", "").split(","))
291 CSP_SCRIPT_SRC.extend(env_script_src)
292 if RECAPTCHA_ENABLED:
293 CSP_SCRIPT_SRC.extend(["https://www.google.com/recaptcha/", "https://www.gstatic.com/recaptcha/releases/"])
294
295 CSP_STYLE_SRC = [
296 "'self'",
297 "'unsafe-inline'",
298 "https://california.azureedge.net/cdt/statetemplate/",
299 "https://fonts.googleapis.com/css",
300 ]
301 env_style_src = _filter_empty(os.environ.get("DJANGO_CSP_STYLE_SRC", "").split(","))
302 CSP_STYLE_SRC.extend(env_style_src)
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -267,7 +267,7 @@
env_connect_src = _filter_empty(os.environ.get("DJANGO_CSP_CONNECT_SRC", "").split(","))
CSP_CONNECT_SRC.extend(env_connect_src)
-CSP_FONT_SRC = ["https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
+CSP_FONT_SRC = ["'self'", "https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
env_font_src = _filter_empty(os.environ.get("DJANGO_CSP_FONT_SRC", "").split(","))
CSP_FONT_SRC.extend(env_font_src)
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -267,7 +267,7 @@\n env_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\n CSP_CONNECT_SRC.extend(env_connect_src)\n \n-CSP_FONT_SRC = [\"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\n+CSP_FONT_SRC = [\"'self'\", \"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\n env_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\n CSP_FONT_SRC.extend(env_font_src)\n", "issue": "Engineering: Front-end - Redesign the `Get started with Login.gov` button\nRedesign the `Get started with Login.gov` button to new specs:\r\n\r\n\r\n- The text `Get started with` is moved into the button\r\n- New font required: Public Sans https://github.com/uswds/public-sans\r\n- Button should elegantly and responsively resize itself in 1-line mode and 2-line mode\r\n- The fallback `Login.gov` text should also be there for screenreaders\r\n- Desktop composition: 289px x 41px\r\n<img width=\"334\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/172735124-93f631bc-d655-4aff-a6d9-faf07dcef0e2.png\">\r\n\r\n- Mobile composition: 2-line mobile size, 289px x 72px\r\n<img width=\"363\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/172735067-3e785d8f-235b-4e32-af0a-bcf17eed6ca5.png\">\r\n\r\n\r\nhttps://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(IAL2-Login.gov)?node-id=3830%3A9882\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"DJANGO_SECRET_KEY\", \"secret\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ.get(\"DJANGO_ALLOWED_HOSTS\", \"localhost,127.0.0.1\").split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n 
MIDDLEWARE.extend([\"benefits.core.middleware.DebugSession\"])\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get(\"DJANGO_TRUSTED_ORIGINS\", \"http://localhost,http://127.0.0.1\").split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\n# the NGINX reverse proxy sits in front of the application in deployed environments\n# SSL terminates before getting to Django, and NGINX adds this header to indicate\n# if the original request was secure or not\n#\n# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header\nif not DEBUG:\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.authentication\",\n \"benefits.core.context_processors.recaptcha\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"django.db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# OAuth configuration\n\nOAUTH_AUTHORITY = os.environ.get(\"DJANGO_OAUTH_AUTHORITY\", \"http://example.com\")\nOAUTH_CLIENT_NAME = os.environ.get(\"DJANGO_OAUTH_CLIENT_NAME\", \"benefits-oauth-client-name\")\nOAUTH_CLIENT_ID = os.environ.get(\"DJANGO_OAUTH_CLIENT_ID\", \"benefits-oauth-client-id\")\n\nif OAUTH_CLIENT_NAME:\n AUTHLIB_OAUTH_CLIENTS = {\n OAUTH_CLIENT_NAME: {\n \"client_id\": OAUTH_CLIENT_ID,\n \"server_metadata_url\": f\"{OAUTH_AUTHORITY}/.well-known/openid-configuration\",\n \"client_kwargs\": 
{\"code_challenge_method\": \"S256\", \"scope\": \"openid\"},\n }\n }\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 5))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"POST\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 60))\n\n# Rate Limit feature flag\nRATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\", \"https://www.google.com/recaptcha/api.js\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_SECRET_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\", \"https://www.google.com/recaptcha/api/siteverify\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nCSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nCSP_FONT_SRC = [\"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC.extend(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\n\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nCSP_FRAME_SRC.extend(env_frame_src)\nif RECAPTCHA_ENABLED:\n CSP_FRAME_SRC.append(\"https://www.google.com\")\n\n\nCSP_SCRIPT_SRC = [\n \"'unsafe-inline'\",\n 
\"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://cdn.amplitude.com/libs/\",\n \"https://code.jquery.com/\",\n \"*.littlepay.com\",\n]\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC.extend(env_script_src)\nif RECAPTCHA_ENABLED:\n CSP_SCRIPT_SRC.extend([\"https://www.google.com/recaptcha/\", \"https://www.gstatic.com/recaptcha/releases/\"])\n\nCSP_STYLE_SRC = [\n \"'self'\",\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://fonts.googleapis.com/css\",\n]\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", \"\").split(\",\"))\nCSP_STYLE_SRC.extend(env_style_src)\n", "path": "benefits/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"DJANGO_SECRET_KEY\", \"secret\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ.get(\"DJANGO_ALLOWED_HOSTS\", \"localhost,127.0.0.1\").split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n MIDDLEWARE.extend([\"benefits.core.middleware.DebugSession\"])\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get(\"DJANGO_TRUSTED_ORIGINS\", \"http://localhost,http://127.0.0.1\").split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. 
GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\n# the NGINX reverse proxy sits in front of the application in deployed environments\n# SSL terminates before getting to Django, and NGINX adds this header to indicate\n# if the original request was secure or not\n#\n# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header\nif not DEBUG:\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.authentication\",\n \"benefits.core.context_processors.recaptcha\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"django.db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# OAuth configuration\n\nOAUTH_AUTHORITY = os.environ.get(\"DJANGO_OAUTH_AUTHORITY\", \"http://example.com\")\nOAUTH_CLIENT_NAME = os.environ.get(\"DJANGO_OAUTH_CLIENT_NAME\", \"benefits-oauth-client-name\")\nOAUTH_CLIENT_ID = os.environ.get(\"DJANGO_OAUTH_CLIENT_ID\", \"benefits-oauth-client-id\")\n\nif OAUTH_CLIENT_NAME:\n AUTHLIB_OAUTH_CLIENTS = {\n OAUTH_CLIENT_NAME: {\n \"client_id\": OAUTH_CLIENT_ID,\n \"server_metadata_url\": f\"{OAUTH_AUTHORITY}/.well-known/openid-configuration\",\n \"client_kwargs\": {\"code_challenge_method\": \"S256\", \"scope\": \"openid\"},\n }\n }\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\nSTATIC_ROOT = os.path.join(BASE_DIR, 
\"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 5))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"POST\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 60))\n\n# Rate Limit feature flag\nRATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\", \"https://www.google.com/recaptcha/api.js\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_SECRET_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\", \"https://www.google.com/recaptcha/api/siteverify\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nCSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nCSP_FONT_SRC = [\"'self'\", \"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC.extend(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\n\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nCSP_FRAME_SRC.extend(env_frame_src)\nif RECAPTCHA_ENABLED:\n CSP_FRAME_SRC.append(\"https://www.google.com\")\n\n\nCSP_SCRIPT_SRC = [\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://cdn.amplitude.com/libs/\",\n \"https://code.jquery.com/\",\n \"*.littlepay.com\",\n]\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC.extend(env_script_src)\nif RECAPTCHA_ENABLED:\n CSP_SCRIPT_SRC.extend([\"https://www.google.com/recaptcha/\", \"https://www.gstatic.com/recaptcha/releases/\"])\n\nCSP_STYLE_SRC = [\n \"'self'\",\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://fonts.googleapis.com/css\",\n]\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", 
\"\").split(\",\"))\nCSP_STYLE_SRC.extend(env_style_src)\n", "path": "benefits/settings.py"}]}
| 3,713 | 165 |
gh_patches_debug_35523
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-557
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError using SentryAsgiMiddleware with FastAPI/Starlette app
I cannot get `SentryAsgiMiddleware` to work with our FastAPI app. We tried to follow the example in the [Sentry docs](https://docs.sentry.io/platforms/python/asgi/), so the app module basically looks like this:
```python
from fastapi import FastAPI
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
...
app = FastAPI()
@app.post()
...
app = SentryAsgiMiddleware(app)
```
This gives an error on all requests, see the following stack trace:
```python
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/usr/local/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/usr/local/lib/python3.7/site-packages/uvicorn/middleware/asgi2.py", line 7, in __call__
await instance(receive, send)
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 54, in run_asgi2
scope, lambda: self.app(scope)(receive, send)
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 93, in _run_app
raise exc from None
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 90, in _run_app
return await callback()
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 54, in <lambda>
scope, lambda: self.app(scope)(receive, send)
TypeError: __call__() missing 2 required positional arguments: 'receive' and 'send'
```
Library versions:
- python==3.7.5
- sentry-sdk==0.13.2
- uvicorn==0.10.8
- fastapi==0.42.0
- starlette==0.12.9
--- END ISSUE ---
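The shape of the failure is an ASGI version mismatch: the middleware treats every wrapped application as an ASGI2 factory (call it with scope, get back a coroutine that takes receive and send), while Starlette and FastAPI expose ASGI3 applications that take all three arguments in a single call. A minimal sketch of the two calling conventions, using toy apps rather than the real framework objects:
```python
import asyncio

# Toy applications illustrating the two ASGI calling conventions (not framework code).


class ASGI2App:
    # ASGI2: app(scope) returns an instance that is awaited with (receive, send).
    def __call__(self, scope):
        async def instance(receive, send):
            await send({"type": "http.response.start", "status": 204, "headers": []})
            await send({"type": "http.response.body", "body": b""})

        return instance


class ASGI3App:
    # ASGI3 (what Starlette/FastAPI expose): a single awaitable call.
    async def __call__(self, scope, receive, send):
        await send({"type": "http.response.start", "status": 204, "headers": []})
        await send({"type": "http.response.body", "body": b""})


def looks_like_asgi3(app):
    # Heuristic: an async __call__ marks a single-callable (ASGI3) application.
    return asyncio.iscoroutinefunction(getattr(app, "__call__", None))


# Invoking an ASGI3 app the ASGI2 way, app(scope)(receive, send), raises exactly the
# TypeError from the traceback: __call__() missing 'receive' and 'send'.
```
Detecting which convention the wrapped app speaks, and only falling back to the double-call form for ASGI2 apps, is the direction the fix below takes.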
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/asgi.py`
Content:
```
1 """
2 An ASGI middleware.
3
4 Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
5 """
6
7 import functools
8 import urllib
9
10 from sentry_sdk._types import MYPY
11 from sentry_sdk.hub import Hub, _should_send_default_pii
12 from sentry_sdk.integrations._wsgi_common import _filter_headers
13 from sentry_sdk.utils import ContextVar, event_from_exception, transaction_from_function
14 from sentry_sdk.tracing import Span
15
16 if MYPY:
17 from typing import Dict
18 from typing import Any
19 from typing import Optional
20
21 from sentry_sdk._types import Event, Hint
22
23
24 _asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
25
26
27 def _capture_exception(hub, exc):
28 # type: (Hub, Any) -> None
29
30 # Check client here as it might have been unset while streaming response
31 if hub.client is not None:
32 event, hint = event_from_exception(
33 exc,
34 client_options=hub.client.options,
35 mechanism={"type": "asgi", "handled": False},
36 )
37 hub.capture_event(event, hint=hint)
38
39
40 class SentryAsgiMiddleware:
41 __slots__ = ("app",)
42
43 def __init__(self, app):
44 # type: (Any) -> None
45 self.app = app
46
47 def __call__(self, scope, receive=None, send=None):
48 # type: (Any, Any, Any) -> Any
49 if receive is None or send is None:
50
51 async def run_asgi2(receive, send):
52 # type: (Any, Any) -> Any
53 return await self._run_app(
54 scope, lambda: self.app(scope)(receive, send)
55 )
56
57 return run_asgi2
58 else:
59 return self._run_app(scope, lambda: self.app(scope, receive, send))
60
61 async def _run_app(self, scope, callback):
62 # type: (Any, Any) -> Any
63 if _asgi_middleware_applied.get(False):
64 return await callback()
65
66 _asgi_middleware_applied.set(True)
67 try:
68 hub = Hub(Hub.current)
69 with hub:
70 with hub.configure_scope() as sentry_scope:
71 sentry_scope.clear_breadcrumbs()
72 sentry_scope._name = "asgi"
73 processor = functools.partial(
74 self.event_processor, asgi_scope=scope
75 )
76 sentry_scope.add_event_processor(processor)
77
78 if scope["type"] in ("http", "websocket"):
79 span = Span.continue_from_headers(dict(scope["headers"]))
80 span.op = "{}.server".format(scope["type"])
81 else:
82 span = Span()
83 span.op = "asgi.server"
84
85 span.set_tag("asgi.type", scope["type"])
86 span.transaction = "generic ASGI request"
87
88 with hub.start_span(span) as span:
89 try:
90 return await callback()
91 except Exception as exc:
92 _capture_exception(hub, exc)
93 raise exc from None
94 finally:
95 _asgi_middleware_applied.set(False)
96
97 def event_processor(self, event, hint, asgi_scope):
98 # type: (Event, Hint, Any) -> Optional[Event]
99 request_info = event.get("request", {})
100
101 if asgi_scope["type"] in ("http", "websocket"):
102 request_info["url"] = self.get_url(asgi_scope)
103 request_info["method"] = asgi_scope["method"]
104 request_info["headers"] = _filter_headers(self.get_headers(asgi_scope))
105 request_info["query_string"] = self.get_query(asgi_scope)
106
107 if asgi_scope.get("client") and _should_send_default_pii():
108 request_info["env"] = {"REMOTE_ADDR": asgi_scope["client"][0]}
109
110 if asgi_scope.get("endpoint"):
111 # Webframeworks like Starlette mutate the ASGI env once routing is
112 # done, which is sometime after the request has started. If we have
113 # an endpoint, overwrite our path-based transaction name.
114 event["transaction"] = self.get_transaction(asgi_scope)
115
116 event["request"] = request_info
117
118 return event
119
120 def get_url(self, scope):
121 # type: (Any) -> str
122 """
123 Extract URL from the ASGI scope, without also including the querystring.
124 """
125 scheme = scope.get("scheme", "http")
126 server = scope.get("server", None)
127 path = scope.get("root_path", "") + scope["path"]
128
129 for key, value in scope["headers"]:
130 if key == b"host":
131 host_header = value.decode("latin-1")
132 return "%s://%s%s" % (scheme, host_header, path)
133
134 if server is not None:
135 host, port = server
136 default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
137 if port != default_port:
138 return "%s://%s:%s%s" % (scheme, host, port, path)
139 return "%s://%s%s" % (scheme, host, path)
140 return path
141
142 def get_query(self, scope):
143 # type: (Any) -> Any
144 """
145 Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
146 """
147 return urllib.parse.unquote(scope["query_string"].decode("latin-1"))
148
149 def get_headers(self, scope):
150 # type: (Any) -> Dict[str, Any]
151 """
152 Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
153 """
154 headers = {} # type: Dict[str, str]
155 for raw_key, raw_value in scope["headers"]:
156 key = raw_key.decode("latin-1")
157 value = raw_value.decode("latin-1")
158 if key in headers:
159 headers[key] = headers[key] + ", " + value
160 else:
161 headers[key] = value
162 return headers
163
164 def get_transaction(self, scope):
165 # type: (Any) -> Optional[str]
166 """
167 Return a transaction string to identify the routed endpoint.
168 """
169 return transaction_from_function(scope["endpoint"])
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/asgi.py b/sentry_sdk/integrations/asgi.py
--- a/sentry_sdk/integrations/asgi.py
+++ b/sentry_sdk/integrations/asgi.py
@@ -4,7 +4,9 @@
Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
"""
+import asyncio
import functools
+import inspect
import urllib
from sentry_sdk._types import MYPY
@@ -17,6 +19,7 @@
from typing import Dict
from typing import Any
from typing import Optional
+ from typing import Callable
from sentry_sdk._types import Event, Hint
@@ -37,26 +40,45 @@
hub.capture_event(event, hint=hint)
+def _looks_like_asgi3(app):
+ # type: (Any) -> bool
+ """
+ Try to figure out if an application object supports ASGI3.
+
+ This is how uvicorn figures out the application version as well.
+ """
+ if inspect.isclass(app):
+ return hasattr(app, "__await__")
+ elif inspect.isfunction(app):
+ return asyncio.iscoroutinefunction(app)
+ else:
+ call = getattr(app, "__call__", None) # noqa
+ return asyncio.iscoroutinefunction(call)
+
+
class SentryAsgiMiddleware:
- __slots__ = ("app",)
+ __slots__ = ("app", "__call__")
def __init__(self, app):
# type: (Any) -> None
self.app = app
- def __call__(self, scope, receive=None, send=None):
- # type: (Any, Any, Any) -> Any
- if receive is None or send is None:
+ if _looks_like_asgi3(app):
+ self.__call__ = self._run_asgi3 # type: Callable[..., Any]
+ else:
+ self.__call__ = self._run_asgi2
- async def run_asgi2(receive, send):
- # type: (Any, Any) -> Any
- return await self._run_app(
- scope, lambda: self.app(scope)(receive, send)
- )
+ def _run_asgi2(self, scope):
+ # type: (Any) -> Any
+ async def inner(receive, send):
+ # type: (Any, Any) -> Any
+ return await self._run_app(scope, lambda: self.app(scope)(receive, send))
- return run_asgi2
- else:
- return self._run_app(scope, lambda: self.app(scope, receive, send))
+ return inner
+
+ async def _run_asgi3(self, scope, receive, send):
+ # type: (Any, Any, Any) -> Any
+ return await self._run_app(scope, lambda: self.app(scope, receive, send))
async def _run_app(self, scope, callback):
# type: (Any, Any) -> Any
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/asgi.py b/sentry_sdk/integrations/asgi.py\n--- a/sentry_sdk/integrations/asgi.py\n+++ b/sentry_sdk/integrations/asgi.py\n@@ -4,7 +4,9 @@\n Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.\n \"\"\"\n \n+import asyncio\n import functools\n+import inspect\n import urllib\n \n from sentry_sdk._types import MYPY\n@@ -17,6 +19,7 @@\n from typing import Dict\n from typing import Any\n from typing import Optional\n+ from typing import Callable\n \n from sentry_sdk._types import Event, Hint\n \n@@ -37,26 +40,45 @@\n hub.capture_event(event, hint=hint)\n \n \n+def _looks_like_asgi3(app):\n+ # type: (Any) -> bool\n+ \"\"\"\n+ Try to figure out if an application object supports ASGI3.\n+\n+ This is how uvicorn figures out the application version as well.\n+ \"\"\"\n+ if inspect.isclass(app):\n+ return hasattr(app, \"__await__\")\n+ elif inspect.isfunction(app):\n+ return asyncio.iscoroutinefunction(app)\n+ else:\n+ call = getattr(app, \"__call__\", None) # noqa\n+ return asyncio.iscoroutinefunction(call)\n+\n+\n class SentryAsgiMiddleware:\n- __slots__ = (\"app\",)\n+ __slots__ = (\"app\", \"__call__\")\n \n def __init__(self, app):\n # type: (Any) -> None\n self.app = app\n \n- def __call__(self, scope, receive=None, send=None):\n- # type: (Any, Any, Any) -> Any\n- if receive is None or send is None:\n+ if _looks_like_asgi3(app):\n+ self.__call__ = self._run_asgi3 # type: Callable[..., Any]\n+ else:\n+ self.__call__ = self._run_asgi2\n \n- async def run_asgi2(receive, send):\n- # type: (Any, Any) -> Any\n- return await self._run_app(\n- scope, lambda: self.app(scope)(receive, send)\n- )\n+ def _run_asgi2(self, scope):\n+ # type: (Any) -> Any\n+ async def inner(receive, send):\n+ # type: (Any, Any) -> Any\n+ return await self._run_app(scope, lambda: self.app(scope)(receive, send))\n \n- return run_asgi2\n- else:\n- return self._run_app(scope, lambda: self.app(scope, receive, send))\n+ return inner\n+\n+ async def _run_asgi3(self, scope, receive, send):\n+ # type: (Any, Any, Any) -> Any\n+ return await self._run_app(scope, lambda: self.app(scope, receive, send))\n \n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n", "issue": "TypeError using SentryAsgiMiddleware with FastAPI/Starlette app\nI cannot get `SentryAsgiMiddleware` to work with our FastAPI app. 
We tried to follow the example in the [Sentry docs](https://docs.sentry.io/platforms/python/asgi/), so the app module basically looks like this:\r\n\r\n```python\r\nfrom fastapi import FastAPI\r\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\r\n...\r\napp = FastAPI()\r\n\r\[email protected]()\r\n...\r\n\r\napp = SentryAsgiMiddleware(app)\r\n```\r\n\r\nThis gives an error on all requests, see the following stack trace:\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 385, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py\", line 45, in __call__\r\n return await self.app(scope, receive, send)\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/middleware/asgi2.py\", line 7, in __call__\r\n await instance(receive, send)\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 54, in run_asgi2\r\n scope, lambda: self.app(scope)(receive, send)\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 93, in _run_app\r\n raise exc from None\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 90, in _run_app\r\n return await callback()\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 54, in <lambda>\r\n scope, lambda: self.app(scope)(receive, send)\r\nTypeError: __call__() missing 2 required positional arguments: 'receive' and 'send'\r\n```\r\n\r\nLibrary versions:\r\n\r\n- python==3.7.5\r\n- sentry-sdk==0.13.2\r\n- uvicorn==0.10.8\r\n- fastapi==0.42.0\r\n- starlette==0.12.9\n", "before_files": [{"content": "\"\"\"\nAn ASGI middleware.\n\nBased on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.\n\"\"\"\n\nimport functools\nimport urllib\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\nfrom sentry_sdk.utils import ContextVar, event_from_exception, transaction_from_function\nfrom sentry_sdk.tracing import Span\n\nif MYPY:\n from typing import Dict\n from typing import Any\n from typing import Optional\n\n from sentry_sdk._types import Event, Hint\n\n\n_asgi_middleware_applied = ContextVar(\"sentry_asgi_middleware_applied\")\n\n\ndef _capture_exception(hub, exc):\n # type: (Hub, Any) -> None\n\n # Check client here as it might have been unset while streaming response\n if hub.client is not None:\n event, hint = event_from_exception(\n exc,\n client_options=hub.client.options,\n mechanism={\"type\": \"asgi\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\nclass SentryAsgiMiddleware:\n __slots__ = (\"app\",)\n\n def __init__(self, app):\n # type: (Any) -> None\n self.app = app\n\n def __call__(self, scope, receive=None, send=None):\n # type: (Any, Any, Any) -> Any\n if receive is None or send is None:\n\n async def run_asgi2(receive, send):\n # type: (Any, Any) -> Any\n return await self._run_app(\n scope, lambda: self.app(scope)(receive, send)\n )\n\n return run_asgi2\n else:\n return self._run_app(scope, lambda: self.app(scope, receive, send))\n\n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n if _asgi_middleware_applied.get(False):\n return await callback()\n\n _asgi_middleware_applied.set(True)\n try:\n hub = Hub(Hub.current)\n with hub:\n with 
hub.configure_scope() as sentry_scope:\n sentry_scope.clear_breadcrumbs()\n sentry_scope._name = \"asgi\"\n processor = functools.partial(\n self.event_processor, asgi_scope=scope\n )\n sentry_scope.add_event_processor(processor)\n\n if scope[\"type\"] in (\"http\", \"websocket\"):\n span = Span.continue_from_headers(dict(scope[\"headers\"]))\n span.op = \"{}.server\".format(scope[\"type\"])\n else:\n span = Span()\n span.op = \"asgi.server\"\n\n span.set_tag(\"asgi.type\", scope[\"type\"])\n span.transaction = \"generic ASGI request\"\n\n with hub.start_span(span) as span:\n try:\n return await callback()\n except Exception as exc:\n _capture_exception(hub, exc)\n raise exc from None\n finally:\n _asgi_middleware_applied.set(False)\n\n def event_processor(self, event, hint, asgi_scope):\n # type: (Event, Hint, Any) -> Optional[Event]\n request_info = event.get(\"request\", {})\n\n if asgi_scope[\"type\"] in (\"http\", \"websocket\"):\n request_info[\"url\"] = self.get_url(asgi_scope)\n request_info[\"method\"] = asgi_scope[\"method\"]\n request_info[\"headers\"] = _filter_headers(self.get_headers(asgi_scope))\n request_info[\"query_string\"] = self.get_query(asgi_scope)\n\n if asgi_scope.get(\"client\") and _should_send_default_pii():\n request_info[\"env\"] = {\"REMOTE_ADDR\": asgi_scope[\"client\"][0]}\n\n if asgi_scope.get(\"endpoint\"):\n # Webframeworks like Starlette mutate the ASGI env once routing is\n # done, which is sometime after the request has started. If we have\n # an endpoint, overwrite our path-based transaction name.\n event[\"transaction\"] = self.get_transaction(asgi_scope)\n\n event[\"request\"] = request_info\n\n return event\n\n def get_url(self, scope):\n # type: (Any) -> str\n \"\"\"\n Extract URL from the ASGI scope, without also including the querystring.\n \"\"\"\n scheme = scope.get(\"scheme\", \"http\")\n server = scope.get(\"server\", None)\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n\n for key, value in scope[\"headers\"]:\n if key == b\"host\":\n host_header = value.decode(\"latin-1\")\n return \"%s://%s%s\" % (scheme, host_header, path)\n\n if server is not None:\n host, port = server\n default_port = {\"http\": 80, \"https\": 443, \"ws\": 80, \"wss\": 443}[scheme]\n if port != default_port:\n return \"%s://%s:%s%s\" % (scheme, host, port, path)\n return \"%s://%s%s\" % (scheme, host, path)\n return path\n\n def get_query(self, scope):\n # type: (Any) -> Any\n \"\"\"\n Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n return urllib.parse.unquote(scope[\"query_string\"].decode(\"latin-1\"))\n\n def get_headers(self, scope):\n # type: (Any) -> Dict[str, Any]\n \"\"\"\n Extract headers from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n headers = {} # type: Dict[str, str]\n for raw_key, raw_value in scope[\"headers\"]:\n key = raw_key.decode(\"latin-1\")\n value = raw_value.decode(\"latin-1\")\n if key in headers:\n headers[key] = headers[key] + \", \" + value\n else:\n headers[key] = value\n return headers\n\n def get_transaction(self, scope):\n # type: (Any) -> Optional[str]\n \"\"\"\n Return a transaction string to identify the routed endpoint.\n \"\"\"\n return transaction_from_function(scope[\"endpoint\"])\n", "path": "sentry_sdk/integrations/asgi.py"}], "after_files": [{"content": "\"\"\"\nAn ASGI middleware.\n\nBased on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.\n\"\"\"\n\nimport asyncio\nimport functools\nimport inspect\nimport 
urllib\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\nfrom sentry_sdk.utils import ContextVar, event_from_exception, transaction_from_function\nfrom sentry_sdk.tracing import Span\n\nif MYPY:\n from typing import Dict\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n from sentry_sdk._types import Event, Hint\n\n\n_asgi_middleware_applied = ContextVar(\"sentry_asgi_middleware_applied\")\n\n\ndef _capture_exception(hub, exc):\n # type: (Hub, Any) -> None\n\n # Check client here as it might have been unset while streaming response\n if hub.client is not None:\n event, hint = event_from_exception(\n exc,\n client_options=hub.client.options,\n mechanism={\"type\": \"asgi\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _looks_like_asgi3(app):\n # type: (Any) -> bool\n \"\"\"\n Try to figure out if an application object supports ASGI3.\n\n This is how uvicorn figures out the application version as well.\n \"\"\"\n if inspect.isclass(app):\n return hasattr(app, \"__await__\")\n elif inspect.isfunction(app):\n return asyncio.iscoroutinefunction(app)\n else:\n call = getattr(app, \"__call__\", None) # noqa\n return asyncio.iscoroutinefunction(call)\n\n\nclass SentryAsgiMiddleware:\n __slots__ = (\"app\", \"__call__\")\n\n def __init__(self, app):\n # type: (Any) -> None\n self.app = app\n\n if _looks_like_asgi3(app):\n self.__call__ = self._run_asgi3 # type: Callable[..., Any]\n else:\n self.__call__ = self._run_asgi2\n\n def _run_asgi2(self, scope):\n # type: (Any) -> Any\n async def inner(receive, send):\n # type: (Any, Any) -> Any\n return await self._run_app(scope, lambda: self.app(scope)(receive, send))\n\n return inner\n\n async def _run_asgi3(self, scope, receive, send):\n # type: (Any, Any, Any) -> Any\n return await self._run_app(scope, lambda: self.app(scope, receive, send))\n\n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n if _asgi_middleware_applied.get(False):\n return await callback()\n\n _asgi_middleware_applied.set(True)\n try:\n hub = Hub(Hub.current)\n with hub:\n with hub.configure_scope() as sentry_scope:\n sentry_scope.clear_breadcrumbs()\n sentry_scope._name = \"asgi\"\n processor = functools.partial(\n self.event_processor, asgi_scope=scope\n )\n sentry_scope.add_event_processor(processor)\n\n if scope[\"type\"] in (\"http\", \"websocket\"):\n span = Span.continue_from_headers(dict(scope[\"headers\"]))\n span.op = \"{}.server\".format(scope[\"type\"])\n else:\n span = Span()\n span.op = \"asgi.server\"\n\n span.set_tag(\"asgi.type\", scope[\"type\"])\n span.transaction = \"generic ASGI request\"\n\n with hub.start_span(span) as span:\n try:\n return await callback()\n except Exception as exc:\n _capture_exception(hub, exc)\n raise exc from None\n finally:\n _asgi_middleware_applied.set(False)\n\n def event_processor(self, event, hint, asgi_scope):\n # type: (Event, Hint, Any) -> Optional[Event]\n request_info = event.get(\"request\", {})\n\n if asgi_scope[\"type\"] in (\"http\", \"websocket\"):\n request_info[\"url\"] = self.get_url(asgi_scope)\n request_info[\"method\"] = asgi_scope[\"method\"]\n request_info[\"headers\"] = _filter_headers(self.get_headers(asgi_scope))\n request_info[\"query_string\"] = self.get_query(asgi_scope)\n\n if asgi_scope.get(\"client\") and _should_send_default_pii():\n request_info[\"env\"] = {\"REMOTE_ADDR\": asgi_scope[\"client\"][0]}\n\n if 
asgi_scope.get(\"endpoint\"):\n # Webframeworks like Starlette mutate the ASGI env once routing is\n # done, which is sometime after the request has started. If we have\n # an endpoint, overwrite our path-based transaction name.\n event[\"transaction\"] = self.get_transaction(asgi_scope)\n\n event[\"request\"] = request_info\n\n return event\n\n def get_url(self, scope):\n # type: (Any) -> str\n \"\"\"\n Extract URL from the ASGI scope, without also including the querystring.\n \"\"\"\n scheme = scope.get(\"scheme\", \"http\")\n server = scope.get(\"server\", None)\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n\n for key, value in scope[\"headers\"]:\n if key == b\"host\":\n host_header = value.decode(\"latin-1\")\n return \"%s://%s%s\" % (scheme, host_header, path)\n\n if server is not None:\n host, port = server\n default_port = {\"http\": 80, \"https\": 443, \"ws\": 80, \"wss\": 443}[scheme]\n if port != default_port:\n return \"%s://%s:%s%s\" % (scheme, host, port, path)\n return \"%s://%s%s\" % (scheme, host, path)\n return path\n\n def get_query(self, scope):\n # type: (Any) -> Any\n \"\"\"\n Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n return urllib.parse.unquote(scope[\"query_string\"].decode(\"latin-1\"))\n\n def get_headers(self, scope):\n # type: (Any) -> Dict[str, Any]\n \"\"\"\n Extract headers from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n headers = {} # type: Dict[str, str]\n for raw_key, raw_value in scope[\"headers\"]:\n key = raw_key.decode(\"latin-1\")\n value = raw_value.decode(\"latin-1\")\n if key in headers:\n headers[key] = headers[key] + \", \" + value\n else:\n headers[key] = value\n return headers\n\n def get_transaction(self, scope):\n # type: (Any) -> Optional[str]\n \"\"\"\n Return a transaction string to identify the routed endpoint.\n \"\"\"\n return transaction_from_function(scope[\"endpoint\"])\n", "path": "sentry_sdk/integrations/asgi.py"}]}
| 2,546 | 690 |
gh_patches_debug_3891
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-580
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty md5 sum errors
When the md5 sum is empty in a DVC-file:
```
$ dvc checkout
'data/eval.txt': cache file not found
'data/model.p': cache file not found
```
This happens if I resolve DVC-file merge conflicts by replacing the md5 checksums with empty values.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/output.py`
Content:
```
1 import os
2 import stat
3 import json
4 import shutil
5 from checksumdir import dirhash
6
7 from dvc.system import System
8 from dvc.utils import file_md5
9 from dvc.exceptions import DvcException
10 from dvc.logger import Logger
11
12
13 class OutputError(DvcException):
14 pass
15
16
17 class CmdOutputError(DvcException):
18 def __init__(self, path, msg):
19 super(CmdOutputError, self).__init__('Output file \'{}\' error: {}'.format(path, msg))
20
21
22 class CmdOutputNoCacheError(CmdOutputError):
23 def __init__(self, path):
24 super(CmdOutputNoCacheError, self).__init__(path, 'no cache')
25
26
27 class CmdOutputOutsideOfRepoError(CmdOutputError):
28 def __init__(self, path):
29 super(CmdOutputOutsideOfRepoError, self).__init__(path, 'outside of repository')
30
31
32 class CmdOutputDoesNotExistError(CmdOutputError):
33 def __init__(self, path):
34 super(CmdOutputDoesNotExistError, self).__init__(path, 'does not exist')
35
36
37 class CmdOutputIsNotFileOrDirError(CmdOutputError):
38 def __init__(self, path):
39 super(CmdOutputIsNotFileOrDirError, self).__init__(path, 'not a file or directory')
40
41
42 class CmdOutputAlreadyTrackedError(CmdOutputError):
43 def __init__(self, path):
44 super(CmdOutputAlreadyTrackedError, self).__init__(path, 'already tracked by scm(e.g. git)')
45
46
47 class Dependency(object):
48 PARAM_RELPATH = 'relpath'
49 PARAM_PATH = 'path'
50 PARAM_MD5 = 'md5'
51 MD5_DIR_SUFFIX = '.dir'
52
53 def __init__(self, project, path, md5=None):
54 self.project = project
55 self.path = os.path.abspath(os.path.realpath(path))
56
57 if not self.path.startswith(self.project.root_dir):
58 raise CmdOutputOutsideOfRepoError(self.rel_path)
59
60 self.md5 = md5
61
62 @property
63 def rel_path(self):
64 return os.path.relpath(self.path)
65
66 def _changed_md5(self):
67 if not os.path.exists(self.path):
68 return True
69
70 return self.project.state.changed(self.path, self.md5)
71
72 def changed(self):
73 return self._changed_md5()
74
75 def status(self):
76 if self.changed():
77 #FIXME better msgs
78 return {self.rel_path: 'changed'}
79 return {}
80
81 @staticmethod
82 def is_dir_cache(cache):
83 return cache.endswith(Output.MD5_DIR_SUFFIX)
84
85 def save(self):
86 if not os.path.exists(self.path):
87 raise CmdOutputDoesNotExistError(self.rel_path)
88
89 if not os.path.isfile(self.path) and not os.path.isdir(self.path):
90 raise CmdOutputIsNotFileOrDirError(self.rel_path)
91
92 self.md5 = self.project.state.update(self.path)
93
94 def dumpd(self, cwd):
95 return {
96 Output.PARAM_PATH: os.path.relpath(self.path, cwd),
97 Output.PARAM_MD5: self.md5,
98 }
99
100 @classmethod
101 def loadd(cls, project, d, cwd=os.curdir):
102 path = os.path.join(cwd, d[Output.PARAM_PATH])
103 md5 = d.get(Output.PARAM_MD5, None)
104 return cls(project, path, md5=md5)
105
106 @classmethod
107 def loadd_from(cls, project, d_list, cwd=os.curdir):
108 return [cls.loadd(project, x, cwd=cwd) for x in d_list]
109
110 @classmethod
111 def loads(cls, project, s, cwd=os.curdir):
112 return cls(project, os.path.join(cwd, s), md5=None)
113
114 @classmethod
115 def loads_from(cls, project, s_list, cwd=os.curdir):
116 return [cls.loads(project, x, cwd=cwd) for x in s_list]
117
118 def stage(self):
119 for stage in self.project.stages():
120 for out in stage.outs:
121 if self.path == out.path:
122 return stage
123 return None
124
125
126 class Output(Dependency):
127 PARAM_CACHE = 'cache'
128
129 def __init__(self, project, path, md5=None, use_cache=True):
130 super(Output, self).__init__(project, path, md5=md5)
131 self.use_cache = use_cache
132
133 @property
134 def cache(self):
135 if not self.md5:
136 return None
137
138 return self.project.cache.get(self.md5)
139
140 @property
141 def rel_cache(self):
142 if not self.cache:
143 return None
144
145 return os.path.relpath(self.cache)
146
147 def dumpd(self, cwd):
148 ret = super(Output, self).dumpd(cwd)
149 ret[Output.PARAM_CACHE] = self.use_cache
150 return ret
151
152 @classmethod
153 def loadd(cls, project, d, cwd=os.curdir):
154 ret = super(Output, cls).loadd(project, d, cwd=cwd)
155 ret.use_cache = d.get(Output.PARAM_CACHE, True)
156 return ret
157
158 @classmethod
159 def loads(cls, project, s, use_cache=True, cwd=os.curdir):
160 ret = super(Output, cls).loads(project, s, cwd=cwd)
161 ret.use_cache = use_cache
162 return ret
163
164 @classmethod
165 def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):
166 return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in s_list]
167
168 def _changed_cache(self, cache):
169 md5 = self.project.state.update(cache)
170 if md5 != self.project.cache.path_to_md5(cache):
171 self.project.logger.warn('Corrupted cache file {}'.format(os.path.relpath(cache)))
172 os.unlink(cache)
173 return True
174
175 return False
176
177 def _changed_file(self, path, cache):
178 if os.path.isfile(path) and \
179 os.path.isfile(cache) and \
180 System.samefile(path, cache) and \
181 not self._changed_cache(cache):
182 return False
183
184 return True
185
186 def _changed_dir(self):
187 if not os.path.isdir(self.path) or not os.path.isfile(self.cache):
188 return True
189
190 dir_info = self._collect_dir() # slow!
191 dir_info_cached = self.load_dir_cache(self.cache) # slow. why?
192
193 if not self.are_dir_info_equal(dir_info, dir_info_cached):
194 return True
195
196 return False
197
198 @staticmethod
199 def are_dir_info_equal(dir_info1, dir_info2):
200 return Output.dir_info_dict(dir_info1) == Output.dir_info_dict(dir_info2)
201
202 @staticmethod
203 def dir_info_dict(dir_info):
204 return {i['relpath']: i['md5'] for i in dir_info}
205
206 def changed(self):
207 if not self.use_cache:
208 ret = super(Output, self).changed()
209 elif not self.cache:
210 ret = True
211 elif self.is_dir_cache(self.cache):
212 ret = self._changed_dir()
213 else:
214 ret = self._changed_file(self.path, self.cache)
215
216 msg = u'Data file or dir \'{}\' with cache \'{}\' '
217 if ret:
218 msg += 'changed'
219 else:
220 msg += 'didn\'t change'
221 self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))
222
223 return ret
224
225 def hardlink(self, src, link):
226 rel_src = os.path.relpath(src)
227 rel_link = os.path.relpath(link)
228 self.project.logger.debug(u'creating hardlink {} -> {}'.format(rel_src, rel_link))
229
230 dname = os.path.dirname(link)
231 if not os.path.exists(dname):
232 os.makedirs(dname)
233
234 System.hardlink(src, link)
235
236 @staticmethod
237 def load_dir_cache(path):
238 if os.path.isabs(path):
239 relpath = os.path.relpath(path)
240 else:
241 relpath = path
242
243 try:
244 with open(path, 'r') as fd:
245 d = json.load(fd)
246 except Exception as exc:
247 msg = u'Failed to load dir cache \'{}\''
248 Logger.error(msg.format(relpath), exc)
249 return []
250
251 if not isinstance(d, list):
252 msg = u'Dir cache file format error \'{}\': skipping the file'
253 Logger.error(msg.format(relpath))
254 return []
255
256 return d
257
258 @staticmethod
259 def get_dir_cache(path):
260 res = {}
261 d = Output.load_dir_cache(path)
262
263 for entry in d:
264 res[entry[Output.PARAM_RELPATH]] = entry[Output.PARAM_MD5]
265
266 return res
267
268 def dir_cache(self):
269 res = {}
270 dir_cache = self.get_dir_cache(self.cache)
271
272 for relpath, md5 in dir_cache.items():
273 res[relpath] = self.project.cache.get(md5)
274
275 return res
276
277 def checkout(self):
278 if not self.use_cache:
279 return
280
281 msg = u'Checking out \'{}\' with cache \'{}\''
282 self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))
283
284 if not self.changed():
285 msg = u'Data file \'{}\' with cache \'{}\' didn\'t change, skipping checkout.'
286 self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))
287 return
288
289 if not self.cache or not os.path.exists(self.cache):
290 self.project.logger.warn(u'\'{}\': cache file not found'.format(self.rel_path))
291 self.remove()
292 return
293
294 if os.path.exists(self.path):
295 msg = u'Data file \'{}\' exists. Removing before checkout'
296 self.project.logger.debug(msg.format(self.rel_path))
297 self.remove()
298
299 if not self.is_dir_cache(self.cache):
300 self.hardlink(self.cache, self.path)
301 return
302
303 for relpath, cache in self.dir_cache().items():
304 path = os.path.join(self.path, relpath)
305 self.hardlink(cache, path)
306
307 def _collect_dir(self):
308 dir_info = []
309
310 for root, dirs, files in os.walk(self.path):
311 for fname in files:
312 path = os.path.join(root, fname)
313 relpath = os.path.relpath(path, self.path)
314
315 md5 = self.project.state.update(path, dump=False)
316 dir_info.append({self.PARAM_RELPATH: relpath, self.PARAM_MD5: md5})
317
318 self.project.state.dump()
319
320 return dir_info
321
322 def _save_dir(self):
323 dname = os.path.dirname(self.cache)
324 dir_info = self._collect_dir()
325
326 for entry in dir_info:
327 md5 = entry[self.PARAM_MD5]
328 relpath = entry[self.PARAM_RELPATH]
329 path = os.path.join(self.path, relpath)
330 cache = self.project.cache.get(md5)
331
332 if os.path.exists(cache):
333 self._remove(path)
334 self.hardlink(cache, path)
335 else:
336 self.hardlink(path, cache)
337
338 if not os.path.isdir(dname):
339 os.makedirs(dname)
340
341 with open(self.cache, 'w+') as fd:
342 json.dump(dir_info, fd)
343
344 def save(self):
345 super(Output, self).save()
346
347 if not self.use_cache:
348 return
349
350 self.project.logger.debug(u'Saving \'{}\' to \'{}\''.format(self.rel_path, self.rel_cache))
351
352 if self.project.scm.is_tracked(self.path):
353 raise CmdOutputAlreadyTrackedError(self.rel_path)
354
355 if not self.changed():
356 return
357
358 if os.path.exists(self.cache):
359 # This means that we already have cache for this data.
360 # We remove data and link it to existing cache to save
361 # some space.
362 msg = u'Cache \'{}\' already exists, performing checkout for \'{}\''
363 self.project.logger.debug(msg.format(self.rel_cache, self.rel_path))
364 self.checkout()
365 return
366
367 if os.path.isdir(self.path):
368 self._save_dir()
369 else:
370 self.hardlink(self.path, self.cache)
371
372 def _remove(self, path):
373 if not os.path.exists(path):
374 return
375
376 self.project.logger.debug(u'Removing \'{}\''.format(os.path.relpath(path)))
377 if os.path.isfile(path):
378 os.unlink(path)
379 else:
380 shutil.rmtree(path)
381
382 def remove(self):
383 self._remove(self.path)
384
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/output.py b/dvc/output.py
--- a/dvc/output.py
+++ b/dvc/output.py
@@ -287,7 +287,8 @@
return
if not self.cache or not os.path.exists(self.cache):
- self.project.logger.warn(u'\'{}\': cache file not found'.format(self.rel_path))
+ if self.cache:
+ self.project.logger.warn(u'\'{}({})\': cache file not found'.format(self.rel_cache, self.rel_path))
self.remove()
return
|
{"golden_diff": "diff --git a/dvc/output.py b/dvc/output.py\n--- a/dvc/output.py\n+++ b/dvc/output.py\n@@ -287,7 +287,8 @@\n return\n \n if not self.cache or not os.path.exists(self.cache):\n- self.project.logger.warn(u'\\'{}\\': cache file not found'.format(self.rel_path))\n+ if self.cache:\n+ self.project.logger.warn(u'\\'{}({})\\': cache file not found'.format(self.rel_cache, self.rel_path))\n self.remove()\n return\n", "issue": "Empty md5 sum errors\nWhen md5 sum is empty in DVC-file:\r\n```\r\n$ dvc checkout\r\n'data/eval.txt': cache file not found\r\n'data/model.p': cache file not found\r\n```\r\n\r\nThis happens if I resolve DVC-file merge conflicts by replacing md5 checksums to empty. \n", "before_files": [{"content": "import os\nimport stat\nimport json\nimport shutil\nfrom checksumdir import dirhash\n\nfrom dvc.system import System\nfrom dvc.utils import file_md5\nfrom dvc.exceptions import DvcException\nfrom dvc.logger import Logger\n\n\nclass OutputError(DvcException):\n pass\n\n\nclass CmdOutputError(DvcException):\n def __init__(self, path, msg):\n super(CmdOutputError, self).__init__('Output file \\'{}\\' error: {}'.format(path, msg))\n\n\nclass CmdOutputNoCacheError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputNoCacheError, self).__init__(path, 'no cache')\n\n\nclass CmdOutputOutsideOfRepoError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputOutsideOfRepoError, self).__init__(path, 'outside of repository')\n\n\nclass CmdOutputDoesNotExistError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputDoesNotExistError, self).__init__(path, 'does not exist')\n\n\nclass CmdOutputIsNotFileOrDirError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputIsNotFileOrDirError, self).__init__(path, 'not a file or directory')\n\n\nclass CmdOutputAlreadyTrackedError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputAlreadyTrackedError, self).__init__(path, 'already tracked by scm(e.g. 
git)')\n\n\nclass Dependency(object):\n PARAM_RELPATH = 'relpath'\n PARAM_PATH = 'path'\n PARAM_MD5 = 'md5'\n MD5_DIR_SUFFIX = '.dir'\n\n def __init__(self, project, path, md5=None):\n self.project = project\n self.path = os.path.abspath(os.path.realpath(path))\n\n if not self.path.startswith(self.project.root_dir):\n raise CmdOutputOutsideOfRepoError(self.rel_path)\n\n self.md5 = md5\n\n @property\n def rel_path(self):\n return os.path.relpath(self.path)\n\n def _changed_md5(self):\n if not os.path.exists(self.path):\n return True\n\n return self.project.state.changed(self.path, self.md5)\n\n def changed(self):\n return self._changed_md5()\n\n def status(self):\n if self.changed():\n #FIXME better msgs\n return {self.rel_path: 'changed'}\n return {}\n\n @staticmethod\n def is_dir_cache(cache):\n return cache.endswith(Output.MD5_DIR_SUFFIX)\n\n def save(self):\n if not os.path.exists(self.path):\n raise CmdOutputDoesNotExistError(self.rel_path)\n\n if not os.path.isfile(self.path) and not os.path.isdir(self.path):\n raise CmdOutputIsNotFileOrDirError(self.rel_path)\n\n self.md5 = self.project.state.update(self.path)\n\n def dumpd(self, cwd):\n return {\n Output.PARAM_PATH: os.path.relpath(self.path, cwd),\n Output.PARAM_MD5: self.md5,\n }\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n path = os.path.join(cwd, d[Output.PARAM_PATH])\n md5 = d.get(Output.PARAM_MD5, None)\n return cls(project, path, md5=md5)\n\n @classmethod\n def loadd_from(cls, project, d_list, cwd=os.curdir):\n return [cls.loadd(project, x, cwd=cwd) for x in d_list]\n\n @classmethod\n def loads(cls, project, s, cwd=os.curdir):\n return cls(project, os.path.join(cwd, s), md5=None)\n\n @classmethod\n def loads_from(cls, project, s_list, cwd=os.curdir):\n return [cls.loads(project, x, cwd=cwd) for x in s_list]\n\n def stage(self):\n for stage in self.project.stages():\n for out in stage.outs:\n if self.path == out.path:\n return stage\n return None\n\n\nclass Output(Dependency):\n PARAM_CACHE = 'cache'\n\n def __init__(self, project, path, md5=None, use_cache=True):\n super(Output, self).__init__(project, path, md5=md5)\n self.use_cache = use_cache\n\n @property\n def cache(self):\n if not self.md5:\n return None\n\n return self.project.cache.get(self.md5)\n\n @property\n def rel_cache(self):\n if not self.cache:\n return None\n\n return os.path.relpath(self.cache)\n\n def dumpd(self, cwd):\n ret = super(Output, self).dumpd(cwd)\n ret[Output.PARAM_CACHE] = self.use_cache\n return ret\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n ret = super(Output, cls).loadd(project, d, cwd=cwd)\n ret.use_cache = d.get(Output.PARAM_CACHE, True)\n return ret\n\n @classmethod\n def loads(cls, project, s, use_cache=True, cwd=os.curdir):\n ret = super(Output, cls).loads(project, s, cwd=cwd)\n ret.use_cache = use_cache\n return ret\n\n @classmethod\n def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):\n return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in s_list]\n\n def _changed_cache(self, cache):\n md5 = self.project.state.update(cache)\n if md5 != self.project.cache.path_to_md5(cache):\n self.project.logger.warn('Corrupted cache file {}'.format(os.path.relpath(cache)))\n os.unlink(cache)\n return True\n\n return False\n\n def _changed_file(self, path, cache):\n if os.path.isfile(path) and \\\n os.path.isfile(cache) and \\\n System.samefile(path, cache) and \\\n not self._changed_cache(cache):\n return False\n\n return True\n\n def _changed_dir(self):\n if not 
os.path.isdir(self.path) or not os.path.isfile(self.cache):\n return True\n\n dir_info = self._collect_dir() # slow!\n dir_info_cached = self.load_dir_cache(self.cache) # slow. why?\n\n if not self.are_dir_info_equal(dir_info, dir_info_cached):\n return True\n\n return False\n\n @staticmethod\n def are_dir_info_equal(dir_info1, dir_info2):\n return Output.dir_info_dict(dir_info1) == Output.dir_info_dict(dir_info2)\n\n @staticmethod\n def dir_info_dict(dir_info):\n return {i['relpath']: i['md5'] for i in dir_info}\n\n def changed(self):\n if not self.use_cache:\n ret = super(Output, self).changed()\n elif not self.cache:\n ret = True\n elif self.is_dir_cache(self.cache):\n ret = self._changed_dir()\n else:\n ret = self._changed_file(self.path, self.cache)\n\n msg = u'Data file or dir \\'{}\\' with cache \\'{}\\' '\n if ret:\n msg += 'changed'\n else:\n msg += 'didn\\'t change'\n self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))\n\n return ret\n\n def hardlink(self, src, link):\n rel_src = os.path.relpath(src)\n rel_link = os.path.relpath(link)\n self.project.logger.debug(u'creating hardlink {} -> {}'.format(rel_src, rel_link))\n\n dname = os.path.dirname(link)\n if not os.path.exists(dname):\n os.makedirs(dname)\n\n System.hardlink(src, link)\n\n @staticmethod\n def load_dir_cache(path):\n if os.path.isabs(path):\n relpath = os.path.relpath(path)\n else:\n relpath = path\n\n try:\n with open(path, 'r') as fd:\n d = json.load(fd)\n except Exception as exc:\n msg = u'Failed to load dir cache \\'{}\\''\n Logger.error(msg.format(relpath), exc)\n return []\n\n if not isinstance(d, list):\n msg = u'Dir cache file format error \\'{}\\': skipping the file'\n Logger.error(msg.format(relpath))\n return []\n\n return d\n\n @staticmethod\n def get_dir_cache(path):\n res = {}\n d = Output.load_dir_cache(path)\n\n for entry in d:\n res[entry[Output.PARAM_RELPATH]] = entry[Output.PARAM_MD5]\n\n return res\n\n def dir_cache(self):\n res = {}\n dir_cache = self.get_dir_cache(self.cache)\n\n for relpath, md5 in dir_cache.items():\n res[relpath] = self.project.cache.get(md5)\n\n return res\n\n def checkout(self):\n if not self.use_cache:\n return\n\n msg = u'Checking out \\'{}\\' with cache \\'{}\\''\n self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))\n\n if not self.changed():\n msg = u'Data file \\'{}\\' with cache \\'{}\\' didn\\'t change, skipping checkout.'\n self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))\n return\n\n if not self.cache or not os.path.exists(self.cache):\n self.project.logger.warn(u'\\'{}\\': cache file not found'.format(self.rel_path))\n self.remove()\n return\n\n if os.path.exists(self.path):\n msg = u'Data file \\'{}\\' exists. 
Removing before checkout'\n self.project.logger.debug(msg.format(self.rel_path))\n self.remove()\n\n if not self.is_dir_cache(self.cache):\n self.hardlink(self.cache, self.path)\n return\n\n for relpath, cache in self.dir_cache().items():\n path = os.path.join(self.path, relpath)\n self.hardlink(cache, path)\n\n def _collect_dir(self):\n dir_info = []\n\n for root, dirs, files in os.walk(self.path):\n for fname in files:\n path = os.path.join(root, fname)\n relpath = os.path.relpath(path, self.path)\n\n md5 = self.project.state.update(path, dump=False)\n dir_info.append({self.PARAM_RELPATH: relpath, self.PARAM_MD5: md5})\n\n self.project.state.dump()\n\n return dir_info\n\n def _save_dir(self):\n dname = os.path.dirname(self.cache)\n dir_info = self._collect_dir()\n\n for entry in dir_info:\n md5 = entry[self.PARAM_MD5]\n relpath = entry[self.PARAM_RELPATH]\n path = os.path.join(self.path, relpath)\n cache = self.project.cache.get(md5)\n\n if os.path.exists(cache):\n self._remove(path)\n self.hardlink(cache, path)\n else:\n self.hardlink(path, cache)\n\n if not os.path.isdir(dname):\n os.makedirs(dname)\n\n with open(self.cache, 'w+') as fd:\n json.dump(dir_info, fd)\n\n def save(self):\n super(Output, self).save()\n\n if not self.use_cache:\n return\n\n self.project.logger.debug(u'Saving \\'{}\\' to \\'{}\\''.format(self.rel_path, self.rel_cache))\n\n if self.project.scm.is_tracked(self.path):\n raise CmdOutputAlreadyTrackedError(self.rel_path)\n\n if not self.changed():\n return\n\n if os.path.exists(self.cache):\n # This means that we already have cache for this data.\n # We remove data and link it to existing cache to save\n # some space.\n msg = u'Cache \\'{}\\' already exists, performing checkout for \\'{}\\''\n self.project.logger.debug(msg.format(self.rel_cache, self.rel_path))\n self.checkout()\n return\n\n if os.path.isdir(self.path):\n self._save_dir()\n else:\n self.hardlink(self.path, self.cache)\n\n def _remove(self, path):\n if not os.path.exists(path):\n return\n\n self.project.logger.debug(u'Removing \\'{}\\''.format(os.path.relpath(path)))\n if os.path.isfile(path):\n os.unlink(path)\n else:\n shutil.rmtree(path)\n\n def remove(self):\n self._remove(self.path)\n", "path": "dvc/output.py"}], "after_files": [{"content": "import os\nimport stat\nimport json\nimport shutil\nfrom checksumdir import dirhash\n\nfrom dvc.system import System\nfrom dvc.utils import file_md5\nfrom dvc.exceptions import DvcException\nfrom dvc.logger import Logger\n\n\nclass OutputError(DvcException):\n pass\n\n\nclass CmdOutputError(DvcException):\n def __init__(self, path, msg):\n super(CmdOutputError, self).__init__('Output file \\'{}\\' error: {}'.format(path, msg))\n\n\nclass CmdOutputNoCacheError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputNoCacheError, self).__init__(path, 'no cache')\n\n\nclass CmdOutputOutsideOfRepoError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputOutsideOfRepoError, self).__init__(path, 'outside of repository')\n\n\nclass CmdOutputDoesNotExistError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputDoesNotExistError, self).__init__(path, 'does not exist')\n\n\nclass CmdOutputIsNotFileOrDirError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputIsNotFileOrDirError, self).__init__(path, 'not a file or directory')\n\n\nclass CmdOutputAlreadyTrackedError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputAlreadyTrackedError, self).__init__(path, 'already tracked by scm(e.g. 
git)')\n\n\nclass Dependency(object):\n PARAM_RELPATH = 'relpath'\n PARAM_PATH = 'path'\n PARAM_MD5 = 'md5'\n MD5_DIR_SUFFIX = '.dir'\n\n def __init__(self, project, path, md5=None):\n self.project = project\n self.path = os.path.abspath(os.path.realpath(path))\n\n if not self.path.startswith(self.project.root_dir):\n raise CmdOutputOutsideOfRepoError(self.rel_path)\n\n self.md5 = md5\n\n @property\n def rel_path(self):\n return os.path.relpath(self.path)\n\n def _changed_md5(self):\n if not os.path.exists(self.path):\n return True\n\n return self.project.state.changed(self.path, self.md5)\n\n def changed(self):\n return self._changed_md5()\n\n def status(self):\n if self.changed():\n #FIXME better msgs\n return {self.rel_path: 'changed'}\n return {}\n\n @staticmethod\n def is_dir_cache(cache):\n return cache.endswith(Output.MD5_DIR_SUFFIX)\n\n def save(self):\n if not os.path.exists(self.path):\n raise CmdOutputDoesNotExistError(self.rel_path)\n\n if not os.path.isfile(self.path) and not os.path.isdir(self.path):\n raise CmdOutputIsNotFileOrDirError(self.rel_path)\n\n self.md5 = self.project.state.update(self.path)\n\n def dumpd(self, cwd):\n return {\n Output.PARAM_PATH: os.path.relpath(self.path, cwd),\n Output.PARAM_MD5: self.md5,\n }\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n path = os.path.join(cwd, d[Output.PARAM_PATH])\n md5 = d.get(Output.PARAM_MD5, None)\n return cls(project, path, md5=md5)\n\n @classmethod\n def loadd_from(cls, project, d_list, cwd=os.curdir):\n return [cls.loadd(project, x, cwd=cwd) for x in d_list]\n\n @classmethod\n def loads(cls, project, s, cwd=os.curdir):\n return cls(project, os.path.join(cwd, s), md5=None)\n\n @classmethod\n def loads_from(cls, project, s_list, cwd=os.curdir):\n return [cls.loads(project, x, cwd=cwd) for x in s_list]\n\n def stage(self):\n for stage in self.project.stages():\n for out in stage.outs:\n if self.path == out.path:\n return stage\n return None\n\n\nclass Output(Dependency):\n PARAM_CACHE = 'cache'\n\n def __init__(self, project, path, md5=None, use_cache=True):\n super(Output, self).__init__(project, path, md5=md5)\n self.use_cache = use_cache\n\n @property\n def cache(self):\n if not self.md5:\n return None\n\n return self.project.cache.get(self.md5)\n\n @property\n def rel_cache(self):\n if not self.cache:\n return None\n\n return os.path.relpath(self.cache)\n\n def dumpd(self, cwd):\n ret = super(Output, self).dumpd(cwd)\n ret[Output.PARAM_CACHE] = self.use_cache\n return ret\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n ret = super(Output, cls).loadd(project, d, cwd=cwd)\n ret.use_cache = d.get(Output.PARAM_CACHE, True)\n return ret\n\n @classmethod\n def loads(cls, project, s, use_cache=True, cwd=os.curdir):\n ret = super(Output, cls).loads(project, s, cwd=cwd)\n ret.use_cache = use_cache\n return ret\n\n @classmethod\n def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):\n return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in s_list]\n\n def _changed_cache(self, cache):\n md5 = self.project.state.update(cache)\n if md5 != self.project.cache.path_to_md5(cache):\n self.project.logger.warn('Corrupted cache file {}'.format(os.path.relpath(cache)))\n os.unlink(cache)\n return True\n\n return False\n\n def _changed_file(self, path, cache):\n if os.path.isfile(path) and \\\n os.path.isfile(cache) and \\\n System.samefile(path, cache) and \\\n not self._changed_cache(cache):\n return False\n\n return True\n\n def _changed_dir(self):\n if not 
os.path.isdir(self.path) or not os.path.isfile(self.cache):\n return True\n\n dir_info = self._collect_dir() # slow!\n dir_info_cached = self.load_dir_cache(self.cache) # slow. why?\n\n if not self.are_dir_info_equal(dir_info, dir_info_cached):\n return True\n\n return False\n\n @staticmethod\n def are_dir_info_equal(dir_info1, dir_info2):\n return Output.dir_info_dict(dir_info1) == Output.dir_info_dict(dir_info2)\n\n @staticmethod\n def dir_info_dict(dir_info):\n return {i['relpath']: i['md5'] for i in dir_info}\n\n def changed(self):\n if not self.use_cache:\n ret = super(Output, self).changed()\n elif not self.cache:\n ret = True\n elif self.is_dir_cache(self.cache):\n ret = self._changed_dir()\n else:\n ret = self._changed_file(self.path, self.cache)\n\n msg = u'Data file or dir \\'{}\\' with cache \\'{}\\' '\n if ret:\n msg += 'changed'\n else:\n msg += 'didn\\'t change'\n self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))\n\n return ret\n\n def hardlink(self, src, link):\n rel_src = os.path.relpath(src)\n rel_link = os.path.relpath(link)\n self.project.logger.debug(u'creating hardlink {} -> {}'.format(rel_src, rel_link))\n\n dname = os.path.dirname(link)\n if not os.path.exists(dname):\n os.makedirs(dname)\n\n System.hardlink(src, link)\n\n @staticmethod\n def load_dir_cache(path):\n if os.path.isabs(path):\n relpath = os.path.relpath(path)\n else:\n relpath = path\n\n try:\n with open(path, 'r') as fd:\n d = json.load(fd)\n except Exception as exc:\n msg = u'Failed to load dir cache \\'{}\\''\n Logger.error(msg.format(relpath), exc)\n return []\n\n if not isinstance(d, list):\n msg = u'Dir cache file format error \\'{}\\': skipping the file'\n Logger.error(msg.format(relpath))\n return []\n\n return d\n\n @staticmethod\n def get_dir_cache(path):\n res = {}\n d = Output.load_dir_cache(path)\n\n for entry in d:\n res[entry[Output.PARAM_RELPATH]] = entry[Output.PARAM_MD5]\n\n return res\n\n def dir_cache(self):\n res = {}\n dir_cache = self.get_dir_cache(self.cache)\n\n for relpath, md5 in dir_cache.items():\n res[relpath] = self.project.cache.get(md5)\n\n return res\n\n def checkout(self):\n if not self.use_cache:\n return\n\n msg = u'Checking out \\'{}\\' with cache \\'{}\\''\n self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))\n\n if not self.changed():\n msg = u'Data file \\'{}\\' with cache \\'{}\\' didn\\'t change, skipping checkout.'\n self.project.logger.debug(msg.format(self.rel_path, self.rel_cache))\n return\n\n if not self.cache or not os.path.exists(self.cache):\n if self.cache:\n self.project.logger.warn(u'\\'{}({})\\': cache file not found'.format(self.rel_cache, self.rel_path))\n self.remove()\n return\n\n if os.path.exists(self.path):\n msg = u'Data file \\'{}\\' exists. 
Removing before checkout'\n self.project.logger.debug(msg.format(self.rel_path))\n self.remove()\n\n if not self.is_dir_cache(self.cache):\n self.hardlink(self.cache, self.path)\n return\n\n for relpath, cache in self.dir_cache().items():\n path = os.path.join(self.path, relpath)\n self.hardlink(cache, path)\n\n def _collect_dir(self):\n dir_info = []\n\n for root, dirs, files in os.walk(self.path):\n for fname in files:\n path = os.path.join(root, fname)\n relpath = os.path.relpath(path, self.path)\n\n md5 = self.project.state.update(path, dump=False)\n dir_info.append({self.PARAM_RELPATH: relpath, self.PARAM_MD5: md5})\n\n self.project.state.dump()\n\n return dir_info\n\n def _save_dir(self):\n dname = os.path.dirname(self.cache)\n dir_info = self._collect_dir()\n\n for entry in dir_info:\n md5 = entry[self.PARAM_MD5]\n relpath = entry[self.PARAM_RELPATH]\n path = os.path.join(self.path, relpath)\n cache = self.project.cache.get(md5)\n\n if os.path.exists(cache):\n self._remove(path)\n self.hardlink(cache, path)\n else:\n self.hardlink(path, cache)\n\n if not os.path.isdir(dname):\n os.makedirs(dname)\n\n with open(self.cache, 'w+') as fd:\n json.dump(dir_info, fd)\n\n def save(self):\n super(Output, self).save()\n\n if not self.use_cache:\n return\n\n self.project.logger.debug(u'Saving \\'{}\\' to \\'{}\\''.format(self.rel_path, self.rel_cache))\n\n if self.project.scm.is_tracked(self.path):\n raise CmdOutputAlreadyTrackedError(self.rel_path)\n\n if not self.changed():\n return\n\n if os.path.exists(self.cache):\n # This means that we already have cache for this data.\n # We remove data and link it to existing cache to save\n # some space.\n msg = u'Cache \\'{}\\' already exists, performing checkout for \\'{}\\''\n self.project.logger.debug(msg.format(self.rel_cache, self.rel_path))\n self.checkout()\n return\n\n if os.path.isdir(self.path):\n self._save_dir()\n else:\n self.hardlink(self.path, self.cache)\n\n def _remove(self, path):\n if not os.path.exists(path):\n return\n\n self.project.logger.debug(u'Removing \\'{}\\''.format(os.path.relpath(path)))\n if os.path.isfile(path):\n os.unlink(path)\n else:\n shutil.rmtree(path)\n\n def remove(self):\n self._remove(self.path)\n", "path": "dvc/output.py"}]}
| 4,079 | 120 |
gh_patches_debug_15745
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-874
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pwndbg gets stuck in GDB 10.1
<!--
Before reporting a new issue, make sure that we do not have any duplicates already open.
If there is one it might be good to take part in the discussion there.
Please make sure you have checked that the issue persists on LATEST pwndbg version.
Below is a template for BUG REPORTS.
Don't include it if this is a FEATURE REQUEST.
-->
### Description
pwndbg gets stuck in GDB 10.1
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
### Steps to reproduce

<!--
What do we have to do to reproduce the problem?
If this is connected to particular C/asm code,
please provide the smallest C code that reproduces the issue.
-->
### My setup
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation - help us improving pwndbg and supporting other distros!
This can be displayed in pwndbg through `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
```
Gdb: 10.1
Python: 3.8.6 (default, Sep 30 2020, 04:00:38) [GCC 10.2.0]
Pwndbg: 1.1.0
Capstone: 4.0.1024
Unicorn: 1.0.2
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/heap/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import pwndbg.heap.heap
5 import pwndbg.symbol
6
7 current = None
8
9 heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')
10
11 @pwndbg.events.new_objfile
12 def update():
13 import pwndbg.heap.dlmalloc
14 import pwndbg.heap.ptmalloc
15
16 global current
17
18
19 if pwndbg.symbol.address('ptmalloc_init'):
20 current = pwndbg.heap.ptmalloc.Heap()
21
22 else:
23 # Default to ptmalloc heap for now until
24 # there are more implementations
25 current = pwndbg.heap.ptmalloc.Heap()
26
```
Path: `pwndbg/heap/dlmalloc.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import gdb
5
6 import pwndbg.events
7 import pwndbg.typeinfo
8
9
10 class Heap(pwndbg.heap.heap.BaseHeap):
11 pass
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/heap/__init__.py b/pwndbg/heap/__init__.py
--- a/pwndbg/heap/__init__.py
+++ b/pwndbg/heap/__init__.py
@@ -8,18 +8,8 @@
heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')
[email protected]_objfile
[email protected]
def update():
- import pwndbg.heap.dlmalloc
import pwndbg.heap.ptmalloc
-
global current
-
-
- if pwndbg.symbol.address('ptmalloc_init'):
- current = pwndbg.heap.ptmalloc.Heap()
-
- else:
- # Default to ptmalloc heap for now until
- # there are more implementations
- current = pwndbg.heap.ptmalloc.Heap()
+ current = pwndbg.heap.ptmalloc.Heap()
diff --git a/pwndbg/heap/dlmalloc.py b/pwndbg/heap/dlmalloc.py
deleted file mode 100644
--- a/pwndbg/heap/dlmalloc.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import gdb
-
-import pwndbg.events
-import pwndbg.typeinfo
-
-
-class Heap(pwndbg.heap.heap.BaseHeap):
- pass
|
{"golden_diff": "diff --git a/pwndbg/heap/__init__.py b/pwndbg/heap/__init__.py\n--- a/pwndbg/heap/__init__.py\n+++ b/pwndbg/heap/__init__.py\n@@ -8,18 +8,8 @@\n \n heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n \[email protected]_objfile\[email protected]\n def update():\n- import pwndbg.heap.dlmalloc\n import pwndbg.heap.ptmalloc\n-\n global current\n-\n-\n- if pwndbg.symbol.address('ptmalloc_init'):\n- current = pwndbg.heap.ptmalloc.Heap()\n-\n- else:\n- # Default to ptmalloc heap for now until\n- # there are more implementations\n- current = pwndbg.heap.ptmalloc.Heap()\n+ current = pwndbg.heap.ptmalloc.Heap()\ndiff --git a/pwndbg/heap/dlmalloc.py b/pwndbg/heap/dlmalloc.py\ndeleted file mode 100644\n--- a/pwndbg/heap/dlmalloc.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-#!/usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\n-import gdb\n-\n-import pwndbg.events\n-import pwndbg.typeinfo\n-\n-\n-class Heap(pwndbg.heap.heap.BaseHeap):\n- pass\n", "issue": "pwndbg gets stuck in GDB 10.1\n<!--\r\nBefore reporting a new issue, make sure that we do not have any duplicates already open.\r\nIf there is one it might be good to take part in the discussion there.\r\n\r\nPlease make sure you have checked that the issue persists on LATEST pwndbg version.\r\n\r\nBelow is a template for BUG REPORTS.\r\nDon't include it if this is a FEATURE REQUEST.\r\n-->\r\n\r\n\r\n### Description\r\npwndbg gets stuck in GDB 10.1\r\n<!--\r\nBriefly describe the problem you are having in a few paragraphs.\r\n-->\r\n\r\n### Steps to reproduce\r\n\r\n\r\n<!--\r\nWhat do we have to do to reproduce the problem?\r\nIf this is connected to particular C/asm code, \r\nplease provide the smallest C code that reproduces the issue.\r\n-->\r\n\r\n### My setup\r\n\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. 
on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\r\n```\r\nGdb: 10.1\r\nPython: 3.8.6 (default, Sep 30 2020, 04:00:38) [GCC 10.2.0]\r\nPwndbg: 1.1.0\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.2\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pwndbg.heap.heap\nimport pwndbg.symbol\n\ncurrent = None\n\nheap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n\[email protected]_objfile\ndef update():\n import pwndbg.heap.dlmalloc\n import pwndbg.heap.ptmalloc\n\n global current\n\n\n if pwndbg.symbol.address('ptmalloc_init'):\n current = pwndbg.heap.ptmalloc.Heap()\n\n else:\n # Default to ptmalloc heap for now until\n # there are more implementations\n current = pwndbg.heap.ptmalloc.Heap()\n", "path": "pwndbg/heap/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.typeinfo\n\n\nclass Heap(pwndbg.heap.heap.BaseHeap):\n pass\n", "path": "pwndbg/heap/dlmalloc.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pwndbg.heap.heap\nimport pwndbg.symbol\n\ncurrent = None\n\nheap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n\[email protected]\ndef update():\n import pwndbg.heap.ptmalloc\n global current\n current = pwndbg.heap.ptmalloc.Heap()\n", "path": "pwndbg/heap/__init__.py"}, {"content": null, "path": "pwndbg/heap/dlmalloc.py"}]}
| 1,027 | 326 |
gh_patches_debug_20387
|
rasdani/github-patches
|
git_diff
|
unionai-oss__pandera-416
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve contribution instructions and expose in the docs
#### Location of the documentation
NA
#### Documentation problem
Currently, the contribution documentation is in [github](https://github.com/pandera-dev/pandera/blob/master/.github/CONTRIBUTING.md) but it would be nice to also expose it in the online documentation.
Additionally, we also want to document the process of contributing:
Enhancements (New features)
- create a `feature/<my-feature>` or `feature/<issue-number>` branch
- make a pull request to `dev`
Bugs
- create a `bugfix/<my-fix>` or `bugfix/<issue-number>` branch
- make a pull request to `master`
Docs
- create a `docs/<my-docs>` or `docs/<issue-number>` branch
- make a pull request to `master`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 import doctest
10 import logging as pylogging
11
12 # If extensions (or modules to document with autodoc) are in another directory,
13 # add these directories to sys.path here. If the directory is relative to the
14 # documentation root, use os.path.abspath to make it absolute, like shown here.
15 #
16 import os
17 import sys
18
19 from sphinx.util import logging
20
21 sys.path.insert(0, os.path.abspath("../../pandera"))
22
23
24 # -- Project information -----------------------------------------------------
25
26 project = "pandera"
27 copyright = "2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque"
28 author = "Niels Bantilan, Nigel Markey, Jean-Francois Zinque"
29
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx.ext.autosummary",
39 "sphinx.ext.intersphinx",
40 "sphinx.ext.doctest",
41 "sphinx_autodoc_typehints",
42 "sphinx.ext.viewcode",
43 ]
44
45 doctest_global_setup = """
46 import sys
47 import pandas as pd
48 import numpy as np
49 from packaging import version
50 pd.options.display.max_columns = None # For Travis on macOS
51 pd.options.display.max_rows = None # For Travis on macOS
52
53 try:
54 import hypothesis
55 except ImportError:
56 SKIP_STRATEGY = True
57 else:
58 SKIP_STRATEGY = False
59
60 SKIP = sys.version_info < (3, 6)
61 PY36 = sys.version_info < (3, 7)
62 SKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36
63 """
64
65 doctest_default_flags = (
66 0
67 | doctest.DONT_ACCEPT_TRUE_FOR_1
68 | doctest.ELLIPSIS
69 | doctest.IGNORE_EXCEPTION_DETAIL
70 | doctest.NORMALIZE_WHITESPACE
71 )
72
73 # Add any paths that contain templates here, relative to this directory.
74 templates_path = ["_templates"]
75
76 # The master toctree document.
77 master_doc = "index"
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 # This pattern also affects html_static_path and html_extra_path.
82 exclude_patterns = []
83
84 autoclass_content = "both"
85 pygments_style = None
86
87 autodoc_default_options = {
88 # 'special-members': '__call__',
89 "undoc-members": False,
90 # 'exclude-members': '__weakref__'
91 }
92
93 # -- Options for HTML output -------------------------------------------------
94
95 # The theme to use for HTML and HTML Help pages. See the documentation for
96 # a list of builtin themes.
97 #
98 html_theme = "sphinx_rtd_theme"
99
100 # Theme options are theme-specific and customize the look and feel of a theme
101 # further. For a list of options available for each theme, see the
102 # documentation.
103
104 html_logo = "_static/pandera-banner-white.png"
105 html_theme_options = {
106 "collapse_navigation": False,
107 "logo_only": True,
108 "analytics_id": "UA-71018060-2",
109 }
110
111 # Add any paths that contain custom static files (such as style sheets) here,
112 # relative to this directory. They are copied after the builtin static files,
113 # so a file named "default.css" will overwrite the builtin "default.css".
114 html_static_path = ["_static"]
115
116 rst_prolog = """
117 .. role:: red
118 .. role:: green
119 """
120
121 autosummary_generate = ["API_reference.rst"]
122 autosummary_filename_map = {
123 "pandera.Check": "pandera.Check",
124 "pandera.check": "pandera.check_decorator",
125 }
126
127 intersphinx_mapping = {
128 "python": ("https://docs.python.org/3/", None),
129 "numpy": ("https://docs.scipy.org/doc/numpy/", None),
130 "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
131 }
132
133
134 # this is a workaround to filter out forward reference issue in
135 # sphinx_autodoc_typehints
136 class FilterPandasTypeAnnotationWarning(pylogging.Filter):
137 def filter(self, record: pylogging.LogRecord) -> bool:
138 # You probably should make this check more specific by checking
139 # that dataclass name is in the message, so that you don't filter out
140 # other meaningful warnings
141 return not record.getMessage().startswith(
142 "Cannot resolve forward reference in type annotations of "
143 '"pandera.typing.DataFrame"'
144 )
145
146
147 logging.getLogger("sphinx_autodoc_typehints").logger.addFilter(
148 FilterPandasTypeAnnotationWarning()
149 )
150
151
152 def setup(app):
153 app.add_css_file("default.css")
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -14,6 +14,7 @@
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
+import shutil
import sys
from sphinx.util import logging
@@ -40,6 +41,7 @@
"sphinx.ext.doctest",
"sphinx_autodoc_typehints",
"sphinx.ext.viewcode",
+ "recommonmark",
]
doctest_global_setup = """
@@ -70,6 +72,19 @@
| doctest.NORMALIZE_WHITESPACE
)
+source_suffix = {
+ ".rst": "restructuredtext",
+ ".md": "markdown",
+}
+
+# copy CONTRIBUTING.md docs into source directory
+shutil.copyfile(
+ os.path.join(
+ os.path.dirname(__file__), "..", "..", ".github", "CONTRIBUTING.md"
+ ),
+ "CONTRIBUTING.md",
+)
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -14,6 +14,7 @@\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n #\n import os\n+import shutil\n import sys\n \n from sphinx.util import logging\n@@ -40,6 +41,7 @@\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n+ \"recommonmark\",\n ]\n \n doctest_global_setup = \"\"\"\n@@ -70,6 +72,19 @@\n | doctest.NORMALIZE_WHITESPACE\n )\n \n+source_suffix = {\n+ \".rst\": \"restructuredtext\",\n+ \".md\": \"markdown\",\n+}\n+\n+# copy CONTRIBUTING.md docs into source directory\n+shutil.copyfile(\n+ os.path.join(\n+ os.path.dirname(__file__), \"..\", \"..\", \".github\", \"CONTRIBUTING.md\"\n+ ),\n+ \"CONTRIBUTING.md\",\n+)\n+\n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n", "issue": "improve contribution instructions and expose in the docs\n#### Location of the documentation\r\n\r\nNA\r\n\r\n#### Documentation problem\r\n\r\nCurrently, the contribution documentation is in [github](https://github.com/pandera-dev/pandera/blob/master/.github/CONTRIBUTING.md) but it would be nice to also expose it in the online documentation.\r\n\r\nAdditionally, we also want to document the process of contributing:\r\n\r\nEnhancements (New features)\r\n- create a `feature/<my-feature>` or `feature/<issue-number>` branch\r\n- make a pull request to `dev`\r\n\r\nBugs\r\n- create a `bugfix/<my-fix>` or `bugfix/<issue-number>` branch\r\n- make a pull request to `master`\r\n\r\nDocs\r\n- create a `docs/<my-docs>` or `docs/<issue-number>` branch\r\n- make a pull request to `master`\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport doctest\nimport logging as pylogging\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nfrom sphinx.util import logging\n\nsys.path.insert(0, os.path.abspath(\"../../pandera\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pandera\"\ncopyright = \"2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\nauthor = \"Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n]\n\ndoctest_global_setup = \"\"\"\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom packaging import version\npd.options.display.max_columns = None # For Travis on macOS\npd.options.display.max_rows = None # For Travis on macOS\n\ntry:\n import hypothesis\nexcept ImportError:\n SKIP_STRATEGY = True\nelse:\n SKIP_STRATEGY = False\n\nSKIP = sys.version_info < (3, 6)\nPY36 = sys.version_info < (3, 7)\nSKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36\n\"\"\"\n\ndoctest_default_flags = (\n 0\n | doctest.DONT_ACCEPT_TRUE_FOR_1\n | doctest.ELLIPSIS\n | doctest.IGNORE_EXCEPTION_DETAIL\n | doctest.NORMALIZE_WHITESPACE\n)\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\nautoclass_content = \"both\"\npygments_style = None\n\nautodoc_default_options = {\n # 'special-members': '__call__',\n \"undoc-members\": False,\n # 'exclude-members': '__weakref__'\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_logo = \"_static/pandera-banner-white.png\"\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"logo_only\": True,\n \"analytics_id\": \"UA-71018060-2\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nrst_prolog = \"\"\"\n.. role:: red\n.. 
role:: green\n\"\"\"\n\nautosummary_generate = [\"API_reference.rst\"]\nautosummary_filename_map = {\n \"pandera.Check\": \"pandera.Check\",\n \"pandera.check\": \"pandera.check_decorator\",\n}\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n}\n\n\n# this is a workaround to filter out forward reference issue in\n# sphinx_autodoc_typehints\nclass FilterPandasTypeAnnotationWarning(pylogging.Filter):\n def filter(self, record: pylogging.LogRecord) -> bool:\n # You probably should make this check more specific by checking\n # that dataclass name is in the message, so that you don't filter out\n # other meaningful warnings\n return not record.getMessage().startswith(\n \"Cannot resolve forward reference in type annotations of \"\n '\"pandera.typing.DataFrame\"'\n )\n\n\nlogging.getLogger(\"sphinx_autodoc_typehints\").logger.addFilter(\n FilterPandasTypeAnnotationWarning()\n)\n\n\ndef setup(app):\n app.add_css_file(\"default.css\")\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport doctest\nimport logging as pylogging\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport shutil\nimport sys\n\nfrom sphinx.util import logging\n\nsys.path.insert(0, os.path.abspath(\"../../pandera\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pandera\"\ncopyright = \"2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\nauthor = \"Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n \"recommonmark\",\n]\n\ndoctest_global_setup = \"\"\"\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom packaging import version\npd.options.display.max_columns = None # For Travis on macOS\npd.options.display.max_rows = None # For Travis on macOS\n\ntry:\n import hypothesis\nexcept ImportError:\n SKIP_STRATEGY = True\nelse:\n SKIP_STRATEGY = False\n\nSKIP = sys.version_info < (3, 6)\nPY36 = sys.version_info < (3, 7)\nSKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36\n\"\"\"\n\ndoctest_default_flags = (\n 0\n | doctest.DONT_ACCEPT_TRUE_FOR_1\n | doctest.ELLIPSIS\n | doctest.IGNORE_EXCEPTION_DETAIL\n | doctest.NORMALIZE_WHITESPACE\n)\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\n# copy CONTRIBUTING.md docs into source directory\nshutil.copyfile(\n os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \".github\", \"CONTRIBUTING.md\"\n ),\n \"CONTRIBUTING.md\",\n)\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\nautoclass_content = \"both\"\npygments_style = None\n\nautodoc_default_options = {\n # 'special-members': '__call__',\n \"undoc-members\": False,\n # 'exclude-members': '__weakref__'\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_logo = \"_static/pandera-banner-white.png\"\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"logo_only\": True,\n \"analytics_id\": \"UA-71018060-2\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nrst_prolog = \"\"\"\n.. role:: red\n.. 
role:: green\n\"\"\"\n\nautosummary_generate = [\"API_reference.rst\"]\nautosummary_filename_map = {\n \"pandera.Check\": \"pandera.Check\",\n \"pandera.check\": \"pandera.check_decorator\",\n}\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n}\n\n\n# this is a workaround to filter out forward reference issue in\n# sphinx_autodoc_typehints\nclass FilterPandasTypeAnnotationWarning(pylogging.Filter):\n def filter(self, record: pylogging.LogRecord) -> bool:\n # You probably should make this check more specific by checking\n # that dataclass name is in the message, so that you don't filter out\n # other meaningful warnings\n return not record.getMessage().startswith(\n \"Cannot resolve forward reference in type annotations of \"\n '\"pandera.typing.DataFrame\"'\n )\n\n\nlogging.getLogger(\"sphinx_autodoc_typehints\").logger.addFilter(\n FilterPandasTypeAnnotationWarning()\n)\n\n\ndef setup(app):\n app.add_css_file(\"default.css\")\n", "path": "docs/source/conf.py"}]}
| 1,895 | 259 |
gh_patches_debug_14934
|
rasdani/github-patches
|
git_diff
|
hylang__hy-932
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multiple implementations overwrite each others' entrypoint hooks
It's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.
Sorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.
Multiple implementations overwrite each others' entrypoint hooks
It's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.
Sorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the "Software"),
6 # to deal in the Software without restriction, including without limitation
7 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 # and/or sell copies of the Software, and to permit persons to whom the
9 # Software is furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 # DEALINGS IN THE SOFTWARE.
21
22 import os
23 import re
24 import sys
25
26 from setuptools import find_packages, setup
27
28 PKG = "hy"
29 VERSIONFILE = os.path.join(PKG, "version.py")
30 verstr = "unknown"
31 try:
32 verstrline = open(VERSIONFILE, "rt").read()
33 except EnvironmentError:
34 pass # Okay, there is no version file.
35 else:
36 VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
37 mo = re.search(VSRE, verstrline, re.M)
38 if mo:
39 __version__ = mo.group(1)
40 else:
41 msg = "if %s.py exists, it is required to be well-formed" % VERSIONFILE
42 raise RuntimeError(msg)
43
44 long_description = """Hy is a Python <--> Lisp layer. It helps
45 make things work nicer, and lets Python and the Hy lisp variant play
46 nice together. """
47
48 install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
49 if sys.version_info[:2] < (2, 7):
50 install_requires.append('argparse>=1.2.1')
51 install_requires.append('importlib>=1.0.2')
52 if os.name == 'nt':
53 install_requires.append('pyreadline==2.0')
54
55 setup(
56 name=PKG,
57 version=__version__,
58 install_requires=install_requires,
59 entry_points={
60 'console_scripts': [
61 'hy = hy.cmdline:hy_main',
62 'hyc = hy.cmdline:hyc_main',
63 'hy2py = hy.cmdline:hy2py_main',
64 ]
65 },
66 packages=find_packages(exclude=['tests*']),
67 package_data={
68 'hy.contrib': ['*.hy'],
69 'hy.core': ['*.hy'],
70 },
71 author="Paul Tagliamonte",
72 author_email="[email protected]",
73 long_description=long_description,
74 description='Lisp and Python love each other.',
75 license="Expat",
76 url="http://hylang.org/",
77 platforms=['any'],
78 classifiers=[
79 "Development Status :: 4 - Beta",
80 "Intended Audience :: Developers",
81 "License :: DFSG approved",
82 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
83 "Operating System :: OS Independent",
84 "Programming Language :: Lisp",
85 "Programming Language :: Python",
86 "Programming Language :: Python :: 2",
87 "Programming Language :: Python :: 2.6",
88 "Programming Language :: Python :: 2.7",
89 "Programming Language :: Python :: 3",
90 "Programming Language :: Python :: 3.3",
91 "Programming Language :: Python :: 3.4",
92 "Topic :: Software Development :: Code Generators",
93 "Topic :: Software Development :: Compilers",
94 "Topic :: Software Development :: Libraries",
95 ]
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,6 +52,8 @@
if os.name == 'nt':
install_requires.append('pyreadline==2.0')
+ver = sys.version_info[0]
+
setup(
name=PKG,
version=__version__,
@@ -59,8 +61,11 @@
entry_points={
'console_scripts': [
'hy = hy.cmdline:hy_main',
+ 'hy%d = hy.cmdline:hy_main' % ver,
'hyc = hy.cmdline:hyc_main',
+ 'hyc%d = hy.cmdline:hyc_main' % ver,
'hy2py = hy.cmdline:hy2py_main',
+ 'hy2py%d = hy.cmdline:hy2py_main' % ver,
]
},
packages=find_packages(exclude=['tests*']),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,6 +52,8 @@\n if os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n \n+ver = sys.version_info[0]\n+\n setup(\n name=PKG,\n version=__version__,\n@@ -59,8 +61,11 @@\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n+ 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n+ 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n+ 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n", "issue": "Multiple implementations overwrite each others' entrypoint hooks\nIt's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.\n\nSorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.\n\nMultiple implementations overwrite each others' entrypoint hooks\nIt's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.\n\nSorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\nverstr = \"unknown\"\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n __version__ = mo.group(1)\n else:\n msg = \"if %s.py exists, it is required to be well-formed\" % VERSIONFILE\n raise RuntimeError(msg)\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\nverstr = \"unknown\"\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n __version__ = mo.group(1)\n else:\n msg = \"if %s.py exists, it is required to be well-formed\" % VERSIONFILE\n raise RuntimeError(msg)\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}]}
| 1,593 | 203 |
gh_patches_debug_3001
|
rasdani/github-patches
|
git_diff
|
Bitmessage__PyBitmessage-726
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Trouble sending on multicore machines on 0.4.4
I've seen this on both an OSX box (8 cores) and a linux box (4 cores). I was only able to do the full repro on linux, as my `keys.dat` file prevented me from going back to 0.4.3 on the OSX box.
1. Check out v0.4.3.
2. Open top
3. Open bitmessage.
4. Send a message.
5. Processes will start up for each core in top to calculate the PoW more quickly. Message will send.
6. Close bitmessage.
7. Check out `ProtoV3`
8. Send a message.
9. Processes will fire up in top. They'll consume 100% cpu for a few minutes. One by one, the CPU usage on each process will drop to zero.
10. The bitmessage app will still say that we're doing work to calculate the PoW. The message never sends.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/bitmessagemain.py`
Content:
```
1 #!/usr/bin/env python2.7
2 # Copyright (c) 2012 Jonathan Warren
3 # Copyright (c) 2012 The Bitmessage developers
4 # Distributed under the MIT/X11 software license. See the accompanying
5 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
6
7 # Right now, PyBitmessage only support connecting to stream 1. It doesn't
8 # yet contain logic to expand into further streams.
9
10 # The software version variable is now held in shared.py
11
12
13 import sys
14 #Version check
15 #Older versions of Python don't support the print function while Python 3 doesn't
16 #like the print statement, so we use sys.stdout for the version check. After this
17 #check we can then use the print function in the remainder of this file. Currently
18 #in order to use logging, a lot of unnecessary code needs to be executed which could
19 #potentially render this version check useless. So logging won't be used here until
20 #there is a more efficient way to configure logging
21 if sys.hexversion >= 0x3000000:
22 msg = "PyBitmessage does not support Python 3. Python 2.7.3 or later is required. Your version: %s" % sys.version
23 #logger.critical(msg)
24 sys.stdout.write(msg)
25 sys.exit(0)
26 if sys.hexversion < 0x20703F0:
27 msg = "You should use Python 2.7.3 or greater (but not Python 3). Your version: %s" % sys.version
28 #logger.critical(msg)
29 sys.stdout.write(msg)
30 sys.exit(0)
31
32 import signal # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully.
33 # The next 3 are used for the API
34 import singleton
35 import os
36 import socket
37 import ctypes
38 from struct import pack
39
40 from SimpleXMLRPCServer import SimpleXMLRPCServer
41 from api import MySimpleXMLRPCRequestHandler
42 from helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections
43
44 import shared
45 from helper_sql import sqlQuery
46 import threading
47
48 # Classes
49 #from helper_sql import *
50 #from class_sqlThread import *
51 from class_sqlThread import sqlThread
52 from class_singleCleaner import singleCleaner
53 #from class_singleWorker import *
54 from class_objectProcessor import objectProcessor
55 from class_outgoingSynSender import outgoingSynSender
56 from class_singleListener import singleListener
57 from class_singleWorker import singleWorker
58 #from class_addressGenerator import *
59 from class_addressGenerator import addressGenerator
60 from debug import logger
61
62 # Helper Functions
63 import helper_bootstrap
64 import helper_generic
65
66 from subprocess import call
67 import time
68
69
70 def connectToStream(streamNumber):
71 shared.streamsInWhichIAmParticipating[streamNumber] = 'no data'
72 selfInitiatedConnections[streamNumber] = {}
73 shared.inventorySets[streamNumber] = set()
74 queryData = sqlQuery('''SELECT hash FROM inventory WHERE streamnumber=?''', streamNumber)
75 for row in queryData:
76 shared.inventorySets[streamNumber].add(row[0])
77
78
79 if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():
80 # Some XP and Vista systems can only have 10 outgoing connections at a time.
81 maximumNumberOfHalfOpenConnections = 9
82 else:
83 maximumNumberOfHalfOpenConnections = 64
84 for i in range(maximumNumberOfHalfOpenConnections):
85 a = outgoingSynSender()
86 a.setup(streamNumber, selfInitiatedConnections)
87 a.start()
88
89 def _fixWinsock():
90 if not ('win32' in sys.platform) and not ('win64' in sys.platform):
91 return
92
93 # Python 2 on Windows doesn't define a wrapper for
94 # socket.inet_ntop but we can make one ourselves using ctypes
95 if not hasattr(socket, 'inet_ntop'):
96 addressToString = ctypes.windll.ws2_32.WSAAddressToStringA
97 def inet_ntop(family, host):
98 if family == socket.AF_INET:
99 if len(host) != 4:
100 raise ValueError("invalid IPv4 host")
101 host = pack("hH4s8s", socket.AF_INET, 0, host, "\0" * 8)
102 elif family == socket.AF_INET6:
103 if len(host) != 16:
104 raise ValueError("invalid IPv6 host")
105 host = pack("hHL16sL", socket.AF_INET6, 0, 0, host, 0)
106 else:
107 raise ValueError("invalid address family")
108 buf = "\0" * 64
109 lengthBuf = pack("I", len(buf))
110 addressToString(host, len(host), None, buf, lengthBuf)
111 return buf[0:buf.index("\0")]
112 socket.inet_ntop = inet_ntop
113
114 # Same for inet_pton
115 if not hasattr(socket, 'inet_pton'):
116 stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
117 def inet_pton(family, host):
118 buf = "\0" * 28
119 lengthBuf = pack("I", len(buf))
120 if stringToAddress(str(host),
121 int(family),
122 None,
123 buf,
124 lengthBuf) != 0:
125 raise socket.error("illegal IP address passed to inet_pton")
126 if family == socket.AF_INET:
127 return buf[4:8]
128 elif family == socket.AF_INET6:
129 return buf[8:24]
130 else:
131 raise ValueError("invalid address family")
132 socket.inet_pton = inet_pton
133
134 # These sockopts are needed on for IPv6 support
135 if not hasattr(socket, 'IPPROTO_IPV6'):
136 socket.IPPROTO_IPV6 = 41
137 if not hasattr(socket, 'IPV6_V6ONLY'):
138 socket.IPV6_V6ONLY = 27
139
140 # This thread, of which there is only one, runs the API.
141 class singleAPI(threading.Thread):
142
143 def __init__(self):
144 threading.Thread.__init__(self)
145
146 def run(self):
147 se = SimpleXMLRPCServer((shared.config.get('bitmessagesettings', 'apiinterface'), shared.config.getint(
148 'bitmessagesettings', 'apiport')), MySimpleXMLRPCRequestHandler, True, True)
149 se.register_introspection_functions()
150 se.serve_forever()
151
152 # This is a list of current connections (the thread pointers at least)
153 selfInitiatedConnections = {}
154
155 if shared.useVeryEasyProofOfWorkForTesting:
156 shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(
157 shared.networkDefaultProofOfWorkNonceTrialsPerByte / 16)
158 shared.networkDefaultPayloadLengthExtraBytes = int(
159 shared.networkDefaultPayloadLengthExtraBytes / 7000)
160
161 class Main:
162 def start(self, daemon=False):
163 _fixWinsock()
164
165 shared.daemon = daemon
166 # is the application already running? If yes then exit.
167 thisapp = singleton.singleinstance()
168
169 # get curses flag
170 curses = False
171 if '-c' in sys.argv:
172 curses = True
173
174 signal.signal(signal.SIGINT, helper_generic.signal_handler)
175 signal.signal(signal.SIGTERM, helper_generic.signal_handler)
176 # signal.signal(signal.SIGINT, signal.SIG_DFL)
177
178 helper_bootstrap.knownNodes()
179 # Start the address generation thread
180 addressGeneratorThread = addressGenerator()
181 addressGeneratorThread.daemon = True # close the main program even if there are threads left
182 addressGeneratorThread.start()
183
184 # Start the thread that calculates POWs
185 singleWorkerThread = singleWorker()
186 singleWorkerThread.daemon = True # close the main program even if there are threads left
187 singleWorkerThread.start()
188
189 # Start the SQL thread
190 sqlLookup = sqlThread()
191 sqlLookup.daemon = False # DON'T close the main program even if there are threads left. The closeEvent should command this thread to exit gracefully.
192 sqlLookup.start()
193
194 # Start the thread that calculates POWs
195 objectProcessorThread = objectProcessor()
196 objectProcessorThread.daemon = False # DON'T close the main program even the thread remains. This thread checks the shutdown variable after processing each object.
197 objectProcessorThread.start()
198
199 # Start the cleanerThread
200 singleCleanerThread = singleCleaner()
201 singleCleanerThread.daemon = True # close the main program even if there are threads left
202 singleCleanerThread.start()
203
204 shared.reloadMyAddressHashes()
205 shared.reloadBroadcastSendersForWhichImWatching()
206
207 if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
208 try:
209 apiNotifyPath = shared.config.get(
210 'bitmessagesettings', 'apinotifypath')
211 except:
212 apiNotifyPath = ''
213 if apiNotifyPath != '':
214 with shared.printLock:
215 print('Trying to call', apiNotifyPath)
216
217 call([apiNotifyPath, "startingUp"])
218 singleAPIThread = singleAPI()
219 singleAPIThread.daemon = True # close the main program even if there are threads left
220 singleAPIThread.start()
221
222 connectToStream(1)
223
224 singleListenerThread = singleListener()
225 singleListenerThread.setup(selfInitiatedConnections)
226 singleListenerThread.daemon = True # close the main program even if there are threads left
227 singleListenerThread.start()
228
229 if daemon == False and shared.safeConfigGetBoolean('bitmessagesettings', 'daemon') == False:
230 if curses == False:
231 try:
232 from PyQt4 import QtCore, QtGui
233 except Exception as err:
234 print('PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon')
235 print('Error message:', err)
236 print('You can also run PyBitmessage with the new curses interface by providing \'-c\' as a commandline argument.')
237 os._exit(0)
238
239 import bitmessageqt
240 bitmessageqt.run()
241 else:
242 print('Running with curses')
243 import bitmessagecurses
244 bitmessagecurses.runwrapper()
245 else:
246 shared.config.remove_option('bitmessagesettings', 'dontconnect')
247
248 if daemon:
249 with shared.printLock:
250 print('Running as a daemon. The main program should exit this thread.')
251 else:
252 with shared.printLock:
253 print('Running as a daemon. You can use Ctrl+C to exit.')
254 while True:
255 time.sleep(20)
256
257 def stop(self):
258 with shared.printLock:
259 print('Stopping Bitmessage Deamon.')
260 shared.doCleanShutdown()
261
262
263 #TODO: nice function but no one is using this
264 def getApiAddress(self):
265 if not shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
266 return None
267 address = shared.config.get('bitmessagesettings', 'apiinterface')
268 port = shared.config.getint('bitmessagesettings', 'apiport')
269 return {'address':address,'port':port}
270
271 if __name__ == "__main__":
272 mainprogram = Main()
273 mainprogram.start()
274
275
276 # So far, the creation of and management of the Bitmessage protocol and this
277 # client is a one-man operation. Bitcoin tips are quite appreciated.
278 # 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py
--- a/src/bitmessagemain.py
+++ b/src/bitmessagemain.py
@@ -172,7 +172,6 @@
curses = True
signal.signal(signal.SIGINT, helper_generic.signal_handler)
- signal.signal(signal.SIGTERM, helper_generic.signal_handler)
# signal.signal(signal.SIGINT, signal.SIG_DFL)
helper_bootstrap.knownNodes()
|
{"golden_diff": "diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py\n--- a/src/bitmessagemain.py\n+++ b/src/bitmessagemain.py\n@@ -172,7 +172,6 @@\n curses = True\n \n signal.signal(signal.SIGINT, helper_generic.signal_handler)\n- signal.signal(signal.SIGTERM, helper_generic.signal_handler)\n # signal.signal(signal.SIGINT, signal.SIG_DFL)\n \n helper_bootstrap.knownNodes()\n", "issue": "Trouble sending on multicor machines on 0.4.4\nI've seen this on both an OSX box (8 cores) and a linux box (4 cores). I was only able to do the full reproducible on linux, as my `keys.dat` file prevented me from going back to 0.4.3 on the OSX box.\n1. Check out v0.4.3.\n2. Open top\n3. Open bitmessage.\n4. Send a message.\n5. Processes will start up for each core in top to calculate the PoW more quickly. Message will send.\n6. Close bitmessage.\n7. Check out `ProtoV3`\n8. Send a message.\n9. Processes will fire up in top. They'll consume 100% cpu for a few minutes. One by one, the CPU usage on each process will drop to zero.\n10. The bitmessage app will still say that we're doing work to calculate the PoW. The message never sends.\n\n", "before_files": [{"content": "#!/usr/bin/env python2.7\n# Copyright (c) 2012 Jonathan Warren\n# Copyright (c) 2012 The Bitmessage developers\n# Distributed under the MIT/X11 software license. See the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n# Right now, PyBitmessage only support connecting to stream 1. It doesn't\n# yet contain logic to expand into further streams.\n\n# The software version variable is now held in shared.py\n\n\nimport sys\n#Version check\n#Older versions of Python don't support the print function while Python 3 doesn't\n#like the print statement, so we use sys.stdout for the version check. After this\n#check we can then use the print function in the remainder of this file. Currently\n#in order to use logging, a lot of unnecessary code needs to be executed which could\n#potentially render this version check useless. So logging won't be used here until\n#there is a more efficient way to configure logging\nif sys.hexversion >= 0x3000000:\n msg = \"PyBitmessage does not support Python 3. Python 2.7.3 or later is required. Your version: %s\" % sys.version\n #logger.critical(msg)\n sys.stdout.write(msg)\n sys.exit(0)\nif sys.hexversion < 0x20703F0:\n msg = \"You should use Python 2.7.3 or greater (but not Python 3). 
Your version: %s\" % sys.version\n #logger.critical(msg)\n sys.stdout.write(msg)\n sys.exit(0)\n\nimport signal # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully.\n# The next 3 are used for the API\nimport singleton\nimport os\nimport socket\nimport ctypes\nfrom struct import pack\n\nfrom SimpleXMLRPCServer import SimpleXMLRPCServer\nfrom api import MySimpleXMLRPCRequestHandler\nfrom helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections\n\nimport shared\nfrom helper_sql import sqlQuery\nimport threading\n\n# Classes\n#from helper_sql import *\n#from class_sqlThread import *\nfrom class_sqlThread import sqlThread\nfrom class_singleCleaner import singleCleaner\n#from class_singleWorker import *\nfrom class_objectProcessor import objectProcessor\nfrom class_outgoingSynSender import outgoingSynSender\nfrom class_singleListener import singleListener\nfrom class_singleWorker import singleWorker\n#from class_addressGenerator import *\nfrom class_addressGenerator import addressGenerator\nfrom debug import logger\n\n# Helper Functions\nimport helper_bootstrap\nimport helper_generic\n\nfrom subprocess import call\nimport time\n \n\ndef connectToStream(streamNumber):\n shared.streamsInWhichIAmParticipating[streamNumber] = 'no data'\n selfInitiatedConnections[streamNumber] = {}\n shared.inventorySets[streamNumber] = set()\n queryData = sqlQuery('''SELECT hash FROM inventory WHERE streamnumber=?''', streamNumber)\n for row in queryData:\n shared.inventorySets[streamNumber].add(row[0])\n\n \n if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():\n # Some XP and Vista systems can only have 10 outgoing connections at a time.\n maximumNumberOfHalfOpenConnections = 9\n else:\n maximumNumberOfHalfOpenConnections = 64\n for i in range(maximumNumberOfHalfOpenConnections):\n a = outgoingSynSender()\n a.setup(streamNumber, selfInitiatedConnections)\n a.start()\n\ndef _fixWinsock():\n if not ('win32' in sys.platform) and not ('win64' in sys.platform):\n return\n\n # Python 2 on Windows doesn't define a wrapper for\n # socket.inet_ntop but we can make one ourselves using ctypes\n if not hasattr(socket, 'inet_ntop'):\n addressToString = ctypes.windll.ws2_32.WSAAddressToStringA\n def inet_ntop(family, host):\n if family == socket.AF_INET:\n if len(host) != 4:\n raise ValueError(\"invalid IPv4 host\")\n host = pack(\"hH4s8s\", socket.AF_INET, 0, host, \"\\0\" * 8)\n elif family == socket.AF_INET6:\n if len(host) != 16:\n raise ValueError(\"invalid IPv6 host\")\n host = pack(\"hHL16sL\", socket.AF_INET6, 0, 0, host, 0)\n else:\n raise ValueError(\"invalid address family\")\n buf = \"\\0\" * 64\n lengthBuf = pack(\"I\", len(buf))\n addressToString(host, len(host), None, buf, lengthBuf)\n return buf[0:buf.index(\"\\0\")]\n socket.inet_ntop = inet_ntop\n\n # Same for inet_pton\n if not hasattr(socket, 'inet_pton'):\n stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA\n def inet_pton(family, host):\n buf = \"\\0\" * 28\n lengthBuf = pack(\"I\", len(buf))\n if stringToAddress(str(host),\n int(family),\n None,\n buf,\n lengthBuf) != 0:\n raise socket.error(\"illegal IP address passed to inet_pton\")\n if family == socket.AF_INET:\n return buf[4:8]\n elif family == socket.AF_INET6:\n return buf[8:24]\n else:\n raise ValueError(\"invalid address family\")\n socket.inet_pton = inet_pton\n\n # These sockopts are needed on for IPv6 support\n if not hasattr(socket, 'IPPROTO_IPV6'):\n socket.IPPROTO_IPV6 = 41\n if not hasattr(socket, 'IPV6_V6ONLY'):\n 
socket.IPV6_V6ONLY = 27\n\n# This thread, of which there is only one, runs the API.\nclass singleAPI(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n se = SimpleXMLRPCServer((shared.config.get('bitmessagesettings', 'apiinterface'), shared.config.getint(\n 'bitmessagesettings', 'apiport')), MySimpleXMLRPCRequestHandler, True, True)\n se.register_introspection_functions()\n se.serve_forever()\n\n# This is a list of current connections (the thread pointers at least)\nselfInitiatedConnections = {}\n\nif shared.useVeryEasyProofOfWorkForTesting:\n shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(\n shared.networkDefaultProofOfWorkNonceTrialsPerByte / 16)\n shared.networkDefaultPayloadLengthExtraBytes = int(\n shared.networkDefaultPayloadLengthExtraBytes / 7000)\n\nclass Main:\n def start(self, daemon=False):\n _fixWinsock()\n\n shared.daemon = daemon\n # is the application already running? If yes then exit.\n thisapp = singleton.singleinstance()\n\n # get curses flag\n curses = False\n if '-c' in sys.argv:\n curses = True\n\n signal.signal(signal.SIGINT, helper_generic.signal_handler)\n signal.signal(signal.SIGTERM, helper_generic.signal_handler)\n # signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n helper_bootstrap.knownNodes()\n # Start the address generation thread\n addressGeneratorThread = addressGenerator()\n addressGeneratorThread.daemon = True # close the main program even if there are threads left\n addressGeneratorThread.start()\n\n # Start the thread that calculates POWs\n singleWorkerThread = singleWorker()\n singleWorkerThread.daemon = True # close the main program even if there are threads left\n singleWorkerThread.start()\n\n # Start the SQL thread\n sqlLookup = sqlThread()\n sqlLookup.daemon = False # DON'T close the main program even if there are threads left. The closeEvent should command this thread to exit gracefully.\n sqlLookup.start()\n\n # Start the thread that calculates POWs\n objectProcessorThread = objectProcessor()\n objectProcessorThread.daemon = False # DON'T close the main program even the thread remains. This thread checks the shutdown variable after processing each object.\n objectProcessorThread.start()\n\n # Start the cleanerThread\n singleCleanerThread = singleCleaner()\n singleCleanerThread.daemon = True # close the main program even if there are threads left\n singleCleanerThread.start()\n\n shared.reloadMyAddressHashes()\n shared.reloadBroadcastSendersForWhichImWatching()\n\n if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):\n try:\n apiNotifyPath = shared.config.get(\n 'bitmessagesettings', 'apinotifypath')\n except:\n apiNotifyPath = ''\n if apiNotifyPath != '':\n with shared.printLock:\n print('Trying to call', apiNotifyPath)\n\n call([apiNotifyPath, \"startingUp\"])\n singleAPIThread = singleAPI()\n singleAPIThread.daemon = True # close the main program even if there are threads left\n singleAPIThread.start()\n\n connectToStream(1)\n\n singleListenerThread = singleListener()\n singleListenerThread.setup(selfInitiatedConnections)\n singleListenerThread.daemon = True # close the main program even if there are threads left\n singleListenerThread.start()\n\n if daemon == False and shared.safeConfigGetBoolean('bitmessagesettings', 'daemon') == False:\n if curses == False:\n try:\n from PyQt4 import QtCore, QtGui\n except Exception as err:\n print('PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. 
You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \\'PyQt Download\\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon')\n print('Error message:', err)\n print('You can also run PyBitmessage with the new curses interface by providing \\'-c\\' as a commandline argument.')\n os._exit(0)\n\n import bitmessageqt\n bitmessageqt.run()\n else:\n print('Running with curses')\n import bitmessagecurses\n bitmessagecurses.runwrapper()\n else:\n shared.config.remove_option('bitmessagesettings', 'dontconnect')\n\n if daemon:\n with shared.printLock:\n print('Running as a daemon. The main program should exit this thread.')\n else:\n with shared.printLock:\n print('Running as a daemon. You can use Ctrl+C to exit.')\n while True:\n time.sleep(20)\n\n def stop(self):\n with shared.printLock:\n print('Stopping Bitmessage Deamon.')\n shared.doCleanShutdown()\n\n\n #TODO: nice function but no one is using this \n def getApiAddress(self):\n if not shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):\n return None\n address = shared.config.get('bitmessagesettings', 'apiinterface')\n port = shared.config.getint('bitmessagesettings', 'apiport')\n return {'address':address,'port':port}\n\nif __name__ == \"__main__\":\n mainprogram = Main()\n mainprogram.start()\n\n\n# So far, the creation of and management of the Bitmessage protocol and this\n# client is a one-man operation. Bitcoin tips are quite appreciated.\n# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u\n", "path": "src/bitmessagemain.py"}], "after_files": [{"content": "#!/usr/bin/env python2.7\n# Copyright (c) 2012 Jonathan Warren\n# Copyright (c) 2012 The Bitmessage developers\n# Distributed under the MIT/X11 software license. See the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n# Right now, PyBitmessage only support connecting to stream 1. It doesn't\n# yet contain logic to expand into further streams.\n\n# The software version variable is now held in shared.py\n\n\nimport sys\n#Version check\n#Older versions of Python don't support the print function while Python 3 doesn't\n#like the print statement, so we use sys.stdout for the version check. After this\n#check we can then use the print function in the remainder of this file. Currently\n#in order to use logging, a lot of unnecessary code needs to be executed which could\n#potentially render this version check useless. So logging won't be used here until\n#there is a more efficient way to configure logging\nif sys.hexversion >= 0x3000000:\n msg = \"PyBitmessage does not support Python 3. Python 2.7.3 or later is required. Your version: %s\" % sys.version\n #logger.critical(msg)\n sys.stdout.write(msg)\n sys.exit(0)\nif sys.hexversion < 0x20703F0:\n msg = \"You should use Python 2.7.3 or greater (but not Python 3). 
Your version: %s\" % sys.version\n #logger.critical(msg)\n sys.stdout.write(msg)\n sys.exit(0)\n\nimport signal # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully.\n# The next 3 are used for the API\nimport singleton\nimport os\nimport socket\nimport ctypes\nfrom struct import pack\n\nfrom SimpleXMLRPCServer import SimpleXMLRPCServer\nfrom api import MySimpleXMLRPCRequestHandler\nfrom helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections\n\nimport shared\nfrom helper_sql import sqlQuery\nimport threading\n\n# Classes\n#from helper_sql import *\n#from class_sqlThread import *\nfrom class_sqlThread import sqlThread\nfrom class_singleCleaner import singleCleaner\n#from class_singleWorker import *\nfrom class_objectProcessor import objectProcessor\nfrom class_outgoingSynSender import outgoingSynSender\nfrom class_singleListener import singleListener\nfrom class_singleWorker import singleWorker\n#from class_addressGenerator import *\nfrom class_addressGenerator import addressGenerator\nfrom debug import logger\n\n# Helper Functions\nimport helper_bootstrap\nimport helper_generic\n\nfrom subprocess import call\nimport time\n \n\ndef connectToStream(streamNumber):\n shared.streamsInWhichIAmParticipating[streamNumber] = 'no data'\n selfInitiatedConnections[streamNumber] = {}\n shared.inventorySets[streamNumber] = set()\n queryData = sqlQuery('''SELECT hash FROM inventory WHERE streamnumber=?''', streamNumber)\n for row in queryData:\n shared.inventorySets[streamNumber].add(row[0])\n\n \n if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():\n # Some XP and Vista systems can only have 10 outgoing connections at a time.\n maximumNumberOfHalfOpenConnections = 9\n else:\n maximumNumberOfHalfOpenConnections = 64\n for i in range(maximumNumberOfHalfOpenConnections):\n a = outgoingSynSender()\n a.setup(streamNumber, selfInitiatedConnections)\n a.start()\n\ndef _fixWinsock():\n if not ('win32' in sys.platform) and not ('win64' in sys.platform):\n return\n\n # Python 2 on Windows doesn't define a wrapper for\n # socket.inet_ntop but we can make one ourselves using ctypes\n if not hasattr(socket, 'inet_ntop'):\n addressToString = ctypes.windll.ws2_32.WSAAddressToStringA\n def inet_ntop(family, host):\n if family == socket.AF_INET:\n if len(host) != 4:\n raise ValueError(\"invalid IPv4 host\")\n host = pack(\"hH4s8s\", socket.AF_INET, 0, host, \"\\0\" * 8)\n elif family == socket.AF_INET6:\n if len(host) != 16:\n raise ValueError(\"invalid IPv6 host\")\n host = pack(\"hHL16sL\", socket.AF_INET6, 0, 0, host, 0)\n else:\n raise ValueError(\"invalid address family\")\n buf = \"\\0\" * 64\n lengthBuf = pack(\"I\", len(buf))\n addressToString(host, len(host), None, buf, lengthBuf)\n return buf[0:buf.index(\"\\0\")]\n socket.inet_ntop = inet_ntop\n\n # Same for inet_pton\n if not hasattr(socket, 'inet_pton'):\n stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA\n def inet_pton(family, host):\n buf = \"\\0\" * 28\n lengthBuf = pack(\"I\", len(buf))\n if stringToAddress(str(host),\n int(family),\n None,\n buf,\n lengthBuf) != 0:\n raise socket.error(\"illegal IP address passed to inet_pton\")\n if family == socket.AF_INET:\n return buf[4:8]\n elif family == socket.AF_INET6:\n return buf[8:24]\n else:\n raise ValueError(\"invalid address family\")\n socket.inet_pton = inet_pton\n\n # These sockopts are needed on for IPv6 support\n if not hasattr(socket, 'IPPROTO_IPV6'):\n socket.IPPROTO_IPV6 = 41\n if not hasattr(socket, 'IPV6_V6ONLY'):\n 
socket.IPV6_V6ONLY = 27\n\n# This thread, of which there is only one, runs the API.\nclass singleAPI(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n se = SimpleXMLRPCServer((shared.config.get('bitmessagesettings', 'apiinterface'), shared.config.getint(\n 'bitmessagesettings', 'apiport')), MySimpleXMLRPCRequestHandler, True, True)\n se.register_introspection_functions()\n se.serve_forever()\n\n# This is a list of current connections (the thread pointers at least)\nselfInitiatedConnections = {}\n\nif shared.useVeryEasyProofOfWorkForTesting:\n shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(\n shared.networkDefaultProofOfWorkNonceTrialsPerByte / 16)\n shared.networkDefaultPayloadLengthExtraBytes = int(\n shared.networkDefaultPayloadLengthExtraBytes / 7000)\n\nclass Main:\n def start(self, daemon=False):\n _fixWinsock()\n\n shared.daemon = daemon\n # is the application already running? If yes then exit.\n thisapp = singleton.singleinstance()\n\n # get curses flag\n curses = False\n if '-c' in sys.argv:\n curses = True\n\n signal.signal(signal.SIGINT, helper_generic.signal_handler)\n # signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n helper_bootstrap.knownNodes()\n # Start the address generation thread\n addressGeneratorThread = addressGenerator()\n addressGeneratorThread.daemon = True # close the main program even if there are threads left\n addressGeneratorThread.start()\n\n # Start the thread that calculates POWs\n singleWorkerThread = singleWorker()\n singleWorkerThread.daemon = True # close the main program even if there are threads left\n singleWorkerThread.start()\n\n # Start the SQL thread\n sqlLookup = sqlThread()\n sqlLookup.daemon = False # DON'T close the main program even if there are threads left. The closeEvent should command this thread to exit gracefully.\n sqlLookup.start()\n\n # Start the thread that calculates POWs\n objectProcessorThread = objectProcessor()\n objectProcessorThread.daemon = False # DON'T close the main program even the thread remains. This thread checks the shutdown variable after processing each object.\n objectProcessorThread.start()\n\n # Start the cleanerThread\n singleCleanerThread = singleCleaner()\n singleCleanerThread.daemon = True # close the main program even if there are threads left\n singleCleanerThread.start()\n\n shared.reloadMyAddressHashes()\n shared.reloadBroadcastSendersForWhichImWatching()\n\n if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):\n try:\n apiNotifyPath = shared.config.get(\n 'bitmessagesettings', 'apinotifypath')\n except:\n apiNotifyPath = ''\n if apiNotifyPath != '':\n with shared.printLock:\n print('Trying to call', apiNotifyPath)\n\n call([apiNotifyPath, \"startingUp\"])\n singleAPIThread = singleAPI()\n singleAPIThread.daemon = True # close the main program even if there are threads left\n singleAPIThread.start()\n\n connectToStream(1)\n\n singleListenerThread = singleListener()\n singleListenerThread.setup(selfInitiatedConnections)\n singleListenerThread.daemon = True # close the main program even if there are threads left\n singleListenerThread.start()\n\n if daemon == False and shared.safeConfigGetBoolean('bitmessagesettings', 'daemon') == False:\n if curses == False:\n try:\n from PyQt4 import QtCore, QtGui\n except Exception as err:\n print('PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. 
You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \\'PyQt Download\\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon')\n print('Error message:', err)\n print('You can also run PyBitmessage with the new curses interface by providing \\'-c\\' as a commandline argument.')\n os._exit(0)\n\n import bitmessageqt\n bitmessageqt.run()\n else:\n print('Running with curses')\n import bitmessagecurses\n bitmessagecurses.runwrapper()\n else:\n shared.config.remove_option('bitmessagesettings', 'dontconnect')\n\n if daemon:\n with shared.printLock:\n print('Running as a daemon. The main program should exit this thread.')\n else:\n with shared.printLock:\n print('Running as a daemon. You can use Ctrl+C to exit.')\n while True:\n time.sleep(20)\n\n def stop(self):\n with shared.printLock:\n print('Stopping Bitmessage Deamon.')\n shared.doCleanShutdown()\n\n\n #TODO: nice function but no one is using this \n def getApiAddress(self):\n if not shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):\n return None\n address = shared.config.get('bitmessagesettings', 'apiinterface')\n port = shared.config.getint('bitmessagesettings', 'apiport')\n return {'address':address,'port':port}\n\nif __name__ == \"__main__\":\n mainprogram = Main()\n mainprogram.start()\n\n\n# So far, the creation of and management of the Bitmessage protocol and this\n# client is a one-man operation. Bitcoin tips are quite appreciated.\n# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u\n", "path": "src/bitmessagemain.py"}]}
| 3,723 | 101 |
gh_patches_debug_36430
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-2670
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Split up the admin controller
The admin page is going to be the first de-facto example of how to split up a large controller into multiple directives and services. This will be used as a guide for other parts of the frontend refactoring project. The overall process used as part of this refactor is:
1. Minimize the HTML in the controller by reusing existing Jinja templates and directives as makes sense (this should only affect admin.html and not require any significant refactoring)
2. Extract major parts of the controller into isolated directives (and move dependent JS from the controller JS file to directive JS files)
3. Additional steps may need to be done to further split directives that are too large once they are pulled out of the controller
4. Functionality already implemented in the controller/directive JS files should be replaced with calls to existing services (possibly moving those services to the domain folder if they are in other pages/ directories)
5. Common functionality in the directive and controller JS files should be split into non-UI services. Tests should be added for these services.
6. If these services need to be used by other pages, they should be moved to domain/{directory}
7. Finally, if domain objects are needed they should be added to domain/{directory} and the services should be updated to use them
The admin page doesn't require all of the above steps, which means the collection editor and simple exploration editor will need to be used as examples for how some of the other steps look (like creating domain object factories).
--- END ISSUE ---
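The step list above is essentially a recipe for carving one monolithic dispatcher into small, individually testable pieces. A minimal, framework-free sketch of that splitting idea follows; it is written in Python to match the code segments below, and every name in it is invented for illustration rather than taken from the Oppia codebase.

```python
# Hypothetical sketch of the "split the big handler" idea: each admin action
# gets its own helper method, and the public entry point only dispatches.
# Names are invented for illustration and do not come from Oppia.
import logging


class AdminActionDispatcher(object):
    """Routes an admin 'action' payload to a dedicated helper method."""

    def __init__(self, user_id):
        self.user_id = user_id

    def handle(self, payload):
        action = payload.get('action')
        handlers = {
            'reload_exploration': self._reload_exploration,
            'clear_search_index': self._clear_search_index,
        }
        if action not in handlers:
            raise ValueError('Unknown admin action: %s' % action)
        return handlers[action](payload)

    def _reload_exploration(self, payload):
        exploration_id = payload.get('exploration_id')
        logging.info('%s reloaded exploration %s', self.user_id, exploration_id)
        return {'reloaded': exploration_id}

    def _clear_search_index(self, payload):
        logging.info('%s cleared the search index', self.user_id)
        return {}


if __name__ == '__main__':
    dispatcher = AdminActionDispatcher(user_id='admin')
    print(dispatcher.handle({'action': 'reload_exploration',
                             'exploration_id': '0'}))
```

Each extracted helper can then be unit-tested on its own, which is the same motivation the issue gives for pulling services out of the frontend controller.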
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/controllers/admin.py`
Content:
```
1 # Copyright 2014 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Controllers for the admin view."""
16
17 import logging
18
19 import jinja2
20
21 from core import jobs
22 from core import jobs_registry
23 from core.controllers import base
24 from core.controllers import editor
25 from core.domain import collection_services
26 from core.domain import config_domain
27 from core.domain import config_services
28 from core.domain import exp_services
29 from core.domain import recommendations_services
30 from core.domain import rights_manager
31 from core.domain import rte_component_registry
32 from core.platform import models
33 import feconf
34 import utils
35
36 current_user_services = models.Registry.import_current_user_services()
37
38
39 def require_super_admin(handler):
40 """Decorator that checks if the current user is a super admin."""
41 def test_super_admin(self, **kwargs):
42 """Checks if the user is logged in and is a super admin."""
43 if not self.user_id:
44 self.redirect(
45 current_user_services.create_login_url(self.request.uri))
46 return
47 if not current_user_services.is_current_user_super_admin():
48 raise self.UnauthorizedUserException(
49 '%s is not a super admin of this application', self.user_id)
50 return handler(self, **kwargs)
51
52 return test_super_admin
53
54
55 class AdminPage(base.BaseHandler):
56 """Admin page shown in the App Engine admin console."""
57 @require_super_admin
58 def get(self):
59 """Handles GET requests."""
60 demo_exploration_ids = feconf.DEMO_EXPLORATIONS.keys()
61
62 recent_job_data = jobs.get_data_for_recent_jobs()
63 unfinished_job_data = jobs.get_data_for_unfinished_jobs()
64 for job in unfinished_job_data:
65 job['can_be_canceled'] = job['is_cancelable'] and any([
66 klass.__name__ == job['job_type']
67 for klass in jobs_registry.ONE_OFF_JOB_MANAGERS])
68
69 queued_or_running_job_types = set([
70 job['job_type'] for job in unfinished_job_data])
71 one_off_job_specs = [{
72 'job_type': klass.__name__,
73 'is_queued_or_running': (
74 klass.__name__ in queued_or_running_job_types)
75 } for klass in jobs_registry.ONE_OFF_JOB_MANAGERS]
76
77 continuous_computations_data = jobs.get_continuous_computations_info(
78 jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS)
79 for computation in continuous_computations_data:
80 if computation['last_started_msec']:
81 computation['human_readable_last_started'] = (
82 utils.get_human_readable_time_string(
83 computation['last_started_msec']))
84 if computation['last_stopped_msec']:
85 computation['human_readable_last_stopped'] = (
86 utils.get_human_readable_time_string(
87 computation['last_stopped_msec']))
88 if computation['last_finished_msec']:
89 computation['human_readable_last_finished'] = (
90 utils.get_human_readable_time_string(
91 computation['last_finished_msec']))
92
93 self.values.update({
94 'continuous_computations_data': continuous_computations_data,
95 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.iteritems()),
96 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.iteritems()),
97 'demo_exploration_ids': demo_exploration_ids,
98 'human_readable_current_time': (
99 utils.get_human_readable_time_string(
100 utils.get_current_time_in_millisecs())),
101 'one_off_job_specs': one_off_job_specs,
102 'recent_job_data': recent_job_data,
103 'rte_components_html': jinja2.utils.Markup(
104 rte_component_registry.Registry.get_html_for_all_components()),
105 'unfinished_job_data': unfinished_job_data,
106 'value_generators_js': jinja2.utils.Markup(
107 editor.get_value_generators_js()),
108 })
109
110 self.render_template('pages/admin/admin.html')
111
112
113 class AdminHandler(base.BaseHandler):
114 """Handler for the admin page."""
115
116 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
117
118 @require_super_admin
119 def get(self):
120 """Handles GET requests."""
121
122 self.render_json({
123 'config_properties': (
124 config_domain.Registry.get_config_property_schemas()),
125 })
126
127 @require_super_admin
128 def post(self):
129 """Handles POST requests."""
130 try:
131 if self.payload.get('action') == 'reload_exploration':
132 exploration_id = self.payload.get('exploration_id')
133 logging.info(
134 '[ADMIN] %s reloaded exploration %s' %
135 (self.user_id, exploration_id))
136 exp_services.load_demo(unicode(exploration_id))
137 rights_manager.release_ownership_of_exploration(
138 feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))
139 elif self.payload.get('action') == 'reload_collection':
140 collection_id = self.payload.get('collection_id')
141 logging.info(
142 '[ADMIN] %s reloaded collection %s' %
143 (self.user_id, collection_id))
144 collection_services.load_demo(unicode(collection_id))
145 rights_manager.release_ownership_of_collection(
146 feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))
147 elif self.payload.get('action') == 'clear_search_index':
148 exp_services.clear_search_index()
149 elif self.payload.get('action') == 'save_config_properties':
150 new_config_property_values = self.payload.get(
151 'new_config_property_values')
152 logging.info('[ADMIN] %s saved config property values: %s' %
153 (self.user_id, new_config_property_values))
154 for (name, value) in new_config_property_values.iteritems():
155 config_services.set_property(self.user_id, name, value)
156 elif self.payload.get('action') == 'revert_config_property':
157 config_property_id = self.payload.get('config_property_id')
158 logging.info('[ADMIN] %s reverted config property: %s' %
159 (self.user_id, config_property_id))
160 config_services.revert_property(
161 self.user_id, config_property_id)
162 elif self.payload.get('action') == 'start_new_job':
163 for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
164 if klass.__name__ == self.payload.get('job_type'):
165 klass.enqueue(klass.create_new())
166 break
167 elif self.payload.get('action') == 'cancel_job':
168 job_id = self.payload.get('job_id')
169 job_type = self.payload.get('job_type')
170 for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
171 if klass.__name__ == job_type:
172 klass.cancel(job_id, self.user_id)
173 break
174 elif self.payload.get('action') == 'start_computation':
175 computation_type = self.payload.get('computation_type')
176 for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:
177 if klass.__name__ == computation_type:
178 klass.start_computation()
179 break
180 elif self.payload.get('action') == 'stop_computation':
181 computation_type = self.payload.get('computation_type')
182 for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:
183 if klass.__name__ == computation_type:
184 klass.stop_computation(self.user_id)
185 break
186 elif self.payload.get('action') == 'upload_topic_similarities':
187 data = self.payload.get('data')
188 recommendations_services.update_topic_similarities(data)
189
190 self.render_json({})
191 except Exception as e:
192 self.render_json({'error': unicode(e)})
193 raise
194
195
196 class AdminJobOutput(base.BaseHandler):
197 """Retrieves job output to show on the admin page."""
198
199 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
200
201 @require_super_admin
202 def get(self):
203 """Handles GET requests."""
204 job_id = self.request.get('job_id')
205 self.render_json({
206 'output': jobs.get_job_output(job_id)
207 })
208
209
210 class AdminTopicsCsvDownloadHandler(base.BaseHandler):
211 """Retrieves topic similarity data for download."""
212
213 @require_super_admin
214 def get(self):
215 self.response.headers['Content-Type'] = 'text/csv'
216 self.response.headers['Content-Disposition'] = (
217 'attachment; filename=topic_similarities.csv')
218 self.response.write(
219 recommendations_services.get_topic_similarities_as_csv())
220
```
--- END FILES ---
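One piece of the file above worth calling out is `require_super_admin`, a standard guard decorator: run a check, short-circuit on failure, otherwise call the wrapped handler. A stripped-down, framework-free sketch of the same pattern is below; all names are hypothetical and not part of Oppia.

```python
# Minimal guard-decorator sketch, mirroring the shape of require_super_admin.
import functools


class UnauthorizedUserError(Exception):
    pass


def require_role(role):
    def decorator(handler):
        @functools.wraps(handler)
        def wrapper(self, *args, **kwargs):
            if role not in getattr(self, 'roles', set()):
                raise UnauthorizedUserError('%s role required' % role)
            return handler(self, *args, **kwargs)
        return wrapper
    return decorator


class FakeHandler(object):
    def __init__(self, roles):
        self.roles = roles

    @require_role('super_admin')
    def get(self):
        return 'ok'


if __name__ == '__main__':
    print(FakeHandler({'super_admin'}).get())  # -> ok
    try:
        FakeHandler(set()).get()
    except UnauthorizedUserError as exc:
        print('blocked:', exc)
```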
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/controllers/admin.py b/core/controllers/admin.py
--- a/core/controllers/admin.py
+++ b/core/controllers/admin.py
@@ -130,20 +130,10 @@
try:
if self.payload.get('action') == 'reload_exploration':
exploration_id = self.payload.get('exploration_id')
- logging.info(
- '[ADMIN] %s reloaded exploration %s' %
- (self.user_id, exploration_id))
- exp_services.load_demo(unicode(exploration_id))
- rights_manager.release_ownership_of_exploration(
- feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))
+ self._reload_exploration(exploration_id)
elif self.payload.get('action') == 'reload_collection':
collection_id = self.payload.get('collection_id')
- logging.info(
- '[ADMIN] %s reloaded collection %s' %
- (self.user_id, collection_id))
- collection_services.load_demo(unicode(collection_id))
- rights_manager.release_ownership_of_collection(
- feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))
+ self._reload_collection(collection_id)
elif self.payload.get('action') == 'clear_search_index':
exp_services.clear_search_index()
elif self.payload.get('action') == 'save_config_properties':
@@ -192,6 +182,28 @@
self.render_json({'error': unicode(e)})
raise
+ def _reload_exploration(self, exploration_id):
+ if feconf.DEV_MODE:
+ logging.info(
+ '[ADMIN] %s reloaded exploration %s' %
+ (self.user_id, exploration_id))
+ exp_services.load_demo(unicode(exploration_id))
+ rights_manager.release_ownership_of_exploration(
+ feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))
+ else:
+ raise Exception('Cannot reload an exploration in production.')
+
+ def _reload_collection(self, collection_id):
+ if feconf.DEV_MODE:
+ logging.info(
+ '[ADMIN] %s reloaded collection %s' %
+ (self.user_id, collection_id))
+ collection_services.load_demo(unicode(collection_id))
+ rights_manager.release_ownership_of_collection(
+ feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))
+ else:
+ raise Exception('Cannot reload a collection in production.')
+
class AdminJobOutput(base.BaseHandler):
"""Retrieves job output to show on the admin page."""
|
{"golden_diff": "diff --git a/core/controllers/admin.py b/core/controllers/admin.py\n--- a/core/controllers/admin.py\n+++ b/core/controllers/admin.py\n@@ -130,20 +130,10 @@\n try:\n if self.payload.get('action') == 'reload_exploration':\n exploration_id = self.payload.get('exploration_id')\n- logging.info(\n- '[ADMIN] %s reloaded exploration %s' %\n- (self.user_id, exploration_id))\n- exp_services.load_demo(unicode(exploration_id))\n- rights_manager.release_ownership_of_exploration(\n- feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))\n+ self._reload_exploration(exploration_id)\n elif self.payload.get('action') == 'reload_collection':\n collection_id = self.payload.get('collection_id')\n- logging.info(\n- '[ADMIN] %s reloaded collection %s' %\n- (self.user_id, collection_id))\n- collection_services.load_demo(unicode(collection_id))\n- rights_manager.release_ownership_of_collection(\n- feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))\n+ self._reload_collection(collection_id)\n elif self.payload.get('action') == 'clear_search_index':\n exp_services.clear_search_index()\n elif self.payload.get('action') == 'save_config_properties':\n@@ -192,6 +182,28 @@\n self.render_json({'error': unicode(e)})\n raise\n \n+ def _reload_exploration(self, exploration_id):\n+ if feconf.DEV_MODE:\n+ logging.info(\n+ '[ADMIN] %s reloaded exploration %s' %\n+ (self.user_id, exploration_id))\n+ exp_services.load_demo(unicode(exploration_id))\n+ rights_manager.release_ownership_of_exploration(\n+ feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))\n+ else:\n+ raise Exception('Cannot reload an exploration in production.')\n+\n+ def _reload_collection(self, collection_id):\n+ if feconf.DEV_MODE:\n+ logging.info(\n+ '[ADMIN] %s reloaded collection %s' %\n+ (self.user_id, collection_id))\n+ collection_services.load_demo(unicode(collection_id))\n+ rights_manager.release_ownership_of_collection(\n+ feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))\n+ else:\n+ raise Exception('Cannot reload a collection in production.')\n+\n \n class AdminJobOutput(base.BaseHandler):\n \"\"\"Retrieves job output to show on the admin page.\"\"\"\n", "issue": "Split up the admin controller\nThe admin page is going to be the first de-facto example of how to split up a large controller into multiple directives and services. This will be used as a guide for other parts of the frontend refactoring project. The overall process used as part of this refactor is:\n1. Minimize the HTML in the controller by reusing existing Jinja templates and directives as makes sense (this should only affect admin.html and not require any significant refactoring)\n2. Extract major parts of the controller into isolated directives (and move dependent JS from the controller JS file to directive JS files)\n3. Additional steps may need to be done to further split directives that are too large once they are pulled out of the controller\n4. Functionality already implemented in the controller/directive JS files should be replaced with calls to existing services (possibly moving those services to the domain folder if they are in other pages/ directories)\n5. Common functionality in the directive and controller JS files should be split into non-UI services\n Tests should be added for these services\n6. If these services need to be used by other pages, they should be moved to domain/{directory}\n7. 
Finally, if domain objects are needed they should be added to domain/{directory} and the services should be updated to use them\n\nThe admin page doesn't require all of the above steps, which means the collection editor and simple exploration editor will need to be used as examples for how some of other steps look (like creating domain object factories).\n\n", "before_files": [{"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the admin view.\"\"\"\n\nimport logging\n\nimport jinja2\n\nfrom core import jobs\nfrom core import jobs_registry\nfrom core.controllers import base\nfrom core.controllers import editor\nfrom core.domain import collection_services\nfrom core.domain import config_domain\nfrom core.domain import config_services\nfrom core.domain import exp_services\nfrom core.domain import recommendations_services\nfrom core.domain import rights_manager\nfrom core.domain import rte_component_registry\nfrom core.platform import models\nimport feconf\nimport utils\n\ncurrent_user_services = models.Registry.import_current_user_services()\n\n\ndef require_super_admin(handler):\n \"\"\"Decorator that checks if the current user is a super admin.\"\"\"\n def test_super_admin(self, **kwargs):\n \"\"\"Checks if the user is logged in and is a super admin.\"\"\"\n if not self.user_id:\n self.redirect(\n current_user_services.create_login_url(self.request.uri))\n return\n if not current_user_services.is_current_user_super_admin():\n raise self.UnauthorizedUserException(\n '%s is not a super admin of this application', self.user_id)\n return handler(self, **kwargs)\n\n return test_super_admin\n\n\nclass AdminPage(base.BaseHandler):\n \"\"\"Admin page shown in the App Engine admin console.\"\"\"\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n demo_exploration_ids = feconf.DEMO_EXPLORATIONS.keys()\n\n recent_job_data = jobs.get_data_for_recent_jobs()\n unfinished_job_data = jobs.get_data_for_unfinished_jobs()\n for job in unfinished_job_data:\n job['can_be_canceled'] = job['is_cancelable'] and any([\n klass.__name__ == job['job_type']\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS])\n\n queued_or_running_job_types = set([\n job['job_type'] for job in unfinished_job_data])\n one_off_job_specs = [{\n 'job_type': klass.__name__,\n 'is_queued_or_running': (\n klass.__name__ in queued_or_running_job_types)\n } for klass in jobs_registry.ONE_OFF_JOB_MANAGERS]\n\n continuous_computations_data = jobs.get_continuous_computations_info(\n jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS)\n for computation in continuous_computations_data:\n if computation['last_started_msec']:\n computation['human_readable_last_started'] = (\n utils.get_human_readable_time_string(\n computation['last_started_msec']))\n if computation['last_stopped_msec']:\n computation['human_readable_last_stopped'] = (\n utils.get_human_readable_time_string(\n computation['last_stopped_msec']))\n if 
computation['last_finished_msec']:\n computation['human_readable_last_finished'] = (\n utils.get_human_readable_time_string(\n computation['last_finished_msec']))\n\n self.values.update({\n 'continuous_computations_data': continuous_computations_data,\n 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.iteritems()),\n 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.iteritems()),\n 'demo_exploration_ids': demo_exploration_ids,\n 'human_readable_current_time': (\n utils.get_human_readable_time_string(\n utils.get_current_time_in_millisecs())),\n 'one_off_job_specs': one_off_job_specs,\n 'recent_job_data': recent_job_data,\n 'rte_components_html': jinja2.utils.Markup(\n rte_component_registry.Registry.get_html_for_all_components()),\n 'unfinished_job_data': unfinished_job_data,\n 'value_generators_js': jinja2.utils.Markup(\n editor.get_value_generators_js()),\n })\n\n self.render_template('pages/admin/admin.html')\n\n\nclass AdminHandler(base.BaseHandler):\n \"\"\"Handler for the admin page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n\n self.render_json({\n 'config_properties': (\n config_domain.Registry.get_config_property_schemas()),\n })\n\n @require_super_admin\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n try:\n if self.payload.get('action') == 'reload_exploration':\n exploration_id = self.payload.get('exploration_id')\n logging.info(\n '[ADMIN] %s reloaded exploration %s' %\n (self.user_id, exploration_id))\n exp_services.load_demo(unicode(exploration_id))\n rights_manager.release_ownership_of_exploration(\n feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))\n elif self.payload.get('action') == 'reload_collection':\n collection_id = self.payload.get('collection_id')\n logging.info(\n '[ADMIN] %s reloaded collection %s' %\n (self.user_id, collection_id))\n collection_services.load_demo(unicode(collection_id))\n rights_manager.release_ownership_of_collection(\n feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))\n elif self.payload.get('action') == 'clear_search_index':\n exp_services.clear_search_index()\n elif self.payload.get('action') == 'save_config_properties':\n new_config_property_values = self.payload.get(\n 'new_config_property_values')\n logging.info('[ADMIN] %s saved config property values: %s' %\n (self.user_id, new_config_property_values))\n for (name, value) in new_config_property_values.iteritems():\n config_services.set_property(self.user_id, name, value)\n elif self.payload.get('action') == 'revert_config_property':\n config_property_id = self.payload.get('config_property_id')\n logging.info('[ADMIN] %s reverted config property: %s' %\n (self.user_id, config_property_id))\n config_services.revert_property(\n self.user_id, config_property_id)\n elif self.payload.get('action') == 'start_new_job':\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:\n if klass.__name__ == self.payload.get('job_type'):\n klass.enqueue(klass.create_new())\n break\n elif self.payload.get('action') == 'cancel_job':\n job_id = self.payload.get('job_id')\n job_type = self.payload.get('job_type')\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:\n if klass.__name__ == job_type:\n klass.cancel(job_id, self.user_id)\n break\n elif self.payload.get('action') == 'start_computation':\n computation_type = self.payload.get('computation_type')\n for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:\n if klass.__name__ == computation_type:\n klass.start_computation()\n break\n 
elif self.payload.get('action') == 'stop_computation':\n computation_type = self.payload.get('computation_type')\n for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:\n if klass.__name__ == computation_type:\n klass.stop_computation(self.user_id)\n break\n elif self.payload.get('action') == 'upload_topic_similarities':\n data = self.payload.get('data')\n recommendations_services.update_topic_similarities(data)\n\n self.render_json({})\n except Exception as e:\n self.render_json({'error': unicode(e)})\n raise\n\n\nclass AdminJobOutput(base.BaseHandler):\n \"\"\"Retrieves job output to show on the admin page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n job_id = self.request.get('job_id')\n self.render_json({\n 'output': jobs.get_job_output(job_id)\n })\n\n\nclass AdminTopicsCsvDownloadHandler(base.BaseHandler):\n \"\"\"Retrieves topic similarity data for download.\"\"\"\n\n @require_super_admin\n def get(self):\n self.response.headers['Content-Type'] = 'text/csv'\n self.response.headers['Content-Disposition'] = (\n 'attachment; filename=topic_similarities.csv')\n self.response.write(\n recommendations_services.get_topic_similarities_as_csv())\n", "path": "core/controllers/admin.py"}], "after_files": [{"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the admin view.\"\"\"\n\nimport logging\n\nimport jinja2\n\nfrom core import jobs\nfrom core import jobs_registry\nfrom core.controllers import base\nfrom core.controllers import editor\nfrom core.domain import collection_services\nfrom core.domain import config_domain\nfrom core.domain import config_services\nfrom core.domain import exp_services\nfrom core.domain import recommendations_services\nfrom core.domain import rights_manager\nfrom core.domain import rte_component_registry\nfrom core.platform import models\nimport feconf\nimport utils\n\ncurrent_user_services = models.Registry.import_current_user_services()\n\n\ndef require_super_admin(handler):\n \"\"\"Decorator that checks if the current user is a super admin.\"\"\"\n def test_super_admin(self, **kwargs):\n \"\"\"Checks if the user is logged in and is a super admin.\"\"\"\n if not self.user_id:\n self.redirect(\n current_user_services.create_login_url(self.request.uri))\n return\n if not current_user_services.is_current_user_super_admin():\n raise self.UnauthorizedUserException(\n '%s is not a super admin of this application', self.user_id)\n return handler(self, **kwargs)\n\n return test_super_admin\n\n\nclass AdminPage(base.BaseHandler):\n \"\"\"Admin page shown in the App Engine admin console.\"\"\"\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n demo_exploration_ids = feconf.DEMO_EXPLORATIONS.keys()\n\n recent_job_data = jobs.get_data_for_recent_jobs()\n unfinished_job_data = jobs.get_data_for_unfinished_jobs()\n for job in unfinished_job_data:\n 
job['can_be_canceled'] = job['is_cancelable'] and any([\n klass.__name__ == job['job_type']\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS])\n\n queued_or_running_job_types = set([\n job['job_type'] for job in unfinished_job_data])\n one_off_job_specs = [{\n 'job_type': klass.__name__,\n 'is_queued_or_running': (\n klass.__name__ in queued_or_running_job_types)\n } for klass in jobs_registry.ONE_OFF_JOB_MANAGERS]\n\n continuous_computations_data = jobs.get_continuous_computations_info(\n jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS)\n for computation in continuous_computations_data:\n if computation['last_started_msec']:\n computation['human_readable_last_started'] = (\n utils.get_human_readable_time_string(\n computation['last_started_msec']))\n if computation['last_stopped_msec']:\n computation['human_readable_last_stopped'] = (\n utils.get_human_readable_time_string(\n computation['last_stopped_msec']))\n if computation['last_finished_msec']:\n computation['human_readable_last_finished'] = (\n utils.get_human_readable_time_string(\n computation['last_finished_msec']))\n\n self.values.update({\n 'continuous_computations_data': continuous_computations_data,\n 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.iteritems()),\n 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.iteritems()),\n 'demo_exploration_ids': demo_exploration_ids,\n 'human_readable_current_time': (\n utils.get_human_readable_time_string(\n utils.get_current_time_in_millisecs())),\n 'one_off_job_specs': one_off_job_specs,\n 'recent_job_data': recent_job_data,\n 'rte_components_html': jinja2.utils.Markup(\n rte_component_registry.Registry.get_html_for_all_components()),\n 'unfinished_job_data': unfinished_job_data,\n 'value_generators_js': jinja2.utils.Markup(\n editor.get_value_generators_js()),\n })\n\n self.render_template('pages/admin/admin.html')\n\n\nclass AdminHandler(base.BaseHandler):\n \"\"\"Handler for the admin page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n\n self.render_json({\n 'config_properties': (\n config_domain.Registry.get_config_property_schemas()),\n })\n\n @require_super_admin\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n try:\n if self.payload.get('action') == 'reload_exploration':\n exploration_id = self.payload.get('exploration_id')\n self._reload_exploration(exploration_id)\n elif self.payload.get('action') == 'reload_collection':\n collection_id = self.payload.get('collection_id')\n self._reload_collection(collection_id)\n elif self.payload.get('action') == 'clear_search_index':\n exp_services.clear_search_index()\n elif self.payload.get('action') == 'save_config_properties':\n new_config_property_values = self.payload.get(\n 'new_config_property_values')\n logging.info('[ADMIN] %s saved config property values: %s' %\n (self.user_id, new_config_property_values))\n for (name, value) in new_config_property_values.iteritems():\n config_services.set_property(self.user_id, name, value)\n elif self.payload.get('action') == 'revert_config_property':\n config_property_id = self.payload.get('config_property_id')\n logging.info('[ADMIN] %s reverted config property: %s' %\n (self.user_id, config_property_id))\n config_services.revert_property(\n self.user_id, config_property_id)\n elif self.payload.get('action') == 'start_new_job':\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:\n if klass.__name__ == self.payload.get('job_type'):\n klass.enqueue(klass.create_new())\n 
break\n elif self.payload.get('action') == 'cancel_job':\n job_id = self.payload.get('job_id')\n job_type = self.payload.get('job_type')\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:\n if klass.__name__ == job_type:\n klass.cancel(job_id, self.user_id)\n break\n elif self.payload.get('action') == 'start_computation':\n computation_type = self.payload.get('computation_type')\n for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:\n if klass.__name__ == computation_type:\n klass.start_computation()\n break\n elif self.payload.get('action') == 'stop_computation':\n computation_type = self.payload.get('computation_type')\n for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:\n if klass.__name__ == computation_type:\n klass.stop_computation(self.user_id)\n break\n elif self.payload.get('action') == 'upload_topic_similarities':\n data = self.payload.get('data')\n recommendations_services.update_topic_similarities(data)\n\n self.render_json({})\n except Exception as e:\n self.render_json({'error': unicode(e)})\n raise\n\n def _reload_exploration(self, exploration_id):\n if feconf.DEV_MODE:\n logging.info(\n '[ADMIN] %s reloaded exploration %s' %\n (self.user_id, exploration_id))\n exp_services.load_demo(unicode(exploration_id))\n rights_manager.release_ownership_of_exploration(\n feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))\n else:\n raise Exception('Cannot reload an exploration in production.')\n\n def _reload_collection(self, collection_id):\n if feconf.DEV_MODE:\n logging.info(\n '[ADMIN] %s reloaded collection %s' %\n (self.user_id, collection_id))\n collection_services.load_demo(unicode(collection_id))\n rights_manager.release_ownership_of_collection(\n feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))\n else:\n raise Exception('Cannot reload a collection in production.')\n\n\nclass AdminJobOutput(base.BaseHandler):\n \"\"\"Retrieves job output to show on the admin page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n job_id = self.request.get('job_id')\n self.render_json({\n 'output': jobs.get_job_output(job_id)\n })\n\n\nclass AdminTopicsCsvDownloadHandler(base.BaseHandler):\n \"\"\"Retrieves topic similarity data for download.\"\"\"\n\n @require_super_admin\n def get(self):\n self.response.headers['Content-Type'] = 'text/csv'\n self.response.headers['Content-Disposition'] = (\n 'attachment; filename=topic_similarities.csv')\n self.response.write(\n recommendations_services.get_topic_similarities_as_csv())\n", "path": "core/controllers/admin.py"}]}
| 2,964 | 546 |
gh_patches_debug_13073
|
rasdani/github-patches
|
git_diff
|
kymatio__kymatio-650
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sphinx inverts colors...
No clue why, but it can wait until v3.
cf here:
https://www.kymat.io/gallery_2d/plot_scattering_disk.html#sphx-glr-gallery-2d-plot-scattering-disk-py
https://www.kymat.io/gallery_2d/plot_invert_scattering_torch.html#sphx-glr-gallery-2d-plot-invert-scattering-torch-py
--- END ISSUE ---
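A detail worth keeping in mind when localizing this bug: with a gray colormap, inverting the data (`1 - x`, or negating it) and inverting the colormap (`gray_r`) are interchangeable, so doing both cancels out and doing either twice restores the original appearance. The standalone sketch below (plain matplotlib, no kymatio) demonstrates the equivalence; the output file name is arbitrary.

```python
# For data in [0, 1], imshow(1 - x, cmap='gray') renders identically to
# imshow(x, cmap='gray_r'); inversion can live in the data or in the styling.
import matplotlib
matplotlib.use('Agg')  # headless backend so the script runs anywhere
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0.0, 1.0, 64).reshape(8, 8)

fig, axes = plt.subplots(1, 3, figsize=(9, 3))
axes[0].imshow(x, cmap='gray')
axes[0].set_title("x, cmap='gray'")
axes[1].imshow(1 - x, cmap='gray')
axes[1].set_title("1 - x, cmap='gray'")
axes[2].imshow(x, cmap='gray_r')
axes[2].set_title("x, cmap='gray_r' (same as middle)")
for ax in axes:
    ax.axis('off')
fig.savefig('inversion_demo.png')
```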
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/2d/plot_scattering_disk.py`
Content:
```
1 """
2 Scattering disk display
3 =======================
4 This script reproduces concentric circles that encode Scattering coefficient's
5 energy as described in "Invariant Scattering Convolution Networks" by Bruna and Mallat.
6 Here, for the sake of simplicity, we only consider first order scattering.
7
8 Author: https://github.com/Jonas1312
9 Edited by: Edouard Oyallon
10 """
11
12
13
14 import matplotlib as mpl
15 import matplotlib.cm as cm
16 import matplotlib.pyplot as plt
17 import numpy as np
18 from kymatio import Scattering2D
19 from PIL import Image
20 import os
21
22
23 img_name = os.path.join(os.getcwd(),"images/digit.png")
24
25 ####################################################################
26 # Scattering computations
27 #-------------------------------------------------------------------
28 # First, we read the input digit:
29 src_img = Image.open(img_name).convert('L').resize((32,32))
30 src_img = np.array(src_img)
31 print("img shape: ", src_img.shape)
32
33 ####################################################################
34 # We compute a Scattering Transform with L=6 angles and J=3 scales.
35 # Rotating a wavelet $\psi$ by $\pi$ is equivalent to consider its
36 # conjugate in fourier: $\hat\psi_{\pi}(\omega)=\hat\psi(r_{-\pi}\omega)^*$.
37 #
38 # Combining this and the fact that a real signal has a Hermitian symmetry
39 # implies that it is usually sufficient to use the angles $\{\frac{\pi l}{L}\}_{l\leq L}$ at computation time.
40 # For consistency, we will however display $\{\frac{2\pi l}{L}\}_{l\leq 2L}$,
41 # which implies that our visualization will be redundant and have a symmetry by rotation of $\pi$.
42
43 L = 6
44 J = 3
45 scattering = Scattering2D(J=J, shape=src_img.shape, L=L, max_order=1, frontend='numpy')
46
47 ####################################################################
48 # We now compute the scattering coefficients:
49 src_img_tensor = src_img.astype(np.float32) / 255.
50
51 scattering_coefficients = scattering(src_img_tensor)
52 print("coeffs shape: ", scattering_coefficients.shape)
53 # Invert colors
54 scattering_coefficients = -scattering_coefficients
55
56 ####################################################################
57 # We skip the low pass filter...
58 scattering_coefficients = scattering_coefficients[1:, :, :]
59 norm = mpl.colors.Normalize(scattering_coefficients.min(), scattering_coefficients.max(), clip=True)
60 mapper = cm.ScalarMappable(norm=norm, cmap="gray")
61 nb_coeffs, window_rows, window_columns = scattering_coefficients.shape
62
63 ####################################################################
64 # Figure reproduction
65 #-------------------------------------------------------------------
66
67 ####################################################################
68 # Now we can reproduce a figure that displays the energy of the first
69 # order Scattering coefficient, which are given by $\{\mid x\star\psi_{j,\theta}\mid\star\phi_J|\}_{j,\theta}$ .
70 # Here, each scattering coefficient is represented on the polar plane. The polar radius and angle correspond
71 # respectively to the scale $j$ and the rotation $\theta$ applied to the mother wavelet.
72 #
73 # Observe that as predicted, the visualization exhibit a redundancy and a symmetry.
74
75 fig,ax = plt.subplots()
76
77 plt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')
78 ax.axis('off')
79 offset = 0.1
80 for row in range(window_rows):
81 for column in range(window_columns):
82 ax=fig.add_subplot(window_rows, window_columns, 1 + column + row * window_rows, projection='polar')
83 ax.set_ylim(0, 1)
84 ax.axis('off')
85 ax.set_yticklabels([]) # turn off radial tick labels (yticks)
86 ax.set_xticklabels([]) # turn off degrees
87 # ax.set_theta_zero_location('N') # 0° to North
88 coefficients = scattering_coefficients[:, row, column]
89 for j in range(J):
90 for l in range(L):
91 coeff = coefficients[l + (J - 1 - j) * L]
92 color = mpl.colors.to_hex(mapper.to_rgba(coeff))
93 ax.bar(x=(4.5+l) * np.pi / L,
94 height=2*(2**(j-1) / 2**J),
95 width=2 * np.pi / L,
96 bottom=offset + (2**j / 2**J) ,
97 color=color)
98 ax.bar(x=(4.5+l+L) * np.pi / L,
99 height=2*(2**(j-1) / 2**J),
100 width=2 * np.pi / L,
101 bottom=offset + (2**j / 2**J) ,
102 color=color)
103
```
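The `Normalize`/`ScalarMappable` pair near lines 59-60 of the script above is what turns each raw scattering coefficient into the hex colour handed to `ax.bar`. A standalone illustration of that mapping, with arbitrarily chosen sample values:

```python
# Mapping scalars to hex colours the same way the script does (no kymatio needed).
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np

values = np.array([0.2, 1.5, 3.0, 7.5])
norm = mpl.colors.Normalize(values.min(), values.max(), clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='gray')

for v in values:
    rgba = mapper.to_rgba(v)               # RGBA floats in [0, 1]
    print(v, mpl.colors.to_hex(rgba))      # darker for small values, lighter for large
```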
Path: `examples/2d/plot_invert_scattering_torch.py`
Content:
```
1 """
2 Inverting scattering via mse
3 ============================
4 This script aims to quantify the information loss for natural images by
5 performing a reconstruction of an image from its scattering coefficients via a
6 L2-norm minimization.
7 """
8
9 ###############################################################################
10 # Imports
11 # -------
12 import matplotlib.pyplot as plt
13 import numpy as np
14 import torch
15 import torch.nn.functional as F
16 from PIL import Image
17 from torch import optim
18 from scipy.misc import face
19
20 from kymatio.torch import Scattering2D
21
22 device = "cuda" if torch.cuda.is_available() else "cpu"
23
24 ###############################################################################
25 # Load test image
26 # ---------------
27 src_img = Image.fromarray(face())
28 src_img = src_img.resize((512, 384), Image.ANTIALIAS)
29 src_img = np.array(src_img).astype(np.float32)
30 src_img = src_img / 255.0
31 plt.imshow(src_img)
32 plt.title("Original image")
33
34 src_img = np.moveaxis(src_img, -1, 0) # HWC to CHW
35 max_iter = 5 # number of steps for the GD
36 print("Image shape: ", src_img.shape)
37 channels, height, width = src_img.shape
38
39 ###############################################################################
40 # Main loop
41 # ----------
42 for order in [1]:
43 for J in [2, 4]:
44
45 # Compute scattering coefficients
46 scattering = Scattering2D(J=J, shape=(height, width), max_order=order)
47 if device == "cuda":
48 scattering = scattering.cuda()
49 max_iter = 500
50 src_img_tensor = torch.from_numpy(src_img).to(device).contiguous()
51 scattering_coefficients = scattering(src_img_tensor)
52
53 # Create trainable input image
54 input_tensor = torch.rand(src_img.shape, requires_grad=True, device=device)
55
56 # Optimizer hyperparams
57 optimizer = optim.Adam([input_tensor], lr=1)
58
59 # Training
60 best_img = None
61 best_loss = float("inf")
62 for epoch in range(1, max_iter):
63 new_coefficients = scattering(input_tensor)
64 loss = F.mse_loss(input=new_coefficients, target=scattering_coefficients)
65 print("Epoch {}, loss: {}".format(epoch, loss.item()), end="\r")
66 optimizer.zero_grad()
67 loss.backward()
68 optimizer.step()
69 if loss < best_loss:
70 best_loss = loss.detach().cpu().item()
71 best_img = input_tensor.detach().cpu().numpy()
72
73 best_img = np.clip(best_img, 0.0, 1.0)
74
75 # PSNR
76 mse = np.mean((src_img - best_img) ** 2)
77 psnr = 20 * np.log10(1.0 / np.sqrt(mse))
78 print("\nPSNR: {:.2f}dB for order {} and J={}".format(psnr, order, J))
79
80 # Plot
81 plt.figure()
82 plt.imshow(np.moveaxis(best_img, 0, -1))
83 plt.title("PSNR: {:.2f}dB (order {}, J={})".format(psnr, order, J))
84
85 plt.show()
86
```
--- END FILES ---
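The PSNR printed at the end of the second script follows the usual definition for images scaled to [0, 1]. A standalone helper plus a quick sanity check (random data and an arbitrary noise level, purely for illustration):

```python
# PSNR for images in [0, 1], matching the formula used in the script above.
import numpy as np


def psnr(reference, estimate, peak=1.0):
    mse = np.mean((reference - estimate) ** 2)
    if mse == 0:
        return float('inf')
    return 20.0 * np.log10(peak / np.sqrt(mse))


rng = np.random.default_rng(0)
img = rng.random((64, 64))
noisy = np.clip(img + 0.05 * rng.standard_normal(img.shape), 0.0, 1.0)
print('identical:', psnr(img, img))              # inf
print('noisy    :', round(psnr(img, noisy), 2))  # roughly 26 dB
```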
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/2d/plot_invert_scattering_torch.py b/examples/2d/plot_invert_scattering_torch.py
--- a/examples/2d/plot_invert_scattering_torch.py
+++ b/examples/2d/plot_invert_scattering_torch.py
@@ -32,7 +32,7 @@
plt.title("Original image")
src_img = np.moveaxis(src_img, -1, 0) # HWC to CHW
-max_iter = 5 # number of steps for the GD
+max_iter = 15 # number of steps for the GD
print("Image shape: ", src_img.shape)
channels, height, width = src_img.shape
diff --git a/examples/2d/plot_scattering_disk.py b/examples/2d/plot_scattering_disk.py
--- a/examples/2d/plot_scattering_disk.py
+++ b/examples/2d/plot_scattering_disk.py
@@ -74,7 +74,7 @@
fig,ax = plt.subplots()
-plt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')
+plt.imshow(src_img,cmap='gray',interpolation='nearest', aspect='auto')
ax.axis('off')
offset = 0.1
for row in range(window_rows):
|
{"golden_diff": "diff --git a/examples/2d/plot_invert_scattering_torch.py b/examples/2d/plot_invert_scattering_torch.py\n--- a/examples/2d/plot_invert_scattering_torch.py\n+++ b/examples/2d/plot_invert_scattering_torch.py\n@@ -32,7 +32,7 @@\n plt.title(\"Original image\")\n \n src_img = np.moveaxis(src_img, -1, 0) # HWC to CHW\n-max_iter = 5 # number of steps for the GD\n+max_iter = 15 # number of steps for the GD\n print(\"Image shape: \", src_img.shape)\n channels, height, width = src_img.shape\n \ndiff --git a/examples/2d/plot_scattering_disk.py b/examples/2d/plot_scattering_disk.py\n--- a/examples/2d/plot_scattering_disk.py\n+++ b/examples/2d/plot_scattering_disk.py\n@@ -74,7 +74,7 @@\n \n fig,ax = plt.subplots()\n \n-plt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')\n+plt.imshow(src_img,cmap='gray',interpolation='nearest', aspect='auto')\n ax.axis('off')\n offset = 0.1\n for row in range(window_rows):\n", "issue": "sphinx inverses colors...\nno clue why, but it can wait v3.\r\n\r\ncf here:\r\nhttps://www.kymat.io/gallery_2d/plot_scattering_disk.html#sphx-glr-gallery-2d-plot-scattering-disk-py\r\nhttps://www.kymat.io/gallery_2d/plot_invert_scattering_torch.html#sphx-glr-gallery-2d-plot-invert-scattering-torch-py\n", "before_files": [{"content": "\"\"\"\nScattering disk display\n=======================\nThis script reproduces concentric circles that encode Scattering coefficient's\nenergy as described in \"Invariant Scattering Convolution Networks\" by Bruna and Mallat.\nHere, for the sake of simplicity, we only consider first order scattering.\n\nAuthor: https://github.com/Jonas1312\nEdited by: Edouard Oyallon\n\"\"\"\n\n\n\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom kymatio import Scattering2D\nfrom PIL import Image\nimport os\n\n\nimg_name = os.path.join(os.getcwd(),\"images/digit.png\")\n\n####################################################################\n# Scattering computations\n#-------------------------------------------------------------------\n# First, we read the input digit:\nsrc_img = Image.open(img_name).convert('L').resize((32,32))\nsrc_img = np.array(src_img)\nprint(\"img shape: \", src_img.shape)\n\n####################################################################\n# We compute a Scattering Transform with L=6 angles and J=3 scales.\n# Rotating a wavelet $\\psi$ by $\\pi$ is equivalent to consider its\n# conjugate in fourier: $\\hat\\psi_{\\pi}(\\omega)=\\hat\\psi(r_{-\\pi}\\omega)^*$.\n#\n# Combining this and the fact that a real signal has a Hermitian symmetry\n# implies that it is usually sufficient to use the angles $\\{\\frac{\\pi l}{L}\\}_{l\\leq L}$ at computation time.\n# For consistency, we will however display $\\{\\frac{2\\pi l}{L}\\}_{l\\leq 2L}$,\n# which implies that our visualization will be redundant and have a symmetry by rotation of $\\pi$.\n\nL = 6\nJ = 3\nscattering = Scattering2D(J=J, shape=src_img.shape, L=L, max_order=1, frontend='numpy')\n\n####################################################################\n# We now compute the scattering coefficients:\nsrc_img_tensor = src_img.astype(np.float32) / 255.\n\nscattering_coefficients = scattering(src_img_tensor)\nprint(\"coeffs shape: \", scattering_coefficients.shape)\n# Invert colors\nscattering_coefficients = -scattering_coefficients\n\n####################################################################\n# We skip the low pass filter...\nscattering_coefficients = scattering_coefficients[1:, :, 
:]\nnorm = mpl.colors.Normalize(scattering_coefficients.min(), scattering_coefficients.max(), clip=True)\nmapper = cm.ScalarMappable(norm=norm, cmap=\"gray\")\nnb_coeffs, window_rows, window_columns = scattering_coefficients.shape\n\n####################################################################\n# Figure reproduction\n#-------------------------------------------------------------------\n\n####################################################################\n# Now we can reproduce a figure that displays the energy of the first\n# order Scattering coefficient, which are given by $\\{\\mid x\\star\\psi_{j,\\theta}\\mid\\star\\phi_J|\\}_{j,\\theta}$ .\n# Here, each scattering coefficient is represented on the polar plane. The polar radius and angle correspond\n# respectively to the scale $j$ and the rotation $\\theta$ applied to the mother wavelet.\n#\n# Observe that as predicted, the visualization exhibit a redundancy and a symmetry.\n\nfig,ax = plt.subplots()\n\nplt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')\nax.axis('off')\noffset = 0.1\nfor row in range(window_rows):\n for column in range(window_columns):\n ax=fig.add_subplot(window_rows, window_columns, 1 + column + row * window_rows, projection='polar')\n ax.set_ylim(0, 1)\n ax.axis('off')\n ax.set_yticklabels([]) # turn off radial tick labels (yticks)\n ax.set_xticklabels([]) # turn off degrees\n # ax.set_theta_zero_location('N') # 0\u00b0 to North\n coefficients = scattering_coefficients[:, row, column]\n for j in range(J):\n for l in range(L):\n coeff = coefficients[l + (J - 1 - j) * L]\n color = mpl.colors.to_hex(mapper.to_rgba(coeff))\n ax.bar(x=(4.5+l) * np.pi / L,\n height=2*(2**(j-1) / 2**J),\n width=2 * np.pi / L,\n bottom=offset + (2**j / 2**J) ,\n color=color)\n ax.bar(x=(4.5+l+L) * np.pi / L,\n height=2*(2**(j-1) / 2**J),\n width=2 * np.pi / L,\n bottom=offset + (2**j / 2**J) ,\n color=color)\n", "path": "examples/2d/plot_scattering_disk.py"}, {"content": "\"\"\"\nInverting scattering via mse\n============================\nThis script aims to quantify the information loss for natural images by\nperforming a reconstruction of an image from its scattering coefficients via a\nL2-norm minimization.\n\"\"\"\n\n###############################################################################\n# Imports\n# -------\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torch import optim\nfrom scipy.misc import face\n\nfrom kymatio.torch import Scattering2D\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n###############################################################################\n# Load test image\n# ---------------\nsrc_img = Image.fromarray(face())\nsrc_img = src_img.resize((512, 384), Image.ANTIALIAS)\nsrc_img = np.array(src_img).astype(np.float32)\nsrc_img = src_img / 255.0\nplt.imshow(src_img)\nplt.title(\"Original image\")\n\nsrc_img = np.moveaxis(src_img, -1, 0) # HWC to CHW\nmax_iter = 5 # number of steps for the GD\nprint(\"Image shape: \", src_img.shape)\nchannels, height, width = src_img.shape\n\n###############################################################################\n# Main loop\n# ----------\nfor order in [1]:\n for J in [2, 4]:\n\n # Compute scattering coefficients\n scattering = Scattering2D(J=J, shape=(height, width), max_order=order)\n if device == \"cuda\":\n scattering = scattering.cuda()\n max_iter = 500\n src_img_tensor = torch.from_numpy(src_img).to(device).contiguous()\n 
scattering_coefficients = scattering(src_img_tensor)\n\n # Create trainable input image\n input_tensor = torch.rand(src_img.shape, requires_grad=True, device=device)\n\n # Optimizer hyperparams\n optimizer = optim.Adam([input_tensor], lr=1)\n\n # Training\n best_img = None\n best_loss = float(\"inf\")\n for epoch in range(1, max_iter):\n new_coefficients = scattering(input_tensor)\n loss = F.mse_loss(input=new_coefficients, target=scattering_coefficients)\n print(\"Epoch {}, loss: {}\".format(epoch, loss.item()), end=\"\\r\")\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if loss < best_loss:\n best_loss = loss.detach().cpu().item()\n best_img = input_tensor.detach().cpu().numpy()\n\n best_img = np.clip(best_img, 0.0, 1.0)\n\n # PSNR\n mse = np.mean((src_img - best_img) ** 2)\n psnr = 20 * np.log10(1.0 / np.sqrt(mse))\n print(\"\\nPSNR: {:.2f}dB for order {} and J={}\".format(psnr, order, J))\n\n # Plot\n plt.figure()\n plt.imshow(np.moveaxis(best_img, 0, -1))\n plt.title(\"PSNR: {:.2f}dB (order {}, J={})\".format(psnr, order, J))\n\nplt.show()\n", "path": "examples/2d/plot_invert_scattering_torch.py"}], "after_files": [{"content": "\"\"\"\nScattering disk display\n=======================\nThis script reproduces concentric circles that encode Scattering coefficient's\nenergy as described in \"Invariant Scattering Convolution Networks\" by Bruna and Mallat.\nHere, for the sake of simplicity, we only consider first order scattering.\n\nAuthor: https://github.com/Jonas1312\nEdited by: Edouard Oyallon\n\"\"\"\n\n\n\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom kymatio import Scattering2D\nfrom PIL import Image\nimport os\n\n\nimg_name = os.path.join(os.getcwd(),\"images/digit.png\")\n\n####################################################################\n# Scattering computations\n#-------------------------------------------------------------------\n# First, we read the input digit:\nsrc_img = Image.open(img_name).convert('L').resize((32,32))\nsrc_img = np.array(src_img)\nprint(\"img shape: \", src_img.shape)\n\n####################################################################\n# We compute a Scattering Transform with L=6 angles and J=3 scales.\n# Rotating a wavelet $\\psi$ by $\\pi$ is equivalent to consider its\n# conjugate in fourier: $\\hat\\psi_{\\pi}(\\omega)=\\hat\\psi(r_{-\\pi}\\omega)^*$.\n#\n# Combining this and the fact that a real signal has a Hermitian symmetry\n# implies that it is usually sufficient to use the angles $\\{\\frac{\\pi l}{L}\\}_{l\\leq L}$ at computation time.\n# For consistency, we will however display $\\{\\frac{2\\pi l}{L}\\}_{l\\leq 2L}$,\n# which implies that our visualization will be redundant and have a symmetry by rotation of $\\pi$.\n\nL = 6\nJ = 3\nscattering = Scattering2D(J=J, shape=src_img.shape, L=L, max_order=1, frontend='numpy')\n\n####################################################################\n# We now compute the scattering coefficients:\nsrc_img_tensor = src_img.astype(np.float32) / 255.\n\nscattering_coefficients = scattering(src_img_tensor)\nprint(\"coeffs shape: \", scattering_coefficients.shape)\n# Invert colors\nscattering_coefficients = -scattering_coefficients\n\n####################################################################\n# We skip the low pass filter...\nscattering_coefficients = scattering_coefficients[1:, :, :]\nnorm = mpl.colors.Normalize(scattering_coefficients.min(), scattering_coefficients.max(), clip=True)\nmapper = 
cm.ScalarMappable(norm=norm, cmap=\"gray\")\nnb_coeffs, window_rows, window_columns = scattering_coefficients.shape\n\n####################################################################\n# Figure reproduction\n#-------------------------------------------------------------------\n\n####################################################################\n# Now we can reproduce a figure that displays the energy of the first\n# order Scattering coefficient, which are given by $\\{\\mid x\\star\\psi_{j,\\theta}\\mid\\star\\phi_J|\\}_{j,\\theta}$ .\n# Here, each scattering coefficient is represented on the polar plane. The polar radius and angle correspond\n# respectively to the scale $j$ and the rotation $\\theta$ applied to the mother wavelet.\n#\n# Observe that as predicted, the visualization exhibit a redundancy and a symmetry.\n\nfig,ax = plt.subplots()\n\nplt.imshow(src_img,cmap='gray',interpolation='nearest', aspect='auto')\nax.axis('off')\noffset = 0.1\nfor row in range(window_rows):\n for column in range(window_columns):\n ax=fig.add_subplot(window_rows, window_columns, 1 + column + row * window_rows, projection='polar')\n ax.set_ylim(0, 1)\n ax.axis('off')\n ax.set_yticklabels([]) # turn off radial tick labels (yticks)\n ax.set_xticklabels([]) # turn off degrees\n # ax.set_theta_zero_location('N') # 0\u00b0 to North\n coefficients = scattering_coefficients[:, row, column]\n for j in range(J):\n for l in range(L):\n coeff = coefficients[l + (J - 1 - j) * L]\n color = mpl.colors.to_hex(mapper.to_rgba(coeff))\n ax.bar(x=(4.5+l) * np.pi / L,\n height=2*(2**(j-1) / 2**J),\n width=2 * np.pi / L,\n bottom=offset + (2**j / 2**J) ,\n color=color)\n ax.bar(x=(4.5+l+L) * np.pi / L,\n height=2*(2**(j-1) / 2**J),\n width=2 * np.pi / L,\n bottom=offset + (2**j / 2**J) ,\n color=color)\n", "path": "examples/2d/plot_scattering_disk.py"}, {"content": "\"\"\"\nInverting scattering via mse\n============================\nThis script aims to quantify the information loss for natural images by\nperforming a reconstruction of an image from its scattering coefficients via a\nL2-norm minimization.\n\"\"\"\n\n###############################################################################\n# Imports\n# -------\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torch import optim\nfrom scipy.misc import face\n\nfrom kymatio.torch import Scattering2D\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n###############################################################################\n# Load test image\n# ---------------\nsrc_img = Image.fromarray(face())\nsrc_img = src_img.resize((512, 384), Image.ANTIALIAS)\nsrc_img = np.array(src_img).astype(np.float32)\nsrc_img = src_img / 255.0\nplt.imshow(src_img)\nplt.title(\"Original image\")\n\nsrc_img = np.moveaxis(src_img, -1, 0) # HWC to CHW\nmax_iter = 15 # number of steps for the GD\nprint(\"Image shape: \", src_img.shape)\nchannels, height, width = src_img.shape\n\n###############################################################################\n# Main loop\n# ----------\nfor order in [1]:\n for J in [2, 4]:\n\n # Compute scattering coefficients\n scattering = Scattering2D(J=J, shape=(height, width), max_order=order)\n if device == \"cuda\":\n scattering = scattering.cuda()\n max_iter = 500\n src_img_tensor = torch.from_numpy(src_img).to(device).contiguous()\n scattering_coefficients = scattering(src_img_tensor)\n\n # Create trainable input image\n input_tensor = 
torch.rand(src_img.shape, requires_grad=True, device=device)\n\n # Optimizer hyperparams\n optimizer = optim.Adam([input_tensor], lr=1)\n\n # Training\n best_img = None\n best_loss = float(\"inf\")\n for epoch in range(1, max_iter):\n new_coefficients = scattering(input_tensor)\n loss = F.mse_loss(input=new_coefficients, target=scattering_coefficients)\n print(\"Epoch {}, loss: {}\".format(epoch, loss.item()), end=\"\\r\")\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if loss < best_loss:\n best_loss = loss.detach().cpu().item()\n best_img = input_tensor.detach().cpu().numpy()\n\n best_img = np.clip(best_img, 0.0, 1.0)\n\n # PSNR\n mse = np.mean((src_img - best_img) ** 2)\n psnr = 20 * np.log10(1.0 / np.sqrt(mse))\n print(\"\\nPSNR: {:.2f}dB for order {} and J={}\".format(psnr, order, J))\n\n # Plot\n plt.figure()\n plt.imshow(np.moveaxis(best_img, 0, -1))\n plt.title(\"PSNR: {:.2f}dB (order {}, J={})\".format(psnr, order, J))\n\nplt.show()\n", "path": "examples/2d/plot_invert_scattering_torch.py"}]}
| 2,418 | 286 |
gh_patches_debug_12440
|
rasdani/github-patches
|
git_diff
|
conda__conda-8272
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
condarc file precedence not respected when merging
In my system `.condarc` I have `defaults` and `conda-forge` and in my user `.condarc` I have my own `dhirschfeld` channel:

My understanding ([from this blog post](https://www.anaconda.com/conda-configuration-engine-power-users/)) was that the system `.condarc` should take precedence when merging and that my channel should be *appended* to the channel list. This is the behaviour I'd like, but not what I'm observing in practice:
```
(base) C:\> conda info
active environment : base
active env location : C:\Miniconda3
shell level : 1
user config file : C:\Users\dhirschfeld\.condarc
populated config files : C:\Miniconda3\.condarc
C:\Users\dhirschfeld\.condarc
conda version : 4.6.3
conda-build version : not installed
python version : 3.7.1.final.0
base environment : C:\Miniconda3 (writable)
channel URLs : https://conda.anaconda.org/dhirschfeld/win-64
https://conda.anaconda.org/dhirschfeld/noarch
https://repo.anaconda.com/pkgs/main/win-64
https://repo.anaconda.com/pkgs/main/noarch
https://repo.anaconda.com/pkgs/free/win-64
https://repo.anaconda.com/pkgs/free/noarch
https://repo.anaconda.com/pkgs/r/win-64
https://repo.anaconda.com/pkgs/r/noarch
https://repo.anaconda.com/pkgs/msys2/win-64
https://repo.anaconda.com/pkgs/msys2/noarch
https://conda.anaconda.org/conda-forge/win-64
https://conda.anaconda.org/conda-forge/noarch
package cache : C:\Miniconda3\pkgs
C:\Users\dhirschfeld\.conda\pkgs
C:\Users\dhirschfeld\AppData\Local\conda\conda\pkgs
envs directories : C:\Miniconda3\envs
C:\Users\dhirschfeld\.conda\envs
C:\Users\dhirschfeld\AppData\Local\conda\conda\envs
platform : win-64
user-agent : conda/4.6.3 requests/2.21.0 CPython/3.7.1 Windows/10 Windows/10.0.17763
administrator : False
netrc file : None
offline mode : False
```
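
For anyone reproducing this, conda's own config introspection shows which file contributes which channels and what the merged list resolves to. This is just a sketch using standard `conda config` subcommands (they should be available in conda 4.6; output omitted since it varies by machine):

```shell
# List every populated .condarc and the values each file contributes
conda config --show-sources

# Show the merged channel list conda actually resolves
conda config --show channels
```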
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/base/constants.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2012 Anaconda, Inc
3 # SPDX-License-Identifier: BSD-3-Clause
4 """
5 This file should hold most string literals and magic numbers used throughout the code base.
6 The exception is if a literal is specifically meant to be private to and isolated within a module.
7 Think of this as a "more static" source of configuration information.
8
9 Another important source of "static" configuration is conda/models/enums.py.
10 """
11 from __future__ import absolute_import, division, print_function, unicode_literals
12
13 from enum import Enum, EnumMeta
14 from os.path import join
15
16 from ..common.compat import itervalues, on_win, six_with_metaclass, string_types
17
18 PREFIX_PLACEHOLDER = ('/opt/anaconda1anaconda2'
19 # this is intentionally split into parts, such that running
20 # this program on itself will leave it unchanged
21 'anaconda3')
22
23 machine_bits = 8 * tuple.__itemsize__
24
25 APP_NAME = 'conda'
26
27 SEARCH_PATH = (
28 '/etc/conda/.condarc',
29 '/etc/conda/condarc',
30 '/etc/conda/condarc.d/',
31 '/var/lib/conda/.condarc',
32 '/var/lib/conda/condarc',
33 '/var/lib/conda/condarc.d/',
34 '$CONDA_ROOT/.condarc',
35 '$CONDA_ROOT/condarc',
36 '$CONDA_ROOT/condarc.d/',
37 '~/.conda/.condarc',
38 '~/.conda/condarc',
39 '~/.conda/condarc.d/',
40 '~/.condarc',
41 '$CONDA_PREFIX/.condarc',
42 '$CONDA_PREFIX/condarc',
43 '$CONDA_PREFIX/condarc.d/',
44 '$CONDARC',
45 )
46
47 DEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org'
48 CONDA_HOMEPAGE_URL = 'https://conda.io'
49 ERROR_UPLOAD_URL = 'https://conda.io/conda-post/unexpected-error'
50 DEFAULTS_CHANNEL_NAME = 'defaults'
51
52 PLATFORM_DIRECTORIES = (
53 "noarch",
54 "linux-32",
55 "linux-64",
56 "linux-aarch64",
57 "linux-armv6l",
58 "linux-armv7l",
59 "linux-ppc64le",
60 "osx-64",
61 "win-32",
62 "win-64",
63 "zos-z",
64 )
65
66 RECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')
67
68
69 DEFAULT_CHANNELS_UNIX = (
70 'https://repo.anaconda.com/pkgs/main',
71 'https://repo.anaconda.com/pkgs/free',
72 'https://repo.anaconda.com/pkgs/r',
73 )
74
75 DEFAULT_CHANNELS_WIN = (
76 'https://repo.anaconda.com/pkgs/main',
77 'https://repo.anaconda.com/pkgs/free',
78 'https://repo.anaconda.com/pkgs/r',
79 'https://repo.anaconda.com/pkgs/msys2',
80 )
81
82 DEFAULT_CUSTOM_CHANNELS = {
83 'pkgs/pro': 'https://repo.anaconda.com',
84 }
85
86 DEFAULT_CHANNELS = DEFAULT_CHANNELS_WIN if on_win else DEFAULT_CHANNELS_UNIX
87
88 ROOT_ENV_NAME = 'base'
89
90 ROOT_NO_RM = (
91 'python',
92 'pycosat',
93 'ruamel_yaml',
94 'conda',
95 'openssl',
96 'requests',
97 )
98
99 DEFAULT_AGGRESSIVE_UPDATE_PACKAGES = (
100 'ca-certificates',
101 'certifi',
102 'openssl',
103 )
104
105 if on_win:
106 COMPATIBLE_SHELLS = (
107 'bash',
108 'cmd.exe',
109 'fish',
110 'tcsh',
111 'xonsh',
112 'zsh',
113 'powershell',
114 )
115 else:
116 COMPATIBLE_SHELLS = (
117 'bash',
118 'fish',
119 'tcsh',
120 'xonsh',
121 'zsh',
122 'powershell',
123 )
124
125
126 # Maximum priority, reserved for packages we really want to remove
127 MAX_CHANNEL_PRIORITY = 10000
128
129 CONDA_TARBALL_EXTENSION = '.tar.bz2'
130
131 UNKNOWN_CHANNEL = "<unknown>"
132
133
134 class SafetyChecks(Enum):
135 disabled = 'disabled'
136 warn = 'warn'
137 enabled = 'enabled'
138
139 def __str__(self):
140 return self.value
141
142
143 class PathConflict(Enum):
144 clobber = 'clobber'
145 warn = 'warn'
146 prevent = 'prevent'
147
148 def __str__(self):
149 return self.value
150
151
152 class DepsModifier(Enum):
153 """Flags to enable alternate handling of dependencies."""
154 NOT_SET = 'not_set' # default
155 NO_DEPS = 'no_deps'
156 ONLY_DEPS = 'only_deps'
157
158 def __str__(self):
159 return self.value
160
161
162 class UpdateModifier(Enum):
163 SPECS_SATISFIED_SKIP_SOLVE = 'specs_satisfied_skip_solve'
164 FREEZE_INSTALLED = 'freeze_installed' # freeze is a better name for --no-update-deps
165 UPDATE_DEPS = 'update_deps'
166 UPDATE_SPECS = 'update_specs' # default
167 UPDATE_ALL = 'update_all'
168 # TODO: add REINSTALL_ALL, see https://github.com/conda/conda/issues/6247 and https://github.com/conda/conda/issues/3149 # NOQA
169
170 def __str__(self):
171 return self.value
172
173
174 class ChannelPriorityMeta(EnumMeta):
175
176 def __call__(cls, value, *args, **kwargs):
177 try:
178 return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)
179 except ValueError:
180 if isinstance(value, string_types):
181 from .._vendor.auxlib.type_coercion import typify
182 value = typify(value)
183 if value is True:
184 value = 'flexible'
185 elif value is False:
186 value = cls.DISABLED
187 return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)
188
189
190 class ChannelPriority(six_with_metaclass(ChannelPriorityMeta, Enum)):
191 __name__ = "ChannelPriority"
192
193 STRICT = 'strict'
194 # STRICT_OR_FLEXIBLE = 'strict_or_flexible' # TODO: consider implementing if needed
195 FLEXIBLE = 'flexible'
196 DISABLED = 'disabled'
197
198 def __str__(self):
199 return self.value
200
201
202 class SatSolverChoice(Enum):
203 PYCOSAT = 'pycosat'
204 PYCRYPTOSAT = 'pycryptosat'
205 PYSAT = 'pysat'
206
207 def __str__(self):
208 return self.value
209
210
211 # Magic files for permissions determination
212 PACKAGE_CACHE_MAGIC_FILE = 'urls.txt'
213 PREFIX_MAGIC_FILE = join('conda-meta', 'history')
214
215
216 # TODO: should be frozendict(), but I don't want to import frozendict from auxlib here.
217 NAMESPACES_MAP = { # base package name, namespace
218 "python": "python",
219 "r": "r",
220 "r-base": "r",
221 "mro-base": "r",
222 "erlang": "erlang",
223 "java": "java",
224 "openjdk": "java",
225 "julia": "julia",
226 "latex": "latex",
227 "lua": "lua",
228 "nodejs": "js",
229 "perl": "perl",
230 "php": "php",
231 "ruby": "ruby",
232 "m2-base": "m2",
233 "msys2-conda-epoch": "m2w64",
234 }
235
236 NAMESPACE_PACKAGE_NAMES = frozenset(NAMESPACES_MAP)
237 NAMESPACES = frozenset(itervalues(NAMESPACES_MAP))
238
239 # Namespace arbiters of uniqueness
240 # global: some repository established by Anaconda, Inc. and conda-forge
241 # python: https://pypi.org/simple
242 # r: https://cran.r-project.org/web/packages/available_packages_by_name.html
243 # erlang: https://hex.pm/packages
244 # java: https://repo1.maven.org/maven2/
245 # julia: https://pkg.julialang.org/
246 # latex: https://ctan.org/pkg
247 # lua: https://luarocks.org/m/root
248 # js: https://docs.npmjs.com/misc/registry
249 # pascal: ???
250 # perl: https://www.cpan.org/modules/01modules.index.html
251 # php: https://packagist.org/
252 # ruby: https://rubygems.org/gems
253 # clojure: https://clojars.org/
254
255
256 # Not all python namespace packages are registered on PyPI. If a package
257 # contains files in site-packages, it probably belongs in the python namespace.
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/base/constants.py b/conda/base/constants.py
--- a/conda/base/constants.py
+++ b/conda/base/constants.py
@@ -24,13 +24,23 @@
APP_NAME = 'conda'
-SEARCH_PATH = (
- '/etc/conda/.condarc',
- '/etc/conda/condarc',
- '/etc/conda/condarc.d/',
- '/var/lib/conda/.condarc',
- '/var/lib/conda/condarc',
- '/var/lib/conda/condarc.d/',
+if on_win:
+ SEARCH_PATH = (
+ 'C:/ProgramData/conda/.condarc',
+ 'C:/ProgramData/conda/condarc',
+ 'C:/ProgramData/conda/condarc.d',
+ )
+else:
+ SEARCH_PATH = (
+ '/etc/conda/.condarc',
+ '/etc/conda/condarc',
+ '/etc/conda/condarc.d/',
+ '/var/lib/conda/.condarc',
+ '/var/lib/conda/condarc',
+ '/var/lib/conda/condarc.d/',
+ )
+
+SEARCH_PATH += (
'$CONDA_ROOT/.condarc',
'$CONDA_ROOT/condarc',
'$CONDA_ROOT/condarc.d/',
|
{"golden_diff": "diff --git a/conda/base/constants.py b/conda/base/constants.py\n--- a/conda/base/constants.py\n+++ b/conda/base/constants.py\n@@ -24,13 +24,23 @@\n \n APP_NAME = 'conda'\n \n-SEARCH_PATH = (\n- '/etc/conda/.condarc',\n- '/etc/conda/condarc',\n- '/etc/conda/condarc.d/',\n- '/var/lib/conda/.condarc',\n- '/var/lib/conda/condarc',\n- '/var/lib/conda/condarc.d/',\n+if on_win:\n+ SEARCH_PATH = (\n+ 'C:/ProgramData/conda/.condarc',\n+ 'C:/ProgramData/conda/condarc',\n+ 'C:/ProgramData/conda/condarc.d',\n+ )\n+else:\n+ SEARCH_PATH = (\n+ '/etc/conda/.condarc',\n+ '/etc/conda/condarc',\n+ '/etc/conda/condarc.d/',\n+ '/var/lib/conda/.condarc',\n+ '/var/lib/conda/condarc',\n+ '/var/lib/conda/condarc.d/',\n+ )\n+\n+SEARCH_PATH += (\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/condarc.d/',\n", "issue": "condarc file precedence not respected when merging\nIn my system `.condarc` I have `defaults` and `conda-forge` and in my user `.condarc` I have my own `dhirschfeld` channel:\r\n\r\n\r\n\r\nMy understanding ([from this blog post](https://www.anaconda.com/conda-configuration-engine-power-users/)) was that the system `.condarc` should take precedence when merging and that my channel should be *appended* to the channel list. This is the behaviour I'd like, but not what I'm observing in practice:\r\n```\r\n(base) C:\\> conda info\r\n\r\n active environment : base\r\n active env location : C:\\Miniconda3\r\n shell level : 1\r\n user config file : C:\\Users\\dhirschfeld\\.condarc\r\n populated config files : C:\\Miniconda3\\.condarc\r\n C:\\Users\\dhirschfeld\\.condarc\r\n conda version : 4.6.3\r\n conda-build version : not installed\r\n python version : 3.7.1.final.0\r\n base environment : C:\\Miniconda3 (writable)\r\n channel URLs : https://conda.anaconda.org/dhirschfeld/win-64\r\n https://conda.anaconda.org/dhirschfeld/noarch\r\n https://repo.anaconda.com/pkgs/main/win-64\r\n https://repo.anaconda.com/pkgs/main/noarch\r\n https://repo.anaconda.com/pkgs/free/win-64\r\n https://repo.anaconda.com/pkgs/free/noarch\r\n https://repo.anaconda.com/pkgs/r/win-64\r\n https://repo.anaconda.com/pkgs/r/noarch\r\n https://repo.anaconda.com/pkgs/msys2/win-64\r\n https://repo.anaconda.com/pkgs/msys2/noarch\r\n https://conda.anaconda.org/conda-forge/win-64\r\n https://conda.anaconda.org/conda-forge/noarch\r\n package cache : C:\\Miniconda3\\pkgs\r\n C:\\Users\\dhirschfeld\\.conda\\pkgs\r\n C:\\Users\\dhirschfeld\\AppData\\Local\\conda\\conda\\pkgs\r\n envs directories : C:\\Miniconda3\\envs\r\n C:\\Users\\dhirschfeld\\.conda\\envs\r\n C:\\Users\\dhirschfeld\\AppData\\Local\\conda\\conda\\envs\r\n platform : win-64\r\n user-agent : conda/4.6.3 requests/2.21.0 CPython/3.7.1 Windows/10 Windows/10.0.17763\r\n administrator : False\r\n netrc file : None\r\n offline mode : False\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"\nThis file should hold most string literals and magic numbers used throughout the code base.\nThe exception is if a literal is specifically meant to be private to and isolated within a module.\nThink of this as a \"more static\" source of configuration information.\n\nAnother important source of \"static\" configuration is conda/models/enums.py.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom enum import Enum, EnumMeta\nfrom os.path import join\n\nfrom ..common.compat import itervalues, on_win, six_with_metaclass, 
string_types\n\nPREFIX_PLACEHOLDER = ('/opt/anaconda1anaconda2'\n # this is intentionally split into parts, such that running\n # this program on itself will leave it unchanged\n 'anaconda3')\n\nmachine_bits = 8 * tuple.__itemsize__\n\nAPP_NAME = 'conda'\n\nSEARCH_PATH = (\n '/etc/conda/.condarc',\n '/etc/conda/condarc',\n '/etc/conda/condarc.d/',\n '/var/lib/conda/.condarc',\n '/var/lib/conda/condarc',\n '/var/lib/conda/condarc.d/',\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/condarc.d/',\n '~/.conda/.condarc',\n '~/.conda/condarc',\n '~/.conda/condarc.d/',\n '~/.condarc',\n '$CONDA_PREFIX/.condarc',\n '$CONDA_PREFIX/condarc',\n '$CONDA_PREFIX/condarc.d/',\n '$CONDARC',\n)\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org'\nCONDA_HOMEPAGE_URL = 'https://conda.io'\nERROR_UPLOAD_URL = 'https://conda.io/conda-post/unexpected-error'\nDEFAULTS_CHANNEL_NAME = 'defaults'\n\nPLATFORM_DIRECTORIES = (\n \"noarch\",\n \"linux-32\",\n \"linux-64\",\n \"linux-aarch64\",\n \"linux-armv6l\",\n \"linux-armv7l\",\n \"linux-ppc64le\",\n \"osx-64\",\n \"win-32\",\n \"win-64\",\n \"zos-z\",\n)\n\nRECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')\n\n\nDEFAULT_CHANNELS_UNIX = (\n 'https://repo.anaconda.com/pkgs/main',\n 'https://repo.anaconda.com/pkgs/free',\n 'https://repo.anaconda.com/pkgs/r',\n)\n\nDEFAULT_CHANNELS_WIN = (\n 'https://repo.anaconda.com/pkgs/main',\n 'https://repo.anaconda.com/pkgs/free',\n 'https://repo.anaconda.com/pkgs/r',\n 'https://repo.anaconda.com/pkgs/msys2',\n)\n\nDEFAULT_CUSTOM_CHANNELS = {\n 'pkgs/pro': 'https://repo.anaconda.com',\n}\n\nDEFAULT_CHANNELS = DEFAULT_CHANNELS_WIN if on_win else DEFAULT_CHANNELS_UNIX\n\nROOT_ENV_NAME = 'base'\n\nROOT_NO_RM = (\n 'python',\n 'pycosat',\n 'ruamel_yaml',\n 'conda',\n 'openssl',\n 'requests',\n)\n\nDEFAULT_AGGRESSIVE_UPDATE_PACKAGES = (\n 'ca-certificates',\n 'certifi',\n 'openssl',\n)\n\nif on_win:\n COMPATIBLE_SHELLS = (\n 'bash',\n 'cmd.exe',\n 'fish',\n 'tcsh',\n 'xonsh',\n 'zsh',\n 'powershell',\n )\nelse:\n COMPATIBLE_SHELLS = (\n 'bash',\n 'fish',\n 'tcsh',\n 'xonsh',\n 'zsh',\n 'powershell',\n )\n\n\n# Maximum priority, reserved for packages we really want to remove\nMAX_CHANNEL_PRIORITY = 10000\n\nCONDA_TARBALL_EXTENSION = '.tar.bz2'\n\nUNKNOWN_CHANNEL = \"<unknown>\"\n\n\nclass SafetyChecks(Enum):\n disabled = 'disabled'\n warn = 'warn'\n enabled = 'enabled'\n\n def __str__(self):\n return self.value\n\n\nclass PathConflict(Enum):\n clobber = 'clobber'\n warn = 'warn'\n prevent = 'prevent'\n\n def __str__(self):\n return self.value\n\n\nclass DepsModifier(Enum):\n \"\"\"Flags to enable alternate handling of dependencies.\"\"\"\n NOT_SET = 'not_set' # default\n NO_DEPS = 'no_deps'\n ONLY_DEPS = 'only_deps'\n\n def __str__(self):\n return self.value\n\n\nclass UpdateModifier(Enum):\n SPECS_SATISFIED_SKIP_SOLVE = 'specs_satisfied_skip_solve'\n FREEZE_INSTALLED = 'freeze_installed' # freeze is a better name for --no-update-deps\n UPDATE_DEPS = 'update_deps'\n UPDATE_SPECS = 'update_specs' # default\n UPDATE_ALL = 'update_all'\n # TODO: add REINSTALL_ALL, see https://github.com/conda/conda/issues/6247 and https://github.com/conda/conda/issues/3149 # NOQA\n\n def __str__(self):\n return self.value\n\n\nclass ChannelPriorityMeta(EnumMeta):\n\n def __call__(cls, value, *args, **kwargs):\n try:\n return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)\n except ValueError:\n if isinstance(value, string_types):\n from .._vendor.auxlib.type_coercion import typify\n value = 
typify(value)\n if value is True:\n value = 'flexible'\n elif value is False:\n value = cls.DISABLED\n return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)\n\n\nclass ChannelPriority(six_with_metaclass(ChannelPriorityMeta, Enum)):\n __name__ = \"ChannelPriority\"\n\n STRICT = 'strict'\n # STRICT_OR_FLEXIBLE = 'strict_or_flexible' # TODO: consider implementing if needed\n FLEXIBLE = 'flexible'\n DISABLED = 'disabled'\n\n def __str__(self):\n return self.value\n\n\nclass SatSolverChoice(Enum):\n PYCOSAT = 'pycosat'\n PYCRYPTOSAT = 'pycryptosat'\n PYSAT = 'pysat'\n\n def __str__(self):\n return self.value\n\n\n# Magic files for permissions determination\nPACKAGE_CACHE_MAGIC_FILE = 'urls.txt'\nPREFIX_MAGIC_FILE = join('conda-meta', 'history')\n\n\n# TODO: should be frozendict(), but I don't want to import frozendict from auxlib here.\nNAMESPACES_MAP = { # base package name, namespace\n \"python\": \"python\",\n \"r\": \"r\",\n \"r-base\": \"r\",\n \"mro-base\": \"r\",\n \"erlang\": \"erlang\",\n \"java\": \"java\",\n \"openjdk\": \"java\",\n \"julia\": \"julia\",\n \"latex\": \"latex\",\n \"lua\": \"lua\",\n \"nodejs\": \"js\",\n \"perl\": \"perl\",\n \"php\": \"php\",\n \"ruby\": \"ruby\",\n \"m2-base\": \"m2\",\n \"msys2-conda-epoch\": \"m2w64\",\n}\n\nNAMESPACE_PACKAGE_NAMES = frozenset(NAMESPACES_MAP)\nNAMESPACES = frozenset(itervalues(NAMESPACES_MAP))\n\n# Namespace arbiters of uniqueness\n# global: some repository established by Anaconda, Inc. and conda-forge\n# python: https://pypi.org/simple\n# r: https://cran.r-project.org/web/packages/available_packages_by_name.html\n# erlang: https://hex.pm/packages\n# java: https://repo1.maven.org/maven2/\n# julia: https://pkg.julialang.org/\n# latex: https://ctan.org/pkg\n# lua: https://luarocks.org/m/root\n# js: https://docs.npmjs.com/misc/registry\n# pascal: ???\n# perl: https://www.cpan.org/modules/01modules.index.html\n# php: https://packagist.org/\n# ruby: https://rubygems.org/gems\n# clojure: https://clojars.org/\n\n\n# Not all python namespace packages are registered on PyPI. 
If a package\n# contains files in site-packages, it probably belongs in the python namespace.\n", "path": "conda/base/constants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"\nThis file should hold most string literals and magic numbers used throughout the code base.\nThe exception is if a literal is specifically meant to be private to and isolated within a module.\nThink of this as a \"more static\" source of configuration information.\n\nAnother important source of \"static\" configuration is conda/models/enums.py.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom enum import Enum, EnumMeta\nfrom os.path import join\n\nfrom ..common.compat import itervalues, on_win, six_with_metaclass, string_types\n\nPREFIX_PLACEHOLDER = ('/opt/anaconda1anaconda2'\n # this is intentionally split into parts, such that running\n # this program on itself will leave it unchanged\n 'anaconda3')\n\nmachine_bits = 8 * tuple.__itemsize__\n\nAPP_NAME = 'conda'\n\nif on_win:\n SEARCH_PATH = (\n 'C:/ProgramData/conda/.condarc',\n 'C:/ProgramData/conda/condarc',\n 'C:/ProgramData/conda/condarc.d',\n )\nelse:\n SEARCH_PATH = (\n '/etc/conda/.condarc',\n '/etc/conda/condarc',\n '/etc/conda/condarc.d/',\n '/var/lib/conda/.condarc',\n '/var/lib/conda/condarc',\n '/var/lib/conda/condarc.d/',\n )\n\nSEARCH_PATH += (\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/condarc.d/',\n '~/.conda/.condarc',\n '~/.conda/condarc',\n '~/.conda/condarc.d/',\n '~/.condarc',\n '$CONDA_PREFIX/.condarc',\n '$CONDA_PREFIX/condarc',\n '$CONDA_PREFIX/condarc.d/',\n '$CONDARC',\n)\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org'\nCONDA_HOMEPAGE_URL = 'https://conda.io'\nERROR_UPLOAD_URL = 'https://conda.io/conda-post/unexpected-error'\nDEFAULTS_CHANNEL_NAME = 'defaults'\n\nPLATFORM_DIRECTORIES = (\n \"noarch\",\n \"linux-32\",\n \"linux-64\",\n \"linux-aarch64\",\n \"linux-armv6l\",\n \"linux-armv7l\",\n \"linux-ppc64le\",\n \"osx-64\",\n \"win-32\",\n \"win-64\",\n \"zos-z\",\n)\n\nRECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')\n\n\nDEFAULT_CHANNELS_UNIX = (\n 'https://repo.anaconda.com/pkgs/main',\n 'https://repo.anaconda.com/pkgs/free',\n 'https://repo.anaconda.com/pkgs/r',\n)\n\nDEFAULT_CHANNELS_WIN = (\n 'https://repo.anaconda.com/pkgs/main',\n 'https://repo.anaconda.com/pkgs/free',\n 'https://repo.anaconda.com/pkgs/r',\n 'https://repo.anaconda.com/pkgs/msys2',\n)\n\nDEFAULT_CUSTOM_CHANNELS = {\n 'pkgs/pro': 'https://repo.anaconda.com',\n}\n\nDEFAULT_CHANNELS = DEFAULT_CHANNELS_WIN if on_win else DEFAULT_CHANNELS_UNIX\n\nROOT_ENV_NAME = 'base'\n\nROOT_NO_RM = (\n 'python',\n 'pycosat',\n 'ruamel_yaml',\n 'conda',\n 'openssl',\n 'requests',\n)\n\nDEFAULT_AGGRESSIVE_UPDATE_PACKAGES = (\n 'ca-certificates',\n 'certifi',\n 'openssl',\n)\n\nif on_win:\n COMPATIBLE_SHELLS = (\n 'bash',\n 'cmd.exe',\n 'fish',\n 'tcsh',\n 'xonsh',\n 'zsh',\n 'powershell',\n )\nelse:\n COMPATIBLE_SHELLS = (\n 'bash',\n 'fish',\n 'tcsh',\n 'xonsh',\n 'zsh',\n 'powershell',\n )\n\n\n# Maximum priority, reserved for packages we really want to remove\nMAX_CHANNEL_PRIORITY = 10000\n\nCONDA_TARBALL_EXTENSION = '.tar.bz2'\n\nUNKNOWN_CHANNEL = \"<unknown>\"\n\n\nclass SafetyChecks(Enum):\n disabled = 'disabled'\n warn = 'warn'\n enabled = 'enabled'\n\n def __str__(self):\n return self.value\n\n\nclass PathConflict(Enum):\n clobber = 'clobber'\n warn = 'warn'\n prevent = 'prevent'\n\n 
def __str__(self):\n return self.value\n\n\nclass DepsModifier(Enum):\n \"\"\"Flags to enable alternate handling of dependencies.\"\"\"\n NOT_SET = 'not_set' # default\n NO_DEPS = 'no_deps'\n ONLY_DEPS = 'only_deps'\n\n def __str__(self):\n return self.value\n\n\nclass UpdateModifier(Enum):\n SPECS_SATISFIED_SKIP_SOLVE = 'specs_satisfied_skip_solve'\n FREEZE_INSTALLED = 'freeze_installed' # freeze is a better name for --no-update-deps\n UPDATE_DEPS = 'update_deps'\n UPDATE_SPECS = 'update_specs' # default\n UPDATE_ALL = 'update_all'\n # TODO: add REINSTALL_ALL, see https://github.com/conda/conda/issues/6247 and https://github.com/conda/conda/issues/3149 # NOQA\n\n def __str__(self):\n return self.value\n\n\nclass ChannelPriorityMeta(EnumMeta):\n\n def __call__(cls, value, *args, **kwargs):\n try:\n return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)\n except ValueError:\n if isinstance(value, string_types):\n from .._vendor.auxlib.type_coercion import typify\n value = typify(value)\n if value is True:\n value = 'flexible'\n elif value is False:\n value = cls.DISABLED\n return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)\n\n\nclass ChannelPriority(six_with_metaclass(ChannelPriorityMeta, Enum)):\n __name__ = \"ChannelPriority\"\n\n STRICT = 'strict'\n # STRICT_OR_FLEXIBLE = 'strict_or_flexible' # TODO: consider implementing if needed\n FLEXIBLE = 'flexible'\n DISABLED = 'disabled'\n\n def __str__(self):\n return self.value\n\n\nclass SatSolverChoice(Enum):\n PYCOSAT = 'pycosat'\n PYCRYPTOSAT = 'pycryptosat'\n PYSAT = 'pysat'\n\n def __str__(self):\n return self.value\n\n\n# Magic files for permissions determination\nPACKAGE_CACHE_MAGIC_FILE = 'urls.txt'\nPREFIX_MAGIC_FILE = join('conda-meta', 'history')\n\n\n# TODO: should be frozendict(), but I don't want to import frozendict from auxlib here.\nNAMESPACES_MAP = { # base package name, namespace\n \"python\": \"python\",\n \"r\": \"r\",\n \"r-base\": \"r\",\n \"mro-base\": \"r\",\n \"erlang\": \"erlang\",\n \"java\": \"java\",\n \"openjdk\": \"java\",\n \"julia\": \"julia\",\n \"latex\": \"latex\",\n \"lua\": \"lua\",\n \"nodejs\": \"js\",\n \"perl\": \"perl\",\n \"php\": \"php\",\n \"ruby\": \"ruby\",\n \"m2-base\": \"m2\",\n \"msys2-conda-epoch\": \"m2w64\",\n}\n\nNAMESPACE_PACKAGE_NAMES = frozenset(NAMESPACES_MAP)\nNAMESPACES = frozenset(itervalues(NAMESPACES_MAP))\n\n# Namespace arbiters of uniqueness\n# global: some repository established by Anaconda, Inc. and conda-forge\n# python: https://pypi.org/simple\n# r: https://cran.r-project.org/web/packages/available_packages_by_name.html\n# erlang: https://hex.pm/packages\n# java: https://repo1.maven.org/maven2/\n# julia: https://pkg.julialang.org/\n# latex: https://ctan.org/pkg\n# lua: https://luarocks.org/m/root\n# js: https://docs.npmjs.com/misc/registry\n# pascal: ???\n# perl: https://www.cpan.org/modules/01modules.index.html\n# php: https://packagist.org/\n# ruby: https://rubygems.org/gems\n# clojure: https://clojars.org/\n\n\n# Not all python namespace packages are registered on PyPI. If a package\n# contains files in site-packages, it probably belongs in the python namespace.\n", "path": "conda/base/constants.py"}]}
| 3,501 | 289 |
gh_patches_debug_14939
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-2721
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Access logging raises TypeError after `logging.logProcesses=False`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
If someone uses `logging.logProcesses = False` to disable the call to `os.getpid()` during logging, the [default formatter of Sanic](https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65) leads to the exception shown below.
See [`logging.logProcesses`](https://docs.python.org/3/howto/logging.html#optimization)
```log
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 445, in _format
return self._fmt % values
~~~~~~~~~~^~~~~~~~
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 449, in format
return self._format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 659, in formatMessage
return self._style.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 690, in format
s = self.formatMessage(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 953, in format
return fmt.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1110, in emit
msg = self.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 978, in handle
self.emit(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1706, in callHandlers
hdlr.handle(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1644, in handle
self.callHandlers(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1634, in _log
self.handle(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1489, in info
self._log(INFO, msg, args, **kwargs)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\application\motd.py", line 113, in display
out(indent("\n".join(lines), " "))
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\application\motd.py", line 39, in output
motd_class(logo, serve_location, data, extra).display()
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 579, in motd
MOTD.output(logo, serve_location, display, extra)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 533, in _helper
self.motd(server_settings=server_settings)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 327, in prepare
server_settings = self._helper(
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 176, in run
self.prepare(
...
TypeError: %d format: a real number is required, not NoneType
```
Without `os.getpid()`, the LogRecord is generated with a values dict like `values = {'process': None, ...}`. An exception is then raised when the formatter tries to substitute `values = {'process': None, ...}` into `"[%(process)d]"`.
I suggest using `[%(process)s]` instead of `[%(process)d]`.
https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65
If no [conversion flags](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting) (like `%06d`) are set, `%s` is also slightly faster than `%d` at converting an unsigned integer to a string.
See [Why is %s faster than %d for integer substitution in python?](https://stackoverflow.com/a/27800584/18677995)
And here is a shell snippet for a quick test:
```shell
python -m timeit -n 100000 -s "fstr='[%(process)s]'" "fstr % {'process':12345}"
python -m timeit -n 100000 -s "fstr='[%(process)d]'" "fstr % {'process':12345}"
```
Result on my laptop is:
```log
100000 loops, best of 5: 157 nsec per loop
100000 loops, best of 5: 160 nsec per loop
```
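
For reference, the same failure can be reproduced without Sanic at all; the sketch below uses only the standard library, and the logger name is just a placeholder:

```python
import logging

logging.logProcesses = False  # LogRecord.process becomes None from here on

handler = logging.StreamHandler()
# The '%(process)d' placeholder chokes on None; '%(process)s' would print "None"
handler.setFormatter(logging.Formatter("[%(process)d] [%(levelname)s] %(message)s"))

demo = logging.getLogger("demo")  # placeholder logger name
demo.addHandler(handler)
demo.warning("hello")  # logging's internal error handler prints the TypeError traceback to stderr
```

Switching the format string to `"[%(process)s] [%(levelname)s] %(message)s"` makes the same call emit `[None] [WARNING] hello` instead.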
### Code snippet
```python
import logging
from sanic import Sanic
from sanic.response import text
logging.logProcesses = False
app = Sanic("MyHelloWorldApp")
@app.get("/")
async def hello_world(request):
return text("Hello, world.")
if __name__ == '__main__':
app.run(host="127.0.0.1", port=8080, debug=True)
```
### Expected Behavior
The log should look like this (the pid shows `None` instead of an exception being raised):
```log
[2023-02-09 10:39:08 +0800] [None] [INFO]
┌─────────────────────────────────────────────────────────────┐
│ Sanic v22.12.0 │
│ Goin' Fast @ http://127.0.0.1:8080 │
├───────────────────────┬─────────────────────────────────────┤
│ │ mode: debug, single worker │
│ ▄███ █████ ██ │ server: sanic, HTTP/1.1 │
│ ██ │ python: 3.11.1 │
│ ▀███████ ███▄ │ platform: Windows-10-10.0.22621-SP0 │
│ ██ │ packages: sanic-routing==22.8.0 │
│ ████ ████████▀ │ │
│ │ │
│ Build Fast. Run Fast. │ │
└───────────────────────┴─────────────────────────────────────┘
[2023-02-09 10:39:08 +0800] [None] [DEBUG] Creating multiprocessing context using 'spawn'
[2023-02-09 10:39:08 +0800] [None] [DEBUG] Starting a process: Sanic-Server-0-0
[2023-02-09 10:39:09 +0800] [None] [DEBUG] Process ack: Sanic-Server-0-0 [13504]
[2023-02-09 10:39:09 +0800] [None] [INFO] Starting worker [13504]
```
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Windows
### Sanic Version
22.12.0
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/log.py`
Content:
```
1 import logging
2 import sys
3
4 from enum import Enum
5 from typing import TYPE_CHECKING, Any, Dict
6 from warnings import warn
7
8 from sanic.compat import is_atty
9
10
11 # Python 3.11 changed the way Enum formatting works for mixed-in types.
12 if sys.version_info < (3, 11, 0):
13
14 class StrEnum(str, Enum):
15 pass
16
17 else:
18 if not TYPE_CHECKING:
19 from enum import StrEnum
20
21
22 LOGGING_CONFIG_DEFAULTS: Dict[str, Any] = dict( # no cov
23 version=1,
24 disable_existing_loggers=False,
25 loggers={
26 "sanic.root": {"level": "INFO", "handlers": ["console"]},
27 "sanic.error": {
28 "level": "INFO",
29 "handlers": ["error_console"],
30 "propagate": True,
31 "qualname": "sanic.error",
32 },
33 "sanic.access": {
34 "level": "INFO",
35 "handlers": ["access_console"],
36 "propagate": True,
37 "qualname": "sanic.access",
38 },
39 "sanic.server": {
40 "level": "INFO",
41 "handlers": ["console"],
42 "propagate": True,
43 "qualname": "sanic.server",
44 },
45 },
46 handlers={
47 "console": {
48 "class": "logging.StreamHandler",
49 "formatter": "generic",
50 "stream": sys.stdout,
51 },
52 "error_console": {
53 "class": "logging.StreamHandler",
54 "formatter": "generic",
55 "stream": sys.stderr,
56 },
57 "access_console": {
58 "class": "logging.StreamHandler",
59 "formatter": "access",
60 "stream": sys.stdout,
61 },
62 },
63 formatters={
64 "generic": {
65 "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
66 "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
67 "class": "logging.Formatter",
68 },
69 "access": {
70 "format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: "
71 + "%(request)s %(message)s %(status)d %(byte)d",
72 "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
73 "class": "logging.Formatter",
74 },
75 },
76 )
77 """
78 Defult logging configuration
79 """
80
81
82 class Colors(StrEnum): # no cov
83 END = "\033[0m"
84 BOLD = "\033[1m"
85 BLUE = "\033[34m"
86 GREEN = "\033[32m"
87 PURPLE = "\033[35m"
88 RED = "\033[31m"
89 SANIC = "\033[38;2;255;13;104m"
90 YELLOW = "\033[01;33m"
91
92
93 class VerbosityFilter(logging.Filter):
94 verbosity: int = 0
95
96 def filter(self, record: logging.LogRecord) -> bool:
97 verbosity = getattr(record, "verbosity", 0)
98 return verbosity <= self.verbosity
99
100
101 _verbosity_filter = VerbosityFilter()
102
103 logger = logging.getLogger("sanic.root") # no cov
104 """
105 General Sanic logger
106 """
107 logger.addFilter(_verbosity_filter)
108
109 error_logger = logging.getLogger("sanic.error") # no cov
110 """
111 Logger used by Sanic for error logging
112 """
113 error_logger.addFilter(_verbosity_filter)
114
115 access_logger = logging.getLogger("sanic.access") # no cov
116 """
117 Logger used by Sanic for access logging
118 """
119 access_logger.addFilter(_verbosity_filter)
120
121 server_logger = logging.getLogger("sanic.server") # no cov
122 """
123 Logger used by Sanic for server related messages
124 """
125 logger.addFilter(_verbosity_filter)
126
127
128 def deprecation(message: str, version: float): # no cov
129 version_info = f"[DEPRECATION v{version}] "
130 if is_atty():
131 version_info = f"{Colors.RED}{version_info}"
132 message = f"{Colors.YELLOW}{message}{Colors.END}"
133 warn(version_info + message, DeprecationWarning)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sanic/log.py b/sanic/log.py
--- a/sanic/log.py
+++ b/sanic/log.py
@@ -62,13 +62,13 @@
},
formatters={
"generic": {
- "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
+ "format": "%(asctime)s [%(process)s] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
"access": {
"format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: "
- + "%(request)s %(message)s %(status)d %(byte)d",
+ + "%(request)s %(message)s %(status)s %(byte)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
|
{"golden_diff": "diff --git a/sanic/log.py b/sanic/log.py\n--- a/sanic/log.py\n+++ b/sanic/log.py\n@@ -62,13 +62,13 @@\n },\n formatters={\n \"generic\": {\n- \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n+ \"format\": \"%(asctime)s [%(process)s] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n- + \"%(request)s %(message)s %(status)d %(byte)d\",\n+ + \"%(request)s %(message)s %(status)s %(byte)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n", "issue": "Access logging raise TypeError after `logging.logProcesses=False`\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Describe the bug\r\n\r\nIf someone use `logging.logProcesses = False` to disable the calling of `os.getpid()` while logging, the [default formatter of Sanic](https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65) will lead to the exception as shown below.\r\n\r\nSee [`logging.logProcesses`](https://docs.python.org/3/howto/logging.html#optimization)\r\n\r\n```log\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 445, in _format\r\n return self._fmt % values\r\n ~~~~~~~~~~^~~~~~~~\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 449, in format\r\n return self._format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 659, in formatMessage\r\n return self._style.format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 690, in format\r\n s = self.formatMessage(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 953, in format\r\n return fmt.format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1110, in emit\r\n msg = self.format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 978, in handle\r\n self.emit(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1706, in callHandlers\r\n hdlr.handle(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1644, in handle\r\n self.callHandlers(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1634, in _log\r\n self.handle(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1489, in info\r\n self._log(INFO, msg, args, **kwargs)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\application\\motd.py\", line 113, in display\r\n out(indent(\"\\n\".join(lines), \" \"))\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\application\\motd.py\", line 39, in output\r\n motd_class(logo, serve_location, data, extra).display()\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 579, in motd\r\n MOTD.output(logo, serve_location, display, extra)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 533, in _helper\r\n self.motd(server_settings=server_settings)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 327, in prepare\r\n server_settings = self._helper(\r\n File 
\"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 176, in run\r\n self.prepare(\r\n...\r\nTypeError: %d format: a real number is required, not NoneType\r\n```\r\n\r\nWithout `os.getpid()`, the LogRecord will only be generated with a value dict like `values = {'process': None, ...}`. Then, exception raises when the formatter tries to translate `values = {'process': None, ...}` into `\"[%(process)d]\"`.\r\n\r\nI suggest to use `[%(process)s]` instead of `[%(process)d]`.\r\n\r\nhttps://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65\r\n\r\nIf there is no [conversion flags](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting) (like `%06d`) setted, `%s` also has a better performance on converting unsigned integer to string.\r\n\r\nSee [Why is %s faster than %d for integer substitution in python?](https://stackoverflow.com/a/27800584/18677995)\r\n\r\nAnd here is a shell snippet for you to make a brief test:\r\n```shell\r\npython -m timeit -n 100000 -s \"fstr='[%(process)s]'\" \"fstr % {'process':12345}\"\r\npython -m timeit -n 100000 -s \"fstr='[%(process)d]'\" \"fstr % {'process':12345}\"\r\n```\r\n\r\nResult on my laptop is:\r\n```log\r\n100000 loops, best of 5: 157 nsec per loop\r\n100000 loops, best of 5: 160 nsec per loop\r\n```\r\n\r\n### Code snippet\r\n\r\n```python\r\nimport logging\r\n\r\nfrom sanic import Sanic\r\nfrom sanic.response import text\r\n\r\nlogging.logProcesses = False\r\n\r\napp = Sanic(\"MyHelloWorldApp\")\r\n\r\n\r\[email protected](\"/\")\r\nasync def hello_world(request):\r\n return text(\"Hello, world.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host=\"127.0.0.1\", port=8080, debug=True)\r\n```\r\n\r\n### Expected Behavior\r\n\r\nLog be like (pid shows `None` instead of raising exceptions):\r\n\r\n```log\r\n[2023-02-09 10:39:08 +0800] [None] [INFO] \r\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\n \u2502 Sanic v22.12.0 \u2502\r\n \u2502 Goin' Fast @ http://127.0.0.1:8080 \u2502\r\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\r\n \u2502 \u2502 mode: debug, single worker \u2502\r\n \u2502 \u2584\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588 \u2588\u2588 \u2502 server: sanic, HTTP/1.1 \u2502\r\n \u2502 \u2588\u2588 \u2502 python: 3.11.1 \u2502\r\n \u2502 \u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2584 \u2502 platform: Windows-10-10.0.22621-SP0 \u2502\r\n \u2502 \u2588\u2588 \u2502 packages: sanic-routing==22.8.0 \u2502\r\n \u2502 \u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2580 \u2502 \u2502\r\n \u2502 \u2502 \u2502\r\n \u2502 Build Fast. Run Fast. 
\u2502 \u2502\r\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\r\n\r\n[2023-02-09 10:39:08 +0800] [None] [DEBUG] Creating multiprocessing context using 'spawn'\r\n[2023-02-09 10:39:08 +0800] [None] [DEBUG] Starting a process: Sanic-Server-0-0\r\n[2023-02-09 10:39:09 +0800] [None] [DEBUG] Process ack: Sanic-Server-0-0 [13504]\r\n[2023-02-09 10:39:09 +0800] [None] [INFO] Starting worker [13504]\r\n```\r\n\r\n### How do you run Sanic?\r\n\r\nAs a script (`app.run` or `Sanic.serve`)\r\n\r\n### Operating System\r\n\r\nWindows\r\n\r\n### Sanic Version\r\n\r\n22.12.0\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Dict\nfrom warnings import warn\n\nfrom sanic.compat import is_atty\n\n\n# Python 3.11 changed the way Enum formatting works for mixed-in types.\nif sys.version_info < (3, 11, 0):\n\n class StrEnum(str, Enum):\n pass\n\nelse:\n if not TYPE_CHECKING:\n from enum import StrEnum\n\n\nLOGGING_CONFIG_DEFAULTS: Dict[str, Any] = dict( # no cov\n version=1,\n disable_existing_loggers=False,\n loggers={\n \"sanic.root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.error\",\n },\n \"sanic.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"access_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.access\",\n },\n \"sanic.server\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.server\",\n },\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stdout,\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stderr,\n },\n \"access_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"access\",\n \"stream\": sys.stdout,\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n + \"%(request)s %(message)s %(status)d %(byte)d\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n },\n)\n\"\"\"\nDefult logging configuration\n\"\"\"\n\n\nclass Colors(StrEnum): # no cov\n END = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n BLUE = \"\\033[34m\"\n GREEN = \"\\033[32m\"\n PURPLE = \"\\033[35m\"\n RED = \"\\033[31m\"\n SANIC = \"\\033[38;2;255;13;104m\"\n YELLOW = \"\\033[01;33m\"\n\n\nclass VerbosityFilter(logging.Filter):\n verbosity: int = 0\n\n def filter(self, record: logging.LogRecord) -> bool:\n verbosity = getattr(record, \"verbosity\", 0)\n return verbosity <= self.verbosity\n\n\n_verbosity_filter = VerbosityFilter()\n\nlogger = logging.getLogger(\"sanic.root\") # no cov\n\"\"\"\nGeneral Sanic logger\n\"\"\"\nlogger.addFilter(_verbosity_filter)\n\nerror_logger = logging.getLogger(\"sanic.error\") # no cov\n\"\"\"\nLogger used by Sanic for error 
logging\n\"\"\"\nerror_logger.addFilter(_verbosity_filter)\n\naccess_logger = logging.getLogger(\"sanic.access\") # no cov\n\"\"\"\nLogger used by Sanic for access logging\n\"\"\"\naccess_logger.addFilter(_verbosity_filter)\n\nserver_logger = logging.getLogger(\"sanic.server\") # no cov\n\"\"\"\nLogger used by Sanic for server related messages\n\"\"\"\nlogger.addFilter(_verbosity_filter)\n\n\ndef deprecation(message: str, version: float): # no cov\n version_info = f\"[DEPRECATION v{version}] \"\n if is_atty():\n version_info = f\"{Colors.RED}{version_info}\"\n message = f\"{Colors.YELLOW}{message}{Colors.END}\"\n warn(version_info + message, DeprecationWarning)\n", "path": "sanic/log.py"}], "after_files": [{"content": "import logging\nimport sys\n\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Dict\nfrom warnings import warn\n\nfrom sanic.compat import is_atty\n\n\n# Python 3.11 changed the way Enum formatting works for mixed-in types.\nif sys.version_info < (3, 11, 0):\n\n class StrEnum(str, Enum):\n pass\n\nelse:\n if not TYPE_CHECKING:\n from enum import StrEnum\n\n\nLOGGING_CONFIG_DEFAULTS: Dict[str, Any] = dict( # no cov\n version=1,\n disable_existing_loggers=False,\n loggers={\n \"sanic.root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.error\",\n },\n \"sanic.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"access_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.access\",\n },\n \"sanic.server\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.server\",\n },\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stdout,\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stderr,\n },\n \"access_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"access\",\n \"stream\": sys.stdout,\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)s] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n + \"%(request)s %(message)s %(status)s %(byte)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n },\n)\n\"\"\"\nDefult logging configuration\n\"\"\"\n\n\nclass Colors(StrEnum): # no cov\n END = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n BLUE = \"\\033[34m\"\n GREEN = \"\\033[32m\"\n PURPLE = \"\\033[35m\"\n RED = \"\\033[31m\"\n SANIC = \"\\033[38;2;255;13;104m\"\n YELLOW = \"\\033[01;33m\"\n\n\nclass VerbosityFilter(logging.Filter):\n verbosity: int = 0\n\n def filter(self, record: logging.LogRecord) -> bool:\n verbosity = getattr(record, \"verbosity\", 0)\n return verbosity <= self.verbosity\n\n\n_verbosity_filter = VerbosityFilter()\n\nlogger = logging.getLogger(\"sanic.root\") # no cov\n\"\"\"\nGeneral Sanic logger\n\"\"\"\nlogger.addFilter(_verbosity_filter)\n\nerror_logger = logging.getLogger(\"sanic.error\") # no cov\n\"\"\"\nLogger used by Sanic for error logging\n\"\"\"\nerror_logger.addFilter(_verbosity_filter)\n\naccess_logger = logging.getLogger(\"sanic.access\") # no cov\n\"\"\"\nLogger used by Sanic for access logging\n\"\"\"\naccess_logger.addFilter(_verbosity_filter)\n\nserver_logger = 
logging.getLogger(\"sanic.server\") # no cov\n\"\"\"\nLogger used by Sanic for server related messages\n\"\"\"\nlogger.addFilter(_verbosity_filter)\n\n\ndef deprecation(message: str, version: float): # no cov\n version_info = f\"[DEPRECATION v{version}] \"\n if is_atty():\n version_info = f\"{Colors.RED}{version_info}\"\n message = f\"{Colors.YELLOW}{message}{Colors.END}\"\n warn(version_info + message, DeprecationWarning)\n", "path": "sanic/log.py"}]}
| 3,375 | 221 |
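A quick standard-library check of the formatting point in the sanic row above: with `logging.logProcesses = False`, `LogRecord.process` is `None`, which the `%d` conversion rejects while `%s` renders harmlessly. This is an illustrative sketch only; the logger name is made up for the demo.

```python
import logging
import sys

logging.logProcesses = False  # LogRecord.process is set to None from here on

# "%d" needs a real number, so substituting None raises the TypeError from the report.
try:
    "[%(process)d]" % {"process": None}
except TypeError as exc:
    print("'%(process)d' failed:", exc)

# "%s" renders None without complaint, which is why the issue proposes it.
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
    logging.Formatter("%(asctime)s [%(process)s] [%(levelname)s] %(message)s")
)
log = logging.getLogger("process_format_demo")
log.addHandler(handler)
log.warning("still logs with a None pid")  # e.g. "... [None] [WARNING] still logs with a None pid"
```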
gh_patches_debug_28507 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1815 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Worker hangs when connecting to PS.
The worker waits until the grpc channel of the PS is ready using `channel_ready_future`. We should set a timeout and retry.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/worker/main.py`
Content:
```
1 import grpc
2
3 from elasticdl.python.common import log_utils
4 from elasticdl.python.common.args import parse_worker_args
5 from elasticdl.python.common.grpc_utils import build_channel
6 from elasticdl.python.worker.worker import Worker
7
8
9 def main():
10 args = parse_worker_args()
11 logger = log_utils.get_logger(__name__)
12 logger.info("Starting worker %d", args.worker_id)
13 if args.master_addr is None:
14 raise ValueError("master_addr is missing for worker")
15
16 master_channel = build_channel(args.master_addr)
17
18 ps_channels = []
19 if args.ps_addrs:
20 ps_addrs = args.ps_addrs.split(",")
21
22 for addr in ps_addrs:
23 # addr is in the form as "ps-pod-name.namespace.svc:port"
24 channel = build_channel(addr)
25
26 # Wait the channel is ready by a Future object.
27 grpc.channel_ready_future(channel).result()
28 logger.info(
29 "grpc channel %s to connect pod %s is ready"
30 % (addr, addr.split(".")[0])
31 )
32 ps_channels.append(channel)
33
34 worker = Worker(
35 args,
36 channel=master_channel,
37 ps_channels=ps_channels,
38 set_parallelism=True,
39 )
40 worker.run()
41
42
43 if __name__ == "__main__":
44 main()
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py
--- a/elasticdl/python/worker/main.py
+++ b/elasticdl/python/worker/main.py
@@ -5,6 +5,9 @@
from elasticdl.python.common.grpc_utils import build_channel
from elasticdl.python.worker.worker import Worker
+CONNECT_PS_MAX_RETRIES = 3
+CONNECT_PS_TIMEOUT = 60
+
def main():
args = parse_worker_args()
@@ -23,13 +26,29 @@
# addr is in the form as "ps-pod-name.namespace.svc:port"
channel = build_channel(addr)
- # Wait the channel is ready by a Future object.
- grpc.channel_ready_future(channel).result()
- logger.info(
- "grpc channel %s to connect pod %s is ready"
- % (addr, addr.split(".")[0])
- )
- ps_channels.append(channel)
+ succeeded = False
+ for i in range(CONNECT_PS_MAX_RETRIES):
+ try:
+ grpc.channel_ready_future(channel).result(
+ timeout=CONNECT_PS_TIMEOUT
+ )
+ logger.info(
+ "grpc channel %s to connect pod %s is ready"
+ % (addr, addr.split(".")[0])
+ )
+ ps_channels.append(channel)
+ succeeded = True
+ break
+ except grpc.FutureTimeoutError:
+ logger.warning(
+ "Failed to connect pod %s with %d retry"
+ % (addr.split(".")[0], i)
+ )
+ if not succeeded:
+ raise TimeoutError(
+ "Time out to connect pod %s with 3 retries"
+ % addr.split(".")[0]
+ )
worker = Worker(
args,
|
{"golden_diff": "diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py\n--- a/elasticdl/python/worker/main.py\n+++ b/elasticdl/python/worker/main.py\n@@ -5,6 +5,9 @@\n from elasticdl.python.common.grpc_utils import build_channel\n from elasticdl.python.worker.worker import Worker\n \n+CONNECT_PS_MAX_RETRIES = 3\n+CONNECT_PS_TIMEOUT = 60\n+\n \n def main():\n args = parse_worker_args()\n@@ -23,13 +26,29 @@\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n \n- # Wait the channel is ready by a Future object.\n- grpc.channel_ready_future(channel).result()\n- logger.info(\n- \"grpc channel %s to connect pod %s is ready\"\n- % (addr, addr.split(\".\")[0])\n- )\n- ps_channels.append(channel)\n+ succeeded = False\n+ for i in range(CONNECT_PS_MAX_RETRIES):\n+ try:\n+ grpc.channel_ready_future(channel).result(\n+ timeout=CONNECT_PS_TIMEOUT\n+ )\n+ logger.info(\n+ \"grpc channel %s to connect pod %s is ready\"\n+ % (addr, addr.split(\".\")[0])\n+ )\n+ ps_channels.append(channel)\n+ succeeded = True\n+ break\n+ except grpc.FutureTimeoutError:\n+ logger.warning(\n+ \"Failed to connect pod %s with %d retry\"\n+ % (addr.split(\".\")[0], i)\n+ )\n+ if not succeeded:\n+ raise TimeoutError(\n+ \"Time out to connect pod %s with 3 retries\"\n+ % addr.split(\".\")[0]\n+ )\n \n worker = Worker(\n args,\n", "issue": "Worker hangs when connects to PS.\nThe worker will wait the grpc channel of PS is ready using `channel_ready_future`. We should set timeout and retry.\r\n\r\n\n", "before_files": [{"content": "import grpc\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n ps_addrs = args.ps_addrs.split(\",\")\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n\n # Wait the channel is ready by a Future object.\n grpc.channel_ready_future(channel).result()\n logger.info(\n \"grpc channel %s to connect pod %s is ready\"\n % (addr, addr.split(\".\")[0])\n )\n ps_channels.append(channel)\n\n worker = Worker(\n args,\n channel=master_channel,\n ps_channels=ps_channels,\n set_parallelism=True,\n )\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}], "after_files": [{"content": "import grpc\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\nCONNECT_PS_MAX_RETRIES = 3\nCONNECT_PS_TIMEOUT = 60\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n ps_addrs = args.ps_addrs.split(\",\")\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n\n 
succeeded = False\n for i in range(CONNECT_PS_MAX_RETRIES):\n try:\n grpc.channel_ready_future(channel).result(\n timeout=CONNECT_PS_TIMEOUT\n )\n logger.info(\n \"grpc channel %s to connect pod %s is ready\"\n % (addr, addr.split(\".\")[0])\n )\n ps_channels.append(channel)\n succeeded = True\n break\n except grpc.FutureTimeoutError:\n logger.warning(\n \"Failed to connect pod %s with %d retry\"\n % (addr.split(\".\")[0], i)\n )\n if not succeeded:\n raise TimeoutError(\n \"Time out to connect pod %s with 3 retries\"\n % addr.split(\".\")[0]\n )\n\n worker = Worker(\n args,\n channel=master_channel,\n ps_channels=ps_channels,\n set_parallelism=True,\n )\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}]}
| 712 | 397 |
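The retry loop from the elasticdl diff above can be lifted into a small helper and tried on its own. The sketch below assumes only that `grpcio` is installed; the address at the bottom is a hypothetical placeholder, and the constants mirror the ones the patch introduces.

```python
import grpc

CONNECT_PS_MAX_RETRIES = 3
CONNECT_PS_TIMEOUT = 60


def wait_for_channel(addr, retries=CONNECT_PS_MAX_RETRIES, timeout=CONNECT_PS_TIMEOUT):
    """Return a ready channel to `addr`, retrying with a per-attempt timeout."""
    channel = grpc.insecure_channel(addr)
    for attempt in range(retries):
        try:
            grpc.channel_ready_future(channel).result(timeout=timeout)
            return channel
        except grpc.FutureTimeoutError:
            print("attempt %d to reach %s timed out" % (attempt + 1, addr))
    raise TimeoutError("could not connect to %s after %d retries" % (addr, retries))


# Hypothetical PS address in the "pod-name.namespace.svc:port" form used above.
# channel = wait_for_channel("elasticdl-ps-0.default.svc:2222")
```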
gh_patches_debug_33130 | rasdani/github-patches | git_diff | sktime__sktime-1666 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] evaluate fails with Imputer
**Describe the bug**
Calling `evaluate` with a pipeline and imputer fails.
**To Reproduce**
```python
import pandas as pd
import numpy as np
from sktime.transformations.series.impute import Imputer
from sktime.forecasting.compose import ForecastingPipeline
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.model_evaluation import evaluate
from sktime.forecasting.model_selection import SlidingWindowSplitter
y = pd.Series(np.random.normal(size=100))
X = pd.DataFrame(np.random.normal(size=(100, 2)))
X.iloc[3, 1] = np.nan
cv = SlidingWindowSplitter(fh=[1, 2, 3])
forecaster = ForecastingPipeline([
("impute", Imputer()),
("forecast", NaiveForecaster())
])
evaluate(forecaster, cv=cv, y=y, X=X)
```
**Expected behavior**
No error.
**Versions**
<details>
System:
python: 3.7.8 | packaged by conda-forge | (default, Jul 31 2020, 02:37:09) [Clang 10.0.1 ]
executable: /Users/mloning/.conda/envs/sktime-dev/bin/python
machine: Darwin-19.6.0-x86_64-i386-64bit
Python dependencies:
pip: 21.2.4
setuptools: 49.6.0.post20210108
sklearn: 0.24.2
sktime: 0.8.1
statsmodels: 0.12.1
numpy: 1.19.3
scipy: 1.6.0
Cython: 0.29.21
pandas: 1.2.3
matplotlib: 3.3.4
joblib: 1.0.1
numba: 0.53.1
pmdarima: 1.8.2
tsfresh: 0.17.0
</details>
<!-- Thanks for contributing! -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/transformations/series/impute.py`
Content:
```
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Utilities to impute series with missing values."""
5
6 __author__ = ["Martin Walter"]
7 __all__ = ["Imputer"]
8
9
10 import numpy as np
11 import pandas as pd
12
13 from sklearn.base import clone
14 from sklearn.utils import check_random_state
15
16 from sktime.transformations.base import _SeriesToSeriesTransformer
17 from sktime.utils.validation.series import check_series
18 from sktime.forecasting.trend import PolynomialTrendForecaster
19 from sktime.forecasting.base import ForecastingHorizon
20
21
22 class Imputer(_SeriesToSeriesTransformer):
23 """Missing value imputation.
24
25 The Imputer transforms input series by replacing missing values according
26 to an imputation strategy specified by `method`.
27
28 Parameters
29 ----------
30 method : str, default="drift"
31 Method to fill the missing values values.
32
33 * "drift" : drift/trend values by sktime.PolynomialTrendForecaster()
34 * "linear" : linear interpolation, by pd.Series.interpolate()
35 * "nearest" : use nearest value, by pd.Series.interpolate()
36 * "constant" : same constant value (given in arg value) for all NaN
37 * "mean" : pd.Series.mean()
38 * "median" : pd.Series.median()
39 * "backfill" ot "bfill" : adapted from pd.Series.fillna()
40 * "pad" or "ffill" : adapted from pd.Series.fillna()
41 * "random" : random values between pd.Series.min() and .max()
42 * "forecaster" : use an sktime Forecaster, given in arg forecaster
43
44 missing_values : int/float/str, default=None
45 The placeholder for the missing values. All occurrences of
46 missing_values will be imputed. If None then np.nan is used.
47 value : int/float, default=None
48 Value to use to fill missing values when method="constant".
49 forecaster : Any Forecaster based on sktime.BaseForecaster, default=None
50 Use a given Forecaster to impute by insample predictions when
51 method="forecaster". Before fitting, missing data is imputed with
52 method="ffill" or "bfill" as heuristic.
53 random_state : int/float/str, optional
54 Value to set random.seed() if method="random", default None
55
56 Examples
57 --------
58 >>> from sktime.transformations.series.impute import Imputer
59 >>> from sktime.datasets import load_airline
60 >>> y = load_airline()
61 >>> transformer = Imputer(method="drift")
62 >>> y_hat = transformer.fit_transform(y)
63 """
64
65 _tags = {
66 "fit-in-transform": True,
67 "handles-missing-data": True,
68 "skip-inverse-transform": True,
69 "univariate-only": False,
70 }
71
72 def __init__(
73 self,
74 method="drift",
75 random_state=None,
76 value=None,
77 forecaster=None,
78 missing_values=None,
79 ):
80
81 self.method = method
82 self.missing_values = missing_values
83 self.value = value
84 self.forecaster = forecaster
85 self.random_state = random_state
86 super(Imputer, self).__init__()
87
88 def transform(self, Z, X=None):
89 """Transform data.
90
91 Returns a transformed version of Z.
92
93 Parameters
94 ----------
95 Z : pd.Series, pd.DataFrame
96
97 Returns
98 -------
99 Z : pd.Series, pd.DataFrame
100 Transformed time series(es).
101 """
102 self.check_is_fitted()
103 self._check_method()
104 Z = check_series(Z)
105 Z = Z.copy()
106
107 # replace missing_values with np.nan
108 if self.missing_values:
109 Z = Z.replace(to_replace=self.missing_values, value=np.nan)
110
111 if not _has_missing_values(Z):
112 return Z
113
114 elif self.method == "random":
115 if isinstance(Z, pd.DataFrame):
116 for col in Z:
117 Z[col] = Z[col].apply(
118 lambda i: self._get_random(Z[col]) if np.isnan(i) else i
119 )
120 else:
121 Z = Z.apply(lambda i: self._get_random(Z) if np.isnan(i) else i)
122 elif self.method == "constant":
123 Z = Z.fillna(value=self.value)
124 elif self.method in ["backfill", "bfill", "pad", "ffill"]:
125 Z = Z.fillna(method=self.method)
126 elif self.method == "drift":
127 forecaster = PolynomialTrendForecaster(degree=1)
128 Z = _impute_with_forecaster(forecaster, Z)
129 elif self.method == "forecaster":
130 forecaster = clone(self.forecaster)
131 Z = _impute_with_forecaster(forecaster, Z)
132 elif self.method == "mean":
133 Z = Z.fillna(value=Z.mean())
134 elif self.method == "median":
135 Z = Z.fillna(value=Z.median())
136 elif self.method in ["nearest", "linear"]:
137 Z = Z.interpolate(method=self.method)
138 else:
139 raise ValueError(f"`method`: {self.method} not available.")
140 # fill first/last elements of series,
141 # as some methods (e.g. "linear") cant impute those
142 Z = Z.fillna(method="ffill").fillna(method="backfill")
143 return Z
144
145 def _check_method(self):
146 if (
147 self.value is not None
148 and self.method != "constant"
149 or self.method == "constant"
150 and self.value is None
151 ):
152 raise ValueError(
153 """Imputing with a value can only be
154 used if method="constant" and if parameter "value" is not None"""
155 )
156 elif (
157 self.forecaster is not None
158 and self.method != "forecaster"
159 or self.method == "forecaster"
160 and self.forecaster is None
161 ):
162 raise ValueError(
163 """Imputing with a forecaster can only be used if
164 method=\"forecaster\" and if arg forecaster is not None"""
165 )
166 else:
167 pass
168
169 def _get_random(self, Z):
170 """Create a random int or float value.
171
172 :param Z: Series
173 :type Z: pd.Series
174 :return: Random int or float between min and max of Z
175 :rtype: int/float
176 """
177 rng = check_random_state(self.random_state)
178 # check if series contains only int or int-like values (e.g. 3.0)
179 if (Z.dropna() % 1 == 0).all():
180 return rng.randint(Z.min(), Z.max())
181 else:
182 return rng.uniform(Z.min(), Z.max())
183
184
185 def _impute_with_forecaster(forecaster, Z):
186 """Use a given forecaster for imputation by in-sample predictions.
187
188 Parameters
189 ----------
190 forecaster: Forecaster
191 Forecaster to use for imputation
192 Z : pd.Series or pd.DataFrame
193 Series to impute.
194
195 Returns
196 -------
197 zt : pd.Series or pd.DataFrame
198 Series with imputed values.
199 """
200 if isinstance(Z, pd.Series):
201 series = [Z]
202 elif isinstance(Z, pd.DataFrame):
203 series = [Z[column] for column in Z]
204
205 for z in series:
206 # define fh based on index of missing values
207 na_index = z.index[z.isna()]
208 fh = ForecastingHorizon(values=na_index, is_relative=False)
209
210 # fill NaN before fitting with ffill and backfill (heuristic)
211 forecaster.fit(y=z.fillna(method="ffill").fillna(method="backfill"), fh=fh)
212
213 # replace missing values with predicted values
214 z[na_index] = forecaster.predict()
215 return Z
216
217
218 def _has_missing_values(Z):
219 return Z.isnull().to_numpy().any()
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sktime/transformations/series/impute.py b/sktime/transformations/series/impute.py
--- a/sktime/transformations/series/impute.py
+++ b/sktime/transformations/series/impute.py
@@ -9,14 +9,13 @@
import numpy as np
import pandas as pd
-
from sklearn.base import clone
from sklearn.utils import check_random_state
+from sktime.forecasting.base import ForecastingHorizon
+from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.transformations.base import _SeriesToSeriesTransformer
from sktime.utils.validation.series import check_series
-from sktime.forecasting.trend import PolynomialTrendForecaster
-from sktime.forecasting.base import ForecastingHorizon
class Imputer(_SeriesToSeriesTransformer):
@@ -111,7 +110,7 @@
if not _has_missing_values(Z):
return Z
- elif self.method == "random":
+ if self.method == "random":
if isinstance(Z, pd.DataFrame):
for col in Z:
Z[col] = Z[col].apply(
@@ -203,15 +202,16 @@
series = [Z[column] for column in Z]
for z in series:
- # define fh based on index of missing values
- na_index = z.index[z.isna()]
- fh = ForecastingHorizon(values=na_index, is_relative=False)
+ if _has_missing_values(z):
+ # define fh based on index of missing values
+ na_index = z.index[z.isna()]
+ fh = ForecastingHorizon(values=na_index, is_relative=False)
- # fill NaN before fitting with ffill and backfill (heuristic)
- forecaster.fit(y=z.fillna(method="ffill").fillna(method="backfill"), fh=fh)
+ # fill NaN before fitting with ffill and backfill (heuristic)
+ forecaster.fit(y=z.fillna(method="ffill").fillna(method="backfill"), fh=fh)
- # replace missing values with predicted values
- z[na_index] = forecaster.predict()
+ # replace missing values with predicted values
+ z[na_index] = forecaster.predict()
return Z
|
{"golden_diff": "diff --git a/sktime/transformations/series/impute.py b/sktime/transformations/series/impute.py\n--- a/sktime/transformations/series/impute.py\n+++ b/sktime/transformations/series/impute.py\n@@ -9,14 +9,13 @@\n \n import numpy as np\n import pandas as pd\n-\n from sklearn.base import clone\n from sklearn.utils import check_random_state\n \n+from sktime.forecasting.base import ForecastingHorizon\n+from sktime.forecasting.trend import PolynomialTrendForecaster\n from sktime.transformations.base import _SeriesToSeriesTransformer\n from sktime.utils.validation.series import check_series\n-from sktime.forecasting.trend import PolynomialTrendForecaster\n-from sktime.forecasting.base import ForecastingHorizon\n \n \n class Imputer(_SeriesToSeriesTransformer):\n@@ -111,7 +110,7 @@\n if not _has_missing_values(Z):\n return Z\n \n- elif self.method == \"random\":\n+ if self.method == \"random\":\n if isinstance(Z, pd.DataFrame):\n for col in Z:\n Z[col] = Z[col].apply(\n@@ -203,15 +202,16 @@\n series = [Z[column] for column in Z]\n \n for z in series:\n- # define fh based on index of missing values\n- na_index = z.index[z.isna()]\n- fh = ForecastingHorizon(values=na_index, is_relative=False)\n+ if _has_missing_values(z):\n+ # define fh based on index of missing values\n+ na_index = z.index[z.isna()]\n+ fh = ForecastingHorizon(values=na_index, is_relative=False)\n \n- # fill NaN before fitting with ffill and backfill (heuristic)\n- forecaster.fit(y=z.fillna(method=\"ffill\").fillna(method=\"backfill\"), fh=fh)\n+ # fill NaN before fitting with ffill and backfill (heuristic)\n+ forecaster.fit(y=z.fillna(method=\"ffill\").fillna(method=\"backfill\"), fh=fh)\n \n- # replace missing values with predicted values\n- z[na_index] = forecaster.predict()\n+ # replace missing values with predicted values\n+ z[na_index] = forecaster.predict()\n return Z\n", "issue": "[BUG] evaluate fails with Imputer\n**Describe the bug**\r\nCalling `evaluate` with a pipeline and imputer fails. \r\n\r\n**To Reproduce**\r\n```python\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sktime.transformations.series.impute import Imputer\r\nfrom sktime.forecasting.compose import ForecastingPipeline\r\nfrom sktime.forecasting.naive import NaiveForecaster\r\nfrom sktime.forecasting.model_evaluation import evaluate\r\nfrom sktime.forecasting.model_selection import SlidingWindowSplitter\r\n\r\ny = pd.Series(np.random.normal(size=100))\r\nX = pd.DataFrame(np.random.normal(size=(100, 2)))\r\nX.iloc[3, 1] = np.nan\r\n\r\ncv = SlidingWindowSplitter(fh=[1, 2, 3])\r\n\r\nforecaster = ForecastingPipeline([\r\n (\"impute\", Imputer()),\r\n (\"forecast\", NaiveForecaster())\r\n])\r\n\r\nevaluate(forecaster, cv=cv, y=y, X=X)\r\n```\r\n\r\n**Expected behavior**\r\nNo error. \r\n\r\n**Versions**\r\n<details>\r\nSystem:\r\n python: 3.7.8 | packaged by conda-forge | (default, Jul 31 2020, 02:37:09) [Clang 10.0.1 ]\r\nexecutable: /Users/mloning/.conda/envs/sktime-dev/bin/python\r\n machine: Darwin-19.6.0-x86_64-i386-64bit\r\n\r\nPython dependencies:\r\n pip: 21.2.4\r\n setuptools: 49.6.0.post20210108\r\n sklearn: 0.24.2\r\n sktime: 0.8.1\r\n statsmodels: 0.12.1\r\n numpy: 1.19.3\r\n scipy: 1.6.0\r\n Cython: 0.29.21\r\n pandas: 1.2.3\r\n matplotlib: 3.3.4\r\n joblib: 1.0.1\r\n numba: 0.53.1\r\n pmdarima: 1.8.2\r\n tsfresh: 0.17.0\r\n</details>\r\n\r\n<!-- Thanks for contributing! 
-->\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Utilities to impute series with missing values.\"\"\"\n\n__author__ = [\"Martin Walter\"]\n__all__ = [\"Imputer\"]\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.base import clone\nfrom sklearn.utils import check_random_state\n\nfrom sktime.transformations.base import _SeriesToSeriesTransformer\nfrom sktime.utils.validation.series import check_series\nfrom sktime.forecasting.trend import PolynomialTrendForecaster\nfrom sktime.forecasting.base import ForecastingHorizon\n\n\nclass Imputer(_SeriesToSeriesTransformer):\n \"\"\"Missing value imputation.\n\n The Imputer transforms input series by replacing missing values according\n to an imputation strategy specified by `method`.\n\n Parameters\n ----------\n method : str, default=\"drift\"\n Method to fill the missing values values.\n\n * \"drift\" : drift/trend values by sktime.PolynomialTrendForecaster()\n * \"linear\" : linear interpolation, by pd.Series.interpolate()\n * \"nearest\" : use nearest value, by pd.Series.interpolate()\n * \"constant\" : same constant value (given in arg value) for all NaN\n * \"mean\" : pd.Series.mean()\n * \"median\" : pd.Series.median()\n * \"backfill\" ot \"bfill\" : adapted from pd.Series.fillna()\n * \"pad\" or \"ffill\" : adapted from pd.Series.fillna()\n * \"random\" : random values between pd.Series.min() and .max()\n * \"forecaster\" : use an sktime Forecaster, given in arg forecaster\n\n missing_values : int/float/str, default=None\n The placeholder for the missing values. All occurrences of\n missing_values will be imputed. If None then np.nan is used.\n value : int/float, default=None\n Value to use to fill missing values when method=\"constant\".\n forecaster : Any Forecaster based on sktime.BaseForecaster, default=None\n Use a given Forecaster to impute by insample predictions when\n method=\"forecaster\". 
Before fitting, missing data is imputed with\n method=\"ffill\" or \"bfill\" as heuristic.\n random_state : int/float/str, optional\n Value to set random.seed() if method=\"random\", default None\n\n Examples\n --------\n >>> from sktime.transformations.series.impute import Imputer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n >>> transformer = Imputer(method=\"drift\")\n >>> y_hat = transformer.fit_transform(y)\n \"\"\"\n\n _tags = {\n \"fit-in-transform\": True,\n \"handles-missing-data\": True,\n \"skip-inverse-transform\": True,\n \"univariate-only\": False,\n }\n\n def __init__(\n self,\n method=\"drift\",\n random_state=None,\n value=None,\n forecaster=None,\n missing_values=None,\n ):\n\n self.method = method\n self.missing_values = missing_values\n self.value = value\n self.forecaster = forecaster\n self.random_state = random_state\n super(Imputer, self).__init__()\n\n def transform(self, Z, X=None):\n \"\"\"Transform data.\n\n Returns a transformed version of Z.\n\n Parameters\n ----------\n Z : pd.Series, pd.DataFrame\n\n Returns\n -------\n Z : pd.Series, pd.DataFrame\n Transformed time series(es).\n \"\"\"\n self.check_is_fitted()\n self._check_method()\n Z = check_series(Z)\n Z = Z.copy()\n\n # replace missing_values with np.nan\n if self.missing_values:\n Z = Z.replace(to_replace=self.missing_values, value=np.nan)\n\n if not _has_missing_values(Z):\n return Z\n\n elif self.method == \"random\":\n if isinstance(Z, pd.DataFrame):\n for col in Z:\n Z[col] = Z[col].apply(\n lambda i: self._get_random(Z[col]) if np.isnan(i) else i\n )\n else:\n Z = Z.apply(lambda i: self._get_random(Z) if np.isnan(i) else i)\n elif self.method == \"constant\":\n Z = Z.fillna(value=self.value)\n elif self.method in [\"backfill\", \"bfill\", \"pad\", \"ffill\"]:\n Z = Z.fillna(method=self.method)\n elif self.method == \"drift\":\n forecaster = PolynomialTrendForecaster(degree=1)\n Z = _impute_with_forecaster(forecaster, Z)\n elif self.method == \"forecaster\":\n forecaster = clone(self.forecaster)\n Z = _impute_with_forecaster(forecaster, Z)\n elif self.method == \"mean\":\n Z = Z.fillna(value=Z.mean())\n elif self.method == \"median\":\n Z = Z.fillna(value=Z.median())\n elif self.method in [\"nearest\", \"linear\"]:\n Z = Z.interpolate(method=self.method)\n else:\n raise ValueError(f\"`method`: {self.method} not available.\")\n # fill first/last elements of series,\n # as some methods (e.g. \"linear\") cant impute those\n Z = Z.fillna(method=\"ffill\").fillna(method=\"backfill\")\n return Z\n\n def _check_method(self):\n if (\n self.value is not None\n and self.method != \"constant\"\n or self.method == \"constant\"\n and self.value is None\n ):\n raise ValueError(\n \"\"\"Imputing with a value can only be\n used if method=\"constant\" and if parameter \"value\" is not None\"\"\"\n )\n elif (\n self.forecaster is not None\n and self.method != \"forecaster\"\n or self.method == \"forecaster\"\n and self.forecaster is None\n ):\n raise ValueError(\n \"\"\"Imputing with a forecaster can only be used if\n method=\\\"forecaster\\\" and if arg forecaster is not None\"\"\"\n )\n else:\n pass\n\n def _get_random(self, Z):\n \"\"\"Create a random int or float value.\n\n :param Z: Series\n :type Z: pd.Series\n :return: Random int or float between min and max of Z\n :rtype: int/float\n \"\"\"\n rng = check_random_state(self.random_state)\n # check if series contains only int or int-like values (e.g. 
3.0)\n if (Z.dropna() % 1 == 0).all():\n return rng.randint(Z.min(), Z.max())\n else:\n return rng.uniform(Z.min(), Z.max())\n\n\ndef _impute_with_forecaster(forecaster, Z):\n \"\"\"Use a given forecaster for imputation by in-sample predictions.\n\n Parameters\n ----------\n forecaster: Forecaster\n Forecaster to use for imputation\n Z : pd.Series or pd.DataFrame\n Series to impute.\n\n Returns\n -------\n zt : pd.Series or pd.DataFrame\n Series with imputed values.\n \"\"\"\n if isinstance(Z, pd.Series):\n series = [Z]\n elif isinstance(Z, pd.DataFrame):\n series = [Z[column] for column in Z]\n\n for z in series:\n # define fh based on index of missing values\n na_index = z.index[z.isna()]\n fh = ForecastingHorizon(values=na_index, is_relative=False)\n\n # fill NaN before fitting with ffill and backfill (heuristic)\n forecaster.fit(y=z.fillna(method=\"ffill\").fillna(method=\"backfill\"), fh=fh)\n\n # replace missing values with predicted values\n z[na_index] = forecaster.predict()\n return Z\n\n\ndef _has_missing_values(Z):\n return Z.isnull().to_numpy().any()\n", "path": "sktime/transformations/series/impute.py"}], "after_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Utilities to impute series with missing values.\"\"\"\n\n__author__ = [\"Martin Walter\"]\n__all__ = [\"Imputer\"]\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.base import clone\nfrom sklearn.utils import check_random_state\n\nfrom sktime.forecasting.base import ForecastingHorizon\nfrom sktime.forecasting.trend import PolynomialTrendForecaster\nfrom sktime.transformations.base import _SeriesToSeriesTransformer\nfrom sktime.utils.validation.series import check_series\n\n\nclass Imputer(_SeriesToSeriesTransformer):\n \"\"\"Missing value imputation.\n\n The Imputer transforms input series by replacing missing values according\n to an imputation strategy specified by `method`.\n\n Parameters\n ----------\n method : str, default=\"drift\"\n Method to fill the missing values values.\n\n * \"drift\" : drift/trend values by sktime.PolynomialTrendForecaster()\n * \"linear\" : linear interpolation, by pd.Series.interpolate()\n * \"nearest\" : use nearest value, by pd.Series.interpolate()\n * \"constant\" : same constant value (given in arg value) for all NaN\n * \"mean\" : pd.Series.mean()\n * \"median\" : pd.Series.median()\n * \"backfill\" ot \"bfill\" : adapted from pd.Series.fillna()\n * \"pad\" or \"ffill\" : adapted from pd.Series.fillna()\n * \"random\" : random values between pd.Series.min() and .max()\n * \"forecaster\" : use an sktime Forecaster, given in arg forecaster\n\n missing_values : int/float/str, default=None\n The placeholder for the missing values. All occurrences of\n missing_values will be imputed. If None then np.nan is used.\n value : int/float, default=None\n Value to use to fill missing values when method=\"constant\".\n forecaster : Any Forecaster based on sktime.BaseForecaster, default=None\n Use a given Forecaster to impute by insample predictions when\n method=\"forecaster\". 
Before fitting, missing data is imputed with\n method=\"ffill\" or \"bfill\" as heuristic.\n random_state : int/float/str, optional\n Value to set random.seed() if method=\"random\", default None\n\n Examples\n --------\n >>> from sktime.transformations.series.impute import Imputer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n >>> transformer = Imputer(method=\"drift\")\n >>> y_hat = transformer.fit_transform(y)\n \"\"\"\n\n _tags = {\n \"fit-in-transform\": True,\n \"handles-missing-data\": True,\n \"skip-inverse-transform\": True,\n \"univariate-only\": False,\n }\n\n def __init__(\n self,\n method=\"drift\",\n random_state=None,\n value=None,\n forecaster=None,\n missing_values=None,\n ):\n\n self.method = method\n self.missing_values = missing_values\n self.value = value\n self.forecaster = forecaster\n self.random_state = random_state\n super(Imputer, self).__init__()\n\n def transform(self, Z, X=None):\n \"\"\"Transform data.\n\n Returns a transformed version of Z.\n\n Parameters\n ----------\n Z : pd.Series, pd.DataFrame\n\n Returns\n -------\n Z : pd.Series, pd.DataFrame\n Transformed time series(es).\n \"\"\"\n self.check_is_fitted()\n self._check_method()\n Z = check_series(Z)\n Z = Z.copy()\n\n # replace missing_values with np.nan\n if self.missing_values:\n Z = Z.replace(to_replace=self.missing_values, value=np.nan)\n\n if not _has_missing_values(Z):\n return Z\n\n if self.method == \"random\":\n if isinstance(Z, pd.DataFrame):\n for col in Z:\n Z[col] = Z[col].apply(\n lambda i: self._get_random(Z[col]) if np.isnan(i) else i\n )\n else:\n Z = Z.apply(lambda i: self._get_random(Z) if np.isnan(i) else i)\n elif self.method == \"constant\":\n Z = Z.fillna(value=self.value)\n elif self.method in [\"backfill\", \"bfill\", \"pad\", \"ffill\"]:\n Z = Z.fillna(method=self.method)\n elif self.method == \"drift\":\n forecaster = PolynomialTrendForecaster(degree=1)\n Z = _impute_with_forecaster(forecaster, Z)\n elif self.method == \"forecaster\":\n forecaster = clone(self.forecaster)\n Z = _impute_with_forecaster(forecaster, Z)\n elif self.method == \"mean\":\n Z = Z.fillna(value=Z.mean())\n elif self.method == \"median\":\n Z = Z.fillna(value=Z.median())\n elif self.method in [\"nearest\", \"linear\"]:\n Z = Z.interpolate(method=self.method)\n else:\n raise ValueError(f\"`method`: {self.method} not available.\")\n # fill first/last elements of series,\n # as some methods (e.g. \"linear\") cant impute those\n Z = Z.fillna(method=\"ffill\").fillna(method=\"backfill\")\n return Z\n\n def _check_method(self):\n if (\n self.value is not None\n and self.method != \"constant\"\n or self.method == \"constant\"\n and self.value is None\n ):\n raise ValueError(\n \"\"\"Imputing with a value can only be\n used if method=\"constant\" and if parameter \"value\" is not None\"\"\"\n )\n elif (\n self.forecaster is not None\n and self.method != \"forecaster\"\n or self.method == \"forecaster\"\n and self.forecaster is None\n ):\n raise ValueError(\n \"\"\"Imputing with a forecaster can only be used if\n method=\\\"forecaster\\\" and if arg forecaster is not None\"\"\"\n )\n else:\n pass\n\n def _get_random(self, Z):\n \"\"\"Create a random int or float value.\n\n :param Z: Series\n :type Z: pd.Series\n :return: Random int or float between min and max of Z\n :rtype: int/float\n \"\"\"\n rng = check_random_state(self.random_state)\n # check if series contains only int or int-like values (e.g. 
3.0)\n if (Z.dropna() % 1 == 0).all():\n return rng.randint(Z.min(), Z.max())\n else:\n return rng.uniform(Z.min(), Z.max())\n\n\ndef _impute_with_forecaster(forecaster, Z):\n \"\"\"Use a given forecaster for imputation by in-sample predictions.\n\n Parameters\n ----------\n forecaster: Forecaster\n Forecaster to use for imputation\n Z : pd.Series or pd.DataFrame\n Series to impute.\n\n Returns\n -------\n zt : pd.Series or pd.DataFrame\n Series with imputed values.\n \"\"\"\n if isinstance(Z, pd.Series):\n series = [Z]\n elif isinstance(Z, pd.DataFrame):\n series = [Z[column] for column in Z]\n\n for z in series:\n if _has_missing_values(z):\n # define fh based on index of missing values\n na_index = z.index[z.isna()]\n fh = ForecastingHorizon(values=na_index, is_relative=False)\n\n # fill NaN before fitting with ffill and backfill (heuristic)\n forecaster.fit(y=z.fillna(method=\"ffill\").fillna(method=\"backfill\"), fh=fh)\n\n # replace missing values with predicted values\n z[na_index] = forecaster.predict()\n return Z\n\n\ndef _has_missing_values(Z):\n return Z.isnull().to_numpy().any()\n", "path": "sktime/transformations/series/impute.py"}]}
| 3,030 | 506 |
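The heart of the sktime fix above is the `if _has_missing_values(z):` guard, so the imputation forecaster is only fit on columns that actually contain NaNs. Below is a stripped-down stand-in for that per-column guard, using plain pandas and a toy fill rule rather than sktime's forecasters.

```python
import numpy as np
import pandas as pd


def impute_columns(frame, fit_and_predict):
    """Apply `fit_and_predict(series, na_index)` only to columns that have NaNs."""
    out = frame.copy()
    for column in out:
        series = out[column]
        if series.isnull().to_numpy().any():  # same test as _has_missing_values
            na_index = series.index[series.isna()]
            out.loc[na_index, column] = fit_and_predict(series, na_index)
    return out


X = pd.DataFrame(np.random.normal(size=(100, 2)))
X.iloc[3, 1] = np.nan  # only the second column needs imputation

# Toy "forecaster": fill gaps with the column mean; sktime would fit
# PolynomialTrendForecaster (or a user-supplied forecaster) here instead.
filled = impute_columns(X, lambda s, idx: s.mean())
assert not filled.isnull().to_numpy().any()
```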
gh_patches_debug_31741 | rasdani/github-patches | git_diff | pyro-ppl__pyro-738 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support arbitrary sample_shape in Rejector
This will require the `proposal` to accept an optional `sample_shape` arg and the `log_prob_accept()` method to support broadcasting.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/rejector.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import torch
4 from pyro.distributions.distribution import Distribution
5 from pyro.distributions.score_parts import ScoreParts
6 from pyro.distributions.util import copy_docs_from
7
8
9 @copy_docs_from(Distribution)
10 class Rejector(Distribution):
11 """
12 Rejection sampled distribution given an acceptance rate function.
13
14 :param Distribution propose: A proposal distribution that samples batched
15 propsals via `propose()`.
16 :param callable log_prob_accept: A callable that inputs a batch of
17 proposals and returns a batch of log acceptance probabilities.
18 :param log_scale: Total log probability of acceptance.
19 """
20 stateful = True
21 reparameterized = True
22
23 def __init__(self, propose, log_prob_accept, log_scale):
24 self.propose = propose
25 self.log_prob_accept = log_prob_accept
26 self._log_scale = log_scale
27
28 # These LRU(1) caches allow work to be shared across different method calls.
29 self._log_prob_accept_cache = None, None
30 self._propose_batch_log_pdf_cache = None, None
31
32 def _log_prob_accept(self, x):
33 if x is not self._log_prob_accept_cache[0]:
34 self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale
35 return self._log_prob_accept_cache[1]
36
37 def _propose_batch_log_pdf(self, x):
38 if x is not self._propose_batch_log_pdf_cache[0]:
39 self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)
40 return self._propose_batch_log_pdf_cache[1]
41
42 def sample(self, sample_shape=torch.Size()):
43 if sample_shape:
44 raise ValueError("Arbitrary `sample_shape` not supported by Rejector class.")
45 # Implements parallel batched accept-reject sampling.
46 x = self.propose()
47 log_prob_accept = self.log_prob_accept(x)
48 probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
49 done = torch.bernoulli(probs).byte()
50 while not done.all():
51 proposed_x = self.propose()
52 log_prob_accept = self.log_prob_accept(proposed_x)
53 prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
54 accept = torch.bernoulli(prob_accept).byte() & ~done
55 if accept.any():
56 x[accept] = proposed_x[accept]
57 done |= accept
58 return x
59
60 def log_prob(self, x):
61 return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)
62
63 def score_parts(self, x):
64 score_function = self._log_prob_accept(x)
65 log_pdf = self.log_prob(x)
66 return ScoreParts(log_pdf, score_function, log_pdf)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/distributions/rejector.py b/pyro/distributions/rejector.py
--- a/pyro/distributions/rejector.py
+++ b/pyro/distributions/rejector.py
@@ -12,7 +12,8 @@
Rejection sampled distribution given an acceptance rate function.
:param Distribution propose: A proposal distribution that samples batched
- propsals via `propose()`.
+ proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``
+ arg only if ``propose()`` supports a ``sample_shape`` arg.
:param callable log_prob_accept: A callable that inputs a batch of
proposals and returns a batch of log acceptance probabilities.
:param log_scale: Total log probability of acceptance.
@@ -40,15 +41,13 @@
return self._propose_batch_log_pdf_cache[1]
def sample(self, sample_shape=torch.Size()):
- if sample_shape:
- raise ValueError("Arbitrary `sample_shape` not supported by Rejector class.")
# Implements parallel batched accept-reject sampling.
- x = self.propose()
+ x = self.propose(sample_shape) if sample_shape else self.propose()
log_prob_accept = self.log_prob_accept(x)
probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
done = torch.bernoulli(probs).byte()
while not done.all():
- proposed_x = self.propose()
+ proposed_x = self.propose(sample_shape) if sample_shape else self.propose()
log_prob_accept = self.log_prob_accept(proposed_x)
prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
accept = torch.bernoulli(prob_accept).byte() & ~done
|
{"golden_diff": "diff --git a/pyro/distributions/rejector.py b/pyro/distributions/rejector.py\n--- a/pyro/distributions/rejector.py\n+++ b/pyro/distributions/rejector.py\n@@ -12,7 +12,8 @@\n Rejection sampled distribution given an acceptance rate function.\n \n :param Distribution propose: A proposal distribution that samples batched\n- propsals via `propose()`.\n+ proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``\n+ arg only if ``propose()`` supports a ``sample_shape`` arg.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n@@ -40,15 +41,13 @@\n return self._propose_batch_log_pdf_cache[1]\n \n def sample(self, sample_shape=torch.Size()):\n- if sample_shape:\n- raise ValueError(\"Arbitrary `sample_shape` not supported by Rejector class.\")\n # Implements parallel batched accept-reject sampling.\n- x = self.propose()\n+ x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = torch.bernoulli(probs).byte()\n while not done.all():\n- proposed_x = self.propose()\n+ proposed_x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n", "issue": "Support arbitrary sample_shape in Rejector\nThis will require the `proposal` to accept an optional `sample_shape` arg and the `log_prob_accept()` method to support broadcasting.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport torch\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import copy_docs_from\n\n\n@copy_docs_from(Distribution)\nclass Rejector(Distribution):\n \"\"\"\n Rejection sampled distribution given an acceptance rate function.\n\n :param Distribution propose: A proposal distribution that samples batched\n propsals via `propose()`.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n \"\"\"\n stateful = True\n reparameterized = True\n\n def __init__(self, propose, log_prob_accept, log_scale):\n self.propose = propose\n self.log_prob_accept = log_prob_accept\n self._log_scale = log_scale\n\n # These LRU(1) caches allow work to be shared across different method calls.\n self._log_prob_accept_cache = None, None\n self._propose_batch_log_pdf_cache = None, None\n\n def _log_prob_accept(self, x):\n if x is not self._log_prob_accept_cache[0]:\n self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale\n return self._log_prob_accept_cache[1]\n\n def _propose_batch_log_pdf(self, x):\n if x is not self._propose_batch_log_pdf_cache[0]:\n self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)\n return self._propose_batch_log_pdf_cache[1]\n\n def sample(self, sample_shape=torch.Size()):\n if sample_shape:\n raise ValueError(\"Arbitrary `sample_shape` not supported by Rejector class.\")\n # Implements parallel batched accept-reject sampling.\n x = self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = 
torch.bernoulli(probs).byte()\n while not done.all():\n proposed_x = self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n if accept.any():\n x[accept] = proposed_x[accept]\n done |= accept\n return x\n\n def log_prob(self, x):\n return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)\n\n def score_parts(self, x):\n score_function = self._log_prob_accept(x)\n log_pdf = self.log_prob(x)\n return ScoreParts(log_pdf, score_function, log_pdf)\n", "path": "pyro/distributions/rejector.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport torch\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import copy_docs_from\n\n\n@copy_docs_from(Distribution)\nclass Rejector(Distribution):\n \"\"\"\n Rejection sampled distribution given an acceptance rate function.\n\n :param Distribution propose: A proposal distribution that samples batched\n proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``\n arg only if ``propose()`` supports a ``sample_shape`` arg.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n \"\"\"\n stateful = True\n reparameterized = True\n\n def __init__(self, propose, log_prob_accept, log_scale):\n self.propose = propose\n self.log_prob_accept = log_prob_accept\n self._log_scale = log_scale\n\n # These LRU(1) caches allow work to be shared across different method calls.\n self._log_prob_accept_cache = None, None\n self._propose_batch_log_pdf_cache = None, None\n\n def _log_prob_accept(self, x):\n if x is not self._log_prob_accept_cache[0]:\n self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale\n return self._log_prob_accept_cache[1]\n\n def _propose_batch_log_pdf(self, x):\n if x is not self._propose_batch_log_pdf_cache[0]:\n self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)\n return self._propose_batch_log_pdf_cache[1]\n\n def sample(self, sample_shape=torch.Size()):\n # Implements parallel batched accept-reject sampling.\n x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = torch.bernoulli(probs).byte()\n while not done.all():\n proposed_x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n if accept.any():\n x[accept] = proposed_x[accept]\n done |= accept\n return x\n\n def log_prob(self, x):\n return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)\n\n def score_parts(self, x):\n score_function = self._log_prob_accept(x)\n log_pdf = self.log_prob(x)\n return ScoreParts(log_pdf, score_function, log_pdf)\n", "path": "pyro/distributions/rejector.py"}]}
| 1,041 | 398 |
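The shape handling added in the pyro diff above reduces to forwarding `sample_shape` to `propose()` only when one is given. A small sketch of that dispatch with a plain torch distribution standing in for the proposal (the names here are illustrative, not pyro's API):

```python
import torch
from torch.distributions import Normal

proposal = Normal(torch.zeros(5), torch.ones(5))  # batch_shape = (5,)


def propose(sample_shape=torch.Size()):
    return proposal.sample(sample_shape)


def sample(sample_shape=torch.Size()):
    # Mirrors the patched Rejector.sample: only pass sample_shape when non-empty,
    # so proposals that take no argument keep working for the default case.
    return propose(sample_shape) if sample_shape else propose()


print(sample().shape)                    # torch.Size([5])
print(sample(torch.Size([2, 3])).shape)  # torch.Size([2, 3, 5])
```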
gh_patches_debug_15112 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-1496 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[speechx] PaddleInference ds2
- [x] linear feat
- [x] thread pool #1400
- [x] queue for wav, frames, hiddens and so on.
- [x] decodeable interface
- [x] paddle infererence
- [x] ctcdecoder online (swig_decoder)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `paddlespeech/s2t/io/sampler.py`
Content:
```
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import math
15
16 import numpy as np
17 from paddle import distributed as dist
18 from paddle.io import BatchSampler
19 from paddle.io import DistributedBatchSampler
20
21 from paddlespeech.s2t.utils.log import Log
22
23 logger = Log(__name__).getlog()
24
25 __all__ = [
26 "SortagradDistributedBatchSampler",
27 "SortagradBatchSampler",
28 ]
29
30
31 def _batch_shuffle(indices, batch_size, epoch, clipped=False):
32 """Put similarly-sized instances into minibatches for better efficiency
33 and make a batch-wise shuffle.
34
35 1. Sort the audio clips by duration.
36 2. Generate a random number `k`, k in [0, batch_size).
37 3. Randomly shift `k` instances in order to create different batches
38 for different epochs. Create minibatches.
39 4. Shuffle the minibatches.
40
41 :param indices: indexes. List of int.
42 :type indices: list
43 :param batch_size: Batch size. This size is also used for generate
44 a random number for batch shuffle.
45 :type batch_size: int
46 :param clipped: Whether to clip the heading (small shift) and trailing
47 (incomplete batch) instances.
48 :type clipped: bool
49 :return: Batch shuffled mainifest.
50 :rtype: list
51 """
52 rng = np.random.RandomState(epoch)
53 shift_len = rng.randint(0, batch_size - 1)
54 batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))
55 rng.shuffle(batch_indices)
56 batch_indices = [item for batch in batch_indices for item in batch]
57 assert clipped is False
58 if not clipped:
59 res_len = len(indices) - shift_len - len(batch_indices)
60 # when res_len is 0, will return whole list, len(List[-0:]) = len(List[:])
61 if res_len != 0:
62 batch_indices.extend(indices[-res_len:])
63 batch_indices.extend(indices[0:shift_len])
64 assert len(indices) == len(
65 batch_indices
66 ), f"_batch_shuffle: {len(indices)} : {len(batch_indices)} : {res_len} - {shift_len}"
67 return batch_indices
68
69
70 class SortagradDistributedBatchSampler(DistributedBatchSampler):
71 def __init__(self,
72 dataset,
73 batch_size,
74 num_replicas=None,
75 rank=None,
76 shuffle=False,
77 drop_last=False,
78 sortagrad=False,
79 shuffle_method="batch_shuffle"):
80 """Sortagrad Sampler for multi gpus.
81
82 Args:
83 dataset (paddle.io.Dataset):
84 batch_size (int): batch size for one gpu
85 num_replicas (int, optional): world size or numbers of gpus. Defaults to None.
86 rank (int, optional): rank id. Defaults to None.
87 shuffle (bool, optional): True for do shuffle, or else. Defaults to False.
88 drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.
89 sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.
90 shuffle_method (str, optional): shuffle method, "instance_shuffle" or "batch_shuffle". Defaults to "batch_shuffle".
91 """
92 super().__init__(dataset, batch_size, num_replicas, rank, shuffle,
93 drop_last)
94 self._sortagrad = sortagrad
95 self._shuffle_method = shuffle_method
96
97 def __iter__(self):
98 num_samples = len(self.dataset)
99 indices = np.arange(num_samples).tolist()
100 indices += indices[:(self.total_size - len(indices))]
101 assert len(indices) == self.total_size
102
103 # sort (by duration) or batch-wise shuffle the manifest
104 if self.shuffle:
105 if self.epoch == 0 and self._sortagrad:
106 logger.info(
107 f'rank: {dist.get_rank()} dataset sortagrad! epoch {self.epoch}'
108 )
109 else:
110 logger.info(
111 f'rank: {dist.get_rank()} dataset shuffle! epoch {self.epoch}'
112 )
113 if self._shuffle_method == "batch_shuffle":
114 # using `batch_size * nrank`, or will cause instability loss and nan or inf grad,
115 # since diff batch examlpe length in batches case instability loss in diff rank,
116 # e.g. rank0 maxlength 20, rank3 maxlength 1000
117 indices = _batch_shuffle(
118 indices,
119 self.batch_size * self.nranks,
120 self.epoch,
121 clipped=False)
122 elif self._shuffle_method == "instance_shuffle":
123 np.random.RandomState(self.epoch).shuffle(indices)
124 else:
125 raise ValueError("Unknown shuffle method %s." %
126 self._shuffle_method)
127 assert len(
128 indices
129 ) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"
130
131 # slice `self.batch_size` examples by rank id
132 def _get_indices_by_batch_size(indices):
133 subsampled_indices = []
134 last_batch_size = self.total_size % (self.batch_size * self.nranks)
135 assert last_batch_size % self.nranks == 0
136 last_local_batch_size = last_batch_size // self.nranks
137
138 for i in range(self.local_rank * self.batch_size,
139 len(indices) - last_batch_size,
140 self.batch_size * self.nranks):
141 subsampled_indices.extend(indices[i:i + self.batch_size])
142
143 indices = indices[len(indices) - last_batch_size:]
144 subsampled_indices.extend(
145 indices[self.local_rank * last_local_batch_size:(
146 self.local_rank + 1) * last_local_batch_size])
147 return subsampled_indices
148
149 if self.nranks > 1:
150 indices = _get_indices_by_batch_size(indices)
151
152 assert len(indices) == self.num_samples
153 _sample_iter = iter(indices)
154
155 batch_indices = []
156 for idx in _sample_iter:
157 batch_indices.append(idx)
158 if len(batch_indices) == self.batch_size:
159 logger.debug(
160 f"rank: {dist.get_rank()} batch index: {batch_indices} ")
161 yield batch_indices
162 batch_indices = []
163 if not self.drop_last and len(batch_indices) > 0:
164 yield batch_indices
165
166 def __len__(self):
167 num_samples = self.num_samples
168 num_samples += int(not self.drop_last) * (self.batch_size - 1)
169 return num_samples // self.batch_size
170
171
172 class SortagradBatchSampler(BatchSampler):
173 def __init__(self,
174 dataset,
175 batch_size,
176 shuffle=False,
177 drop_last=False,
178 sortagrad=False,
179 shuffle_method="batch_shuffle"):
180 """Sortagrad Sampler for one gpu.
181
182 Args:
183 dataset (paddle.io.Dataset):
184 batch_size (int): batch size for one gpu
185 shuffle (bool, optional): True for do shuffle, or else. Defaults to False.
186 drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.
187 sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.
188 shuffle_method (str, optional): shuffle method, "instance_shuffle" or "batch_shuffle". Defaults to "batch_shuffle".
189 """
190 self.dataset = dataset
191
192 assert isinstance(batch_size, int) and batch_size > 0, \
193 "batch_size should be a positive integer"
194 self.batch_size = batch_size
195 assert isinstance(shuffle, bool), \
196 "shuffle should be a boolean value"
197 self.shuffle = shuffle
198 assert isinstance(drop_last, bool), \
199 "drop_last should be a boolean number"
200
201 self.drop_last = drop_last
202 self.epoch = 0
203 self.num_samples = int(math.ceil(len(self.dataset) * 1.0))
204 self.total_size = self.num_samples
205 self._sortagrad = sortagrad
206 self._shuffle_method = shuffle_method
207
208 def __iter__(self):
209 num_samples = len(self.dataset)
210 indices = np.arange(num_samples).tolist()
211 indices += indices[:(self.total_size - len(indices))]
212 assert len(indices) == self.total_size
213
214 # sort (by duration) or batch-wise shuffle the manifest
215 if self.shuffle:
216 if self.epoch == 0 and self._sortagrad:
217 logger.info(f'dataset sortagrad! epoch {self.epoch}')
218 else:
219 logger.info(f'dataset shuffle! epoch {self.epoch}')
220 if self._shuffle_method == "batch_shuffle":
221 indices = _batch_shuffle(
222 indices, self.batch_size, self.epoch, clipped=False)
223 elif self._shuffle_method == "instance_shuffle":
224 np.random.RandomState(self.epoch).shuffle(indices)
225 else:
226 raise ValueError("Unknown shuffle method %s." %
227 self._shuffle_method)
228 assert len(
229 indices
230 ) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"
231
232 assert len(indices) == self.num_samples
233 _sample_iter = iter(indices)
234
235 batch_indices = []
236 for idx in _sample_iter:
237 batch_indices.append(idx)
238 if len(batch_indices) == self.batch_size:
239 logger.debug(
240 f"rank: {dist.get_rank()} batch index: {batch_indices} ")
241 yield batch_indices
242 batch_indices = []
243 if not self.drop_last and len(batch_indices) > 0:
244 yield batch_indices
245
246 self.epoch += 1
247
248 def __len__(self):
249 num_samples = self.num_samples
250 num_samples += int(not self.drop_last) * (self.batch_size - 1)
251 return num_samples // self.batch_size
252
```
Path: `paddlespeech/t2s/modules/transformer/repeat.py`
Content:
```
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # Modified from espnet(https://github.com/espnet/espnet)
15 """Repeat the same layer definition."""
16 import paddle
17
18
19 class MultiSequential(paddle.nn.Sequential):
20 """Multi-input multi-output paddle.nn.Sequential."""
21
22 def forward(self, *args):
23 """Repeat."""
24 for m in self:
25 args = m(*args)
26 return args
27
28
29 def repeat(N, fn):
30 """Repeat module N times.
31
32 Args:
33 N (int): Number of repeat time.
34 fn (Callable): Function to generate module.
35
36 Returns:
37 MultiSequential: Repeated model instance.
38 """
39 return MultiSequential(*[fn(n) for n in range(N)])
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/paddlespeech/s2t/io/sampler.py b/paddlespeech/s2t/io/sampler.py
--- a/paddlespeech/s2t/io/sampler.py
+++ b/paddlespeech/s2t/io/sampler.py
@@ -51,7 +51,7 @@
"""
rng = np.random.RandomState(epoch)
shift_len = rng.randint(0, batch_size - 1)
- batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))
+ batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size))
rng.shuffle(batch_indices)
batch_indices = [item for batch in batch_indices for item in batch]
assert clipped is False
diff --git a/paddlespeech/t2s/modules/transformer/repeat.py b/paddlespeech/t2s/modules/transformer/repeat.py
--- a/paddlespeech/t2s/modules/transformer/repeat.py
+++ b/paddlespeech/t2s/modules/transformer/repeat.py
@@ -36,4 +36,4 @@
Returns:
MultiSequential: Repeated model instance.
"""
- return MultiSequential(*[fn(n) for n in range(N)])
+ return MultiSequential(* [fn(n) for n in range(N)])
|
{"golden_diff": "diff --git a/paddlespeech/s2t/io/sampler.py b/paddlespeech/s2t/io/sampler.py\n--- a/paddlespeech/s2t/io/sampler.py\n+++ b/paddlespeech/s2t/io/sampler.py\n@@ -51,7 +51,7 @@\n \"\"\"\n rng = np.random.RandomState(epoch)\n shift_len = rng.randint(0, batch_size - 1)\n- batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))\n+ batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size))\n rng.shuffle(batch_indices)\n batch_indices = [item for batch in batch_indices for item in batch]\n assert clipped is False\ndiff --git a/paddlespeech/t2s/modules/transformer/repeat.py b/paddlespeech/t2s/modules/transformer/repeat.py\n--- a/paddlespeech/t2s/modules/transformer/repeat.py\n+++ b/paddlespeech/t2s/modules/transformer/repeat.py\n@@ -36,4 +36,4 @@\n Returns:\n MultiSequential: Repeated model instance.\n \"\"\"\n- return MultiSequential(*[fn(n) for n in range(N)])\n+ return MultiSequential(* [fn(n) for n in range(N)])\n", "issue": "[speechx] PaddleInference ds2\n- [x] linear feat \r\n- [x] thread pool #1400 \r\n- [x] queue for wav, frames, hiddens and so on.\r\n- [x] decodeable interface\r\n- [x] paddle infererence\r\n- [x] ctcdecoder online (swig_decoder)\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\n\nimport numpy as np\nfrom paddle import distributed as dist\nfrom paddle.io import BatchSampler\nfrom paddle.io import DistributedBatchSampler\n\nfrom paddlespeech.s2t.utils.log import Log\n\nlogger = Log(__name__).getlog()\n\n__all__ = [\n \"SortagradDistributedBatchSampler\",\n \"SortagradBatchSampler\",\n]\n\n\ndef _batch_shuffle(indices, batch_size, epoch, clipped=False):\n \"\"\"Put similarly-sized instances into minibatches for better efficiency\n and make a batch-wise shuffle.\n\n 1. Sort the audio clips by duration.\n 2. Generate a random number `k`, k in [0, batch_size).\n 3. Randomly shift `k` instances in order to create different batches\n for different epochs. Create minibatches.\n 4. Shuffle the minibatches.\n\n :param indices: indexes. List of int.\n :type indices: list\n :param batch_size: Batch size. 
This size is also used for generate\n a random number for batch shuffle.\n :type batch_size: int\n :param clipped: Whether to clip the heading (small shift) and trailing\n (incomplete batch) instances.\n :type clipped: bool\n :return: Batch shuffled mainifest.\n :rtype: list\n \"\"\"\n rng = np.random.RandomState(epoch)\n shift_len = rng.randint(0, batch_size - 1)\n batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))\n rng.shuffle(batch_indices)\n batch_indices = [item for batch in batch_indices for item in batch]\n assert clipped is False\n if not clipped:\n res_len = len(indices) - shift_len - len(batch_indices)\n # when res_len is 0, will return whole list, len(List[-0:]) = len(List[:])\n if res_len != 0:\n batch_indices.extend(indices[-res_len:])\n batch_indices.extend(indices[0:shift_len])\n assert len(indices) == len(\n batch_indices\n ), f\"_batch_shuffle: {len(indices)} : {len(batch_indices)} : {res_len} - {shift_len}\"\n return batch_indices\n\n\nclass SortagradDistributedBatchSampler(DistributedBatchSampler):\n def __init__(self,\n dataset,\n batch_size,\n num_replicas=None,\n rank=None,\n shuffle=False,\n drop_last=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"Sortagrad Sampler for multi gpus.\n\n Args:\n dataset (paddle.io.Dataset): \n batch_size (int): batch size for one gpu\n num_replicas (int, optional): world size or numbers of gpus. Defaults to None.\n rank (int, optional): rank id. Defaults to None.\n shuffle (bool, optional): True for do shuffle, or else. Defaults to False.\n drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.\n sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.\n shuffle_method (str, optional): shuffle method, \"instance_shuffle\" or \"batch_shuffle\". Defaults to \"batch_shuffle\".\n \"\"\"\n super().__init__(dataset, batch_size, num_replicas, rank, shuffle,\n drop_last)\n self._sortagrad = sortagrad\n self._shuffle_method = shuffle_method\n\n def __iter__(self):\n num_samples = len(self.dataset)\n indices = np.arange(num_samples).tolist()\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # sort (by duration) or batch-wise shuffle the manifest\n if self.shuffle:\n if self.epoch == 0 and self._sortagrad:\n logger.info(\n f'rank: {dist.get_rank()} dataset sortagrad! epoch {self.epoch}'\n )\n else:\n logger.info(\n f'rank: {dist.get_rank()} dataset shuffle! epoch {self.epoch}'\n )\n if self._shuffle_method == \"batch_shuffle\":\n # using `batch_size * nrank`, or will cause instability loss and nan or inf grad, \n # since diff batch examlpe length in batches case instability loss in diff rank, \n # e.g. 
rank0 maxlength 20, rank3 maxlength 1000\n indices = _batch_shuffle(\n indices,\n self.batch_size * self.nranks,\n self.epoch,\n clipped=False)\n elif self._shuffle_method == \"instance_shuffle\":\n np.random.RandomState(self.epoch).shuffle(indices)\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n self._shuffle_method)\n assert len(\n indices\n ) == self.total_size, f\"batch shuffle examples error: {len(indices)} : {self.total_size}\"\n\n # slice `self.batch_size` examples by rank id\n def _get_indices_by_batch_size(indices):\n subsampled_indices = []\n last_batch_size = self.total_size % (self.batch_size * self.nranks)\n assert last_batch_size % self.nranks == 0\n last_local_batch_size = last_batch_size // self.nranks\n\n for i in range(self.local_rank * self.batch_size,\n len(indices) - last_batch_size,\n self.batch_size * self.nranks):\n subsampled_indices.extend(indices[i:i + self.batch_size])\n\n indices = indices[len(indices) - last_batch_size:]\n subsampled_indices.extend(\n indices[self.local_rank * last_local_batch_size:(\n self.local_rank + 1) * last_local_batch_size])\n return subsampled_indices\n\n if self.nranks > 1:\n indices = _get_indices_by_batch_size(indices)\n\n assert len(indices) == self.num_samples\n _sample_iter = iter(indices)\n\n batch_indices = []\n for idx in _sample_iter:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n logger.debug(\n f\"rank: {dist.get_rank()} batch index: {batch_indices} \")\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n def __len__(self):\n num_samples = self.num_samples\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n\n\nclass SortagradBatchSampler(BatchSampler):\n def __init__(self,\n dataset,\n batch_size,\n shuffle=False,\n drop_last=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"Sortagrad Sampler for one gpu.\n\n Args:\n dataset (paddle.io.Dataset): \n batch_size (int): batch size for one gpu\n shuffle (bool, optional): True for do shuffle, or else. Defaults to False.\n drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.\n sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.\n shuffle_method (str, optional): shuffle method, \"instance_shuffle\" or \"batch_shuffle\". Defaults to \"batch_shuffle\".\n \"\"\"\n self.dataset = dataset\n\n assert isinstance(batch_size, int) and batch_size > 0, \\\n \"batch_size should be a positive integer\"\n self.batch_size = batch_size\n assert isinstance(shuffle, bool), \\\n \"shuffle should be a boolean value\"\n self.shuffle = shuffle\n assert isinstance(drop_last, bool), \\\n \"drop_last should be a boolean number\"\n\n self.drop_last = drop_last\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0))\n self.total_size = self.num_samples\n self._sortagrad = sortagrad\n self._shuffle_method = shuffle_method\n\n def __iter__(self):\n num_samples = len(self.dataset)\n indices = np.arange(num_samples).tolist()\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # sort (by duration) or batch-wise shuffle the manifest\n if self.shuffle:\n if self.epoch == 0 and self._sortagrad:\n logger.info(f'dataset sortagrad! epoch {self.epoch}')\n else:\n logger.info(f'dataset shuffle! 
epoch {self.epoch}')\n if self._shuffle_method == \"batch_shuffle\":\n indices = _batch_shuffle(\n indices, self.batch_size, self.epoch, clipped=False)\n elif self._shuffle_method == \"instance_shuffle\":\n np.random.RandomState(self.epoch).shuffle(indices)\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n self._shuffle_method)\n assert len(\n indices\n ) == self.total_size, f\"batch shuffle examples error: {len(indices)} : {self.total_size}\"\n\n assert len(indices) == self.num_samples\n _sample_iter = iter(indices)\n\n batch_indices = []\n for idx in _sample_iter:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n logger.debug(\n f\"rank: {dist.get_rank()} batch index: {batch_indices} \")\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n self.epoch += 1\n\n def __len__(self):\n num_samples = self.num_samples\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n", "path": "paddlespeech/s2t/io/sampler.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Modified from espnet(https://github.com/espnet/espnet)\n\"\"\"Repeat the same layer definition.\"\"\"\nimport paddle\n\n\nclass MultiSequential(paddle.nn.Sequential):\n \"\"\"Multi-input multi-output paddle.nn.Sequential.\"\"\"\n\n def forward(self, *args):\n \"\"\"Repeat.\"\"\"\n for m in self:\n args = m(*args)\n return args\n\n\ndef repeat(N, fn):\n \"\"\"Repeat module N times.\n\n Args:\n N (int): Number of repeat time.\n fn (Callable): Function to generate module.\n\n Returns:\n MultiSequential: Repeated model instance.\n \"\"\"\n return MultiSequential(*[fn(n) for n in range(N)])\n", "path": "paddlespeech/t2s/modules/transformer/repeat.py"}], "after_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\n\nimport numpy as np\nfrom paddle import distributed as dist\nfrom paddle.io import BatchSampler\nfrom paddle.io import DistributedBatchSampler\n\nfrom paddlespeech.s2t.utils.log import Log\n\nlogger = Log(__name__).getlog()\n\n__all__ = [\n \"SortagradDistributedBatchSampler\",\n \"SortagradBatchSampler\",\n]\n\n\ndef _batch_shuffle(indices, batch_size, epoch, clipped=False):\n \"\"\"Put similarly-sized instances into minibatches for better efficiency\n and make a batch-wise shuffle.\n\n 1. 
Sort the audio clips by duration.\n 2. Generate a random number `k`, k in [0, batch_size).\n 3. Randomly shift `k` instances in order to create different batches\n for different epochs. Create minibatches.\n 4. Shuffle the minibatches.\n\n :param indices: indexes. List of int.\n :type indices: list\n :param batch_size: Batch size. This size is also used for generate\n a random number for batch shuffle.\n :type batch_size: int\n :param clipped: Whether to clip the heading (small shift) and trailing\n (incomplete batch) instances.\n :type clipped: bool\n :return: Batch shuffled mainifest.\n :rtype: list\n \"\"\"\n rng = np.random.RandomState(epoch)\n shift_len = rng.randint(0, batch_size - 1)\n batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size))\n rng.shuffle(batch_indices)\n batch_indices = [item for batch in batch_indices for item in batch]\n assert clipped is False\n if not clipped:\n res_len = len(indices) - shift_len - len(batch_indices)\n # when res_len is 0, will return whole list, len(List[-0:]) = len(List[:])\n if res_len != 0:\n batch_indices.extend(indices[-res_len:])\n batch_indices.extend(indices[0:shift_len])\n assert len(indices) == len(\n batch_indices\n ), f\"_batch_shuffle: {len(indices)} : {len(batch_indices)} : {res_len} - {shift_len}\"\n return batch_indices\n\n\nclass SortagradDistributedBatchSampler(DistributedBatchSampler):\n def __init__(self,\n dataset,\n batch_size,\n num_replicas=None,\n rank=None,\n shuffle=False,\n drop_last=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"Sortagrad Sampler for multi gpus.\n\n Args:\n dataset (paddle.io.Dataset): \n batch_size (int): batch size for one gpu\n num_replicas (int, optional): world size or numbers of gpus. Defaults to None.\n rank (int, optional): rank id. Defaults to None.\n shuffle (bool, optional): True for do shuffle, or else. Defaults to False.\n drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.\n sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.\n shuffle_method (str, optional): shuffle method, \"instance_shuffle\" or \"batch_shuffle\". Defaults to \"batch_shuffle\".\n \"\"\"\n super().__init__(dataset, batch_size, num_replicas, rank, shuffle,\n drop_last)\n self._sortagrad = sortagrad\n self._shuffle_method = shuffle_method\n\n def __iter__(self):\n num_samples = len(self.dataset)\n indices = np.arange(num_samples).tolist()\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # sort (by duration) or batch-wise shuffle the manifest\n if self.shuffle:\n if self.epoch == 0 and self._sortagrad:\n logger.info(\n f'rank: {dist.get_rank()} dataset sortagrad! epoch {self.epoch}'\n )\n else:\n logger.info(\n f'rank: {dist.get_rank()} dataset shuffle! epoch {self.epoch}'\n )\n if self._shuffle_method == \"batch_shuffle\":\n # using `batch_size * nrank`, or will cause instability loss and nan or inf grad, \n # since diff batch examlpe length in batches case instability loss in diff rank, \n # e.g. 
rank0 maxlength 20, rank3 maxlength 1000\n indices = _batch_shuffle(\n indices,\n self.batch_size * self.nranks,\n self.epoch,\n clipped=False)\n elif self._shuffle_method == \"instance_shuffle\":\n np.random.RandomState(self.epoch).shuffle(indices)\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n self._shuffle_method)\n assert len(\n indices\n ) == self.total_size, f\"batch shuffle examples error: {len(indices)} : {self.total_size}\"\n\n # slice `self.batch_size` examples by rank id\n def _get_indices_by_batch_size(indices):\n subsampled_indices = []\n last_batch_size = self.total_size % (self.batch_size * self.nranks)\n assert last_batch_size % self.nranks == 0\n last_local_batch_size = last_batch_size // self.nranks\n\n for i in range(self.local_rank * self.batch_size,\n len(indices) - last_batch_size,\n self.batch_size * self.nranks):\n subsampled_indices.extend(indices[i:i + self.batch_size])\n\n indices = indices[len(indices) - last_batch_size:]\n subsampled_indices.extend(\n indices[self.local_rank * last_local_batch_size:(\n self.local_rank + 1) * last_local_batch_size])\n return subsampled_indices\n\n if self.nranks > 1:\n indices = _get_indices_by_batch_size(indices)\n\n assert len(indices) == self.num_samples\n _sample_iter = iter(indices)\n\n batch_indices = []\n for idx in _sample_iter:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n logger.debug(\n f\"rank: {dist.get_rank()} batch index: {batch_indices} \")\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n def __len__(self):\n num_samples = self.num_samples\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n\n\nclass SortagradBatchSampler(BatchSampler):\n def __init__(self,\n dataset,\n batch_size,\n shuffle=False,\n drop_last=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"Sortagrad Sampler for one gpu.\n\n Args:\n dataset (paddle.io.Dataset): \n batch_size (int): batch size for one gpu\n shuffle (bool, optional): True for do shuffle, or else. Defaults to False.\n drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.\n sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.\n shuffle_method (str, optional): shuffle method, \"instance_shuffle\" or \"batch_shuffle\". Defaults to \"batch_shuffle\".\n \"\"\"\n self.dataset = dataset\n\n assert isinstance(batch_size, int) and batch_size > 0, \\\n \"batch_size should be a positive integer\"\n self.batch_size = batch_size\n assert isinstance(shuffle, bool), \\\n \"shuffle should be a boolean value\"\n self.shuffle = shuffle\n assert isinstance(drop_last, bool), \\\n \"drop_last should be a boolean number\"\n\n self.drop_last = drop_last\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0))\n self.total_size = self.num_samples\n self._sortagrad = sortagrad\n self._shuffle_method = shuffle_method\n\n def __iter__(self):\n num_samples = len(self.dataset)\n indices = np.arange(num_samples).tolist()\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # sort (by duration) or batch-wise shuffle the manifest\n if self.shuffle:\n if self.epoch == 0 and self._sortagrad:\n logger.info(f'dataset sortagrad! epoch {self.epoch}')\n else:\n logger.info(f'dataset shuffle! 
epoch {self.epoch}')\n if self._shuffle_method == \"batch_shuffle\":\n indices = _batch_shuffle(\n indices, self.batch_size, self.epoch, clipped=False)\n elif self._shuffle_method == \"instance_shuffle\":\n np.random.RandomState(self.epoch).shuffle(indices)\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n self._shuffle_method)\n assert len(\n indices\n ) == self.total_size, f\"batch shuffle examples error: {len(indices)} : {self.total_size}\"\n\n assert len(indices) == self.num_samples\n _sample_iter = iter(indices)\n\n batch_indices = []\n for idx in _sample_iter:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n logger.debug(\n f\"rank: {dist.get_rank()} batch index: {batch_indices} \")\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n self.epoch += 1\n\n def __len__(self):\n num_samples = self.num_samples\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n", "path": "paddlespeech/s2t/io/sampler.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Modified from espnet(https://github.com/espnet/espnet)\n\"\"\"Repeat the same layer definition.\"\"\"\nimport paddle\n\n\nclass MultiSequential(paddle.nn.Sequential):\n \"\"\"Multi-input multi-output paddle.nn.Sequential.\"\"\"\n\n def forward(self, *args):\n \"\"\"Repeat.\"\"\"\n for m in self:\n args = m(*args)\n return args\n\n\ndef repeat(N, fn):\n \"\"\"Repeat module N times.\n\n Args:\n N (int): Number of repeat time.\n fn (Callable): Function to generate module.\n\n Returns:\n MultiSequential: Repeated model instance.\n \"\"\"\n return MultiSequential(* [fn(n) for n in range(N)])\n", "path": "paddlespeech/t2s/modules/transformer/repeat.py"}]}
| 3,607 | 288 |
gh_patches_debug_15538
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-438
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
show_predict_batch fails due to no targets being available
## 🐛 Bug
When calling the `show_predict_batch` method for image classification data, an error is thrown due to no targets being available.
```python
for i, ax in enumerate(axs.ravel()):
# unpack images and labels
if isinstance(data, list):
_img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
elif isinstance(data, dict):
_img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
else:
raise TypeError(f"Unknown data type. Got: {type(data)}.")
# convert images to numpy
_img: np.ndarray = self._to_numpy(_img)
if isinstance(_label, torch.Tensor):
_label = _label.squeeze().tolist()
# show image and set label as subplot title
ax.imshow(_img)
ax.set_title(str(_label))
ax.axis('off')
```
The fix should be simple:
```python
for i, ax in enumerate(axs.ravel()):
# unpack images and labels
if isinstance(data, list):
# use the get method to return an empty string if no targets are available
        _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get(DefaultDataKeys.TARGET, "")
elif isinstance(data, dict):
# use the get method to return a list that contains an empty string if no targets are available
        _img, _label = data[DefaultDataKeys.INPUT][i], data.get(DefaultDataKeys.TARGET, [""])[i]
else:
raise TypeError(f"Unknown data type. Got: {type(data)}.")
# convert images to numpy
_img: np.ndarray = self._to_numpy(_img)
if isinstance(_label, torch.Tensor):
_label = _label.squeeze().tolist()
# show image and set label as subplot title
ax.imshow(_img)
ax.set_title(str(_label))
ax.axis('off')
```
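For what it's worth, the only behavioural difference is that `dict.get` falls back to a default instead of raising `KeyError` when the key is missing; a minimal illustration (the keys and file name below are made up for the example):
```python
sample = {"input": "ants/1.jpg"}   # a predict sample carries no "target" key
label = sample.get("target", "")   # returns "" instead of raising KeyError
print(repr(label))                 # ''
```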
I can create a PR later, when I have time.
### To Reproduce
Just have flash installed.
#### Code sample
```python
from flash.core.data.utils import download_data
from flash.image import ImageClassificationData
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/")
datamodule = ImageClassificationData.from_folders(
train_folder="data/hymenoptera_data/train/",
val_folder="data/hymenoptera_data/val/",
test_folder="data/hymenoptera_data/test/",
predict_folder="data/hymenoptera_data/predict/"
)
datamodule.show_predict_batch()
```
This will give the following error message:
```python
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-4-ff8f29471c71> in <module>
----> 1 datamodule.show_predict_batch()
~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in show_predict_batch(self, hooks_names, reset)
225 """This function is used to visualize a batch from the predict dataloader."""
226 stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]
--> 227 self._show_batch(stage_name, hooks_names, reset=reset)
228
229 @staticmethod
~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in _show_batch(self, stage, func_names, reset)
203 _ = next(iter_dataloader)
204 data_fetcher: BaseVisualization = self.data_fetcher
--> 205 data_fetcher._show(stage, func_names)
206 if reset:
207 self.data_fetcher.batches[stage] = {}
~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in _show(self, running_stage, func_names_list)
110
111 def _show(self, running_stage: RunningStage, func_names_list: List[str]) -> None:
--> 112 self.show(self.batches[running_stage], running_stage, func_names_list)
113
114 def show(self, batch: Dict[str, Any], running_stage: RunningStage, func_names_list: List[str]) -> None:
~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in show(self, batch, running_stage, func_names_list)
124 hook_name = f"show_{func_name}"
125 if _is_overriden(hook_name, self, BaseVisualization):
--> 126 getattr(self, hook_name)(batch[func_name], running_stage)
127
128 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):
~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in show_load_sample(self, samples, running_stage)
144 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):
145 win_title: str = f"{running_stage} - show_load_sample"
--> 146 self._show_images_and_labels(samples, len(samples), win_title)
147
148 def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in _show_images_and_labels(self, data, num_samples, title)
127 # unpack images and labels
128 if isinstance(data, list):
--> 129 _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
130 elif isinstance(data, dict):
131 _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
KeyError: <DefaultDataKeys.TARGET: 'target'>
```
### Expected behavior
The batch should be shown without labels.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/image/classification/data.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
15
16 import numpy as np
17 import torch
18 from pytorch_lightning.trainer.states import RunningStage
19
20 from flash.core.data.base_viz import BaseVisualization # for viz
21 from flash.core.data.callback import BaseDataFetcher
22 from flash.core.data.data_module import DataModule
23 from flash.core.data.data_source import DefaultDataKeys, DefaultDataSources
24 from flash.core.data.process import Deserializer, Preprocess
25 from flash.core.utilities.imports import _MATPLOTLIB_AVAILABLE, _PIL_AVAILABLE, _requires_extras
26 from flash.image.classification.transforms import default_transforms, train_default_transforms
27 from flash.image.data import (
28 ImageDeserializer,
29 ImageFiftyOneDataSource,
30 ImageNumpyDataSource,
31 ImagePathsDataSource,
32 ImageTensorDataSource,
33 )
34
35 if _MATPLOTLIB_AVAILABLE:
36 import matplotlib.pyplot as plt
37 else:
38 plt = None
39
40 if _PIL_AVAILABLE:
41 from PIL import Image
42 else:
43
44 class Image:
45 Image = None
46
47
48 class ImageClassificationPreprocess(Preprocess):
49
50 def __init__(
51 self,
52 train_transform: Optional[Dict[str, Callable]] = None,
53 val_transform: Optional[Dict[str, Callable]] = None,
54 test_transform: Optional[Dict[str, Callable]] = None,
55 predict_transform: Optional[Dict[str, Callable]] = None,
56 image_size: Tuple[int, int] = (196, 196),
57 deserializer: Optional[Deserializer] = None,
58 **data_source_kwargs: Any,
59 ):
60 self.image_size = image_size
61
62 super().__init__(
63 train_transform=train_transform,
64 val_transform=val_transform,
65 test_transform=test_transform,
66 predict_transform=predict_transform,
67 data_sources={
68 DefaultDataSources.FIFTYONE: ImageFiftyOneDataSource(**data_source_kwargs),
69 DefaultDataSources.FILES: ImagePathsDataSource(),
70 DefaultDataSources.FOLDERS: ImagePathsDataSource(),
71 DefaultDataSources.NUMPY: ImageNumpyDataSource(),
72 DefaultDataSources.TENSORS: ImageTensorDataSource(),
73 },
74 deserializer=deserializer or ImageDeserializer(),
75 default_data_source=DefaultDataSources.FILES,
76 )
77
78 def get_state_dict(self) -> Dict[str, Any]:
79 return {**self.transforms, "image_size": self.image_size}
80
81 @classmethod
82 def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool = False):
83 return cls(**state_dict)
84
85 def default_transforms(self) -> Optional[Dict[str, Callable]]:
86 return default_transforms(self.image_size)
87
88 def train_default_transforms(self) -> Optional[Dict[str, Callable]]:
89 return train_default_transforms(self.image_size)
90
91
92 class ImageClassificationData(DataModule):
93 """Data module for image classification tasks."""
94
95 preprocess_cls = ImageClassificationPreprocess
96
97 def set_block_viz_window(self, value: bool) -> None:
98 """Setter method to switch on/off matplotlib to pop up windows."""
99 self.data_fetcher.block_viz_window = value
100
101 @staticmethod
102 def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:
103 return MatplotlibVisualization(*args, **kwargs)
104
105
106 class MatplotlibVisualization(BaseVisualization):
107 """Process and show the image batch and its associated label using matplotlib.
108 """
109 max_cols: int = 4 # maximum number of columns we accept
110 block_viz_window: bool = True # parameter to allow user to block visualisation windows
111
112 @staticmethod
113 @_requires_extras("image")
114 def _to_numpy(img: Union[torch.Tensor, Image.Image]) -> np.ndarray:
115 out: np.ndarray
116 if isinstance(img, Image.Image):
117 out = np.array(img)
118 elif isinstance(img, torch.Tensor):
119 out = img.squeeze(0).permute(1, 2, 0).cpu().numpy()
120 else:
121 raise TypeError(f"Unknown image type. Got: {type(img)}.")
122 return out
123
124 @_requires_extras("image")
125 def _show_images_and_labels(self, data: List[Any], num_samples: int, title: str):
126 # define the image grid
127 cols: int = min(num_samples, self.max_cols)
128 rows: int = num_samples // cols
129
130 # create figure and set title
131 fig, axs = plt.subplots(rows, cols)
132 fig.suptitle(title)
133
134 for i, ax in enumerate(axs.ravel()):
135 # unpack images and labels
136 if isinstance(data, list):
137 _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
138 elif isinstance(data, dict):
139 _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
140 else:
141 raise TypeError(f"Unknown data type. Got: {type(data)}.")
142 # convert images to numpy
143 _img: np.ndarray = self._to_numpy(_img)
144 if isinstance(_label, torch.Tensor):
145 _label = _label.squeeze().tolist()
146 # show image and set label as subplot title
147 ax.imshow(_img)
148 ax.set_title(str(_label))
149 ax.axis('off')
150 plt.show(block=self.block_viz_window)
151
152 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):
153 win_title: str = f"{running_stage} - show_load_sample"
154 self._show_images_and_labels(samples, len(samples), win_title)
155
156 def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
157 win_title: str = f"{running_stage} - show_pre_tensor_transform"
158 self._show_images_and_labels(samples, len(samples), win_title)
159
160 def show_to_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
161 win_title: str = f"{running_stage} - show_to_tensor_transform"
162 self._show_images_and_labels(samples, len(samples), win_title)
163
164 def show_post_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
165 win_title: str = f"{running_stage} - show_post_tensor_transform"
166 self._show_images_and_labels(samples, len(samples), win_title)
167
168 def show_per_batch_transform(self, batch: List[Any], running_stage):
169 win_title: str = f"{running_stage} - show_per_batch_transform"
170 self._show_images_and_labels(batch[0], batch[0][DefaultDataKeys.INPUT].shape[0], win_title)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash/image/classification/data.py b/flash/image/classification/data.py
--- a/flash/image/classification/data.py
+++ b/flash/image/classification/data.py
@@ -134,9 +134,9 @@
for i, ax in enumerate(axs.ravel()):
# unpack images and labels
if isinstance(data, list):
- _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
+ _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get(DefaultDataKeys.TARGET, "")
elif isinstance(data, dict):
- _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
+ _img, _label = data[DefaultDataKeys.INPUT][i], data.get(DefaultDataKeys.TARGET, [""] * (i + 1))[i]
else:
raise TypeError(f"Unknown data type. Got: {type(data)}.")
# convert images to numpy
|
{"golden_diff": "diff --git a/flash/image/classification/data.py b/flash/image/classification/data.py\n--- a/flash/image/classification/data.py\n+++ b/flash/image/classification/data.py\n@@ -134,9 +134,9 @@\n for i, ax in enumerate(axs.ravel()):\n # unpack images and labels\n if isinstance(data, list):\n- _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\n+ _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get(DefaultDataKeys.TARGET, \"\")\n elif isinstance(data, dict):\n- _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\n+ _img, _label = data[DefaultDataKeys.INPUT][i], data.get(DefaultDataKeys.TARGET, [\"\"] * (i + 1))[i]\n else:\n raise TypeError(f\"Unknown data type. Got: {type(data)}.\")\n # convert images to numpy\n", "issue": "show_predict_batch fails due to no targets being available\n## \ud83d\udc1b Bug\r\n\r\nWhen calling the `show_predict_batch` method for image classification data an error is thrown due to not targets being available.\r\n\r\n```python\r\nfor i, ax in enumerate(axs.ravel()):\r\n # unpack images and labels\r\n if isinstance(data, list):\r\n _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\r\n elif isinstance(data, dict):\r\n _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\r\n else:\r\n raise TypeError(f\"Unknown data type. Got: {type(data)}.\")\r\n # convert images to numpy\r\n _img: np.ndarray = self._to_numpy(_img)\r\n if isinstance(_label, torch.Tensor):\r\n _label = _label.squeeze().tolist()\r\n # show image and set label as subplot title\r\n ax.imshow(_img)\r\n ax.set_title(str(_label))\r\n ax.axis('off')\r\n```\r\n\r\nThe fix should be simple:\r\n\r\n```python\r\nfor i, ax in enumerate(axs.ravel()):\r\n # unpack images and labels\r\n if isinstance(data, list):\r\n # use the get method to return an empty string if no targets are available\r\n _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get([DefaultDataKeys.TARGET], \"\")\r\n elif isinstance(data, dict):\r\n # use the get method to return a list that contains an empty string if no targets are available\r\n _img, _label = data[DefaultDataKeys.INPUT][i], data.get([DefaultDataKeys.TARGET], [\"\"])[i]\r\n else:\r\n raise TypeError(f\"Unknown data type. 
Got: {type(data)}.\")\r\n # convert images to numpy\r\n _img: np.ndarray = self._to_numpy(_img)\r\n if isinstance(_label, torch.Tensor):\r\n _label = _label.squeeze().tolist()\r\n # show image and set label as subplot title\r\n ax.imshow(_img)\r\n ax.set_title(str(_label))\r\n ax.axis('off')\r\n```\r\n\r\nI can create a PR later, when I have time.\r\n\r\n### To Reproduce\r\n\r\nJust have flash installed.\r\n\r\n\r\n#### Code sample\r\n```python\r\nfrom flash.core.data.utils import download_data\r\nfrom flash.image import ImageClassificationData\r\n\r\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\", \"data/\")\r\n\r\ndatamodule = ImageClassificationData.from_folders(\r\n train_folder=\"data/hymenoptera_data/train/\",\r\n val_folder=\"data/hymenoptera_data/val/\",\r\n test_folder=\"data/hymenoptera_data/test/\",\r\n predict_folder=\"data/hymenoptera_data/predict/\"\r\n)\r\n\r\ndatamodule.show_predict_batch()\r\n```\r\n\r\nThis will give the following error message:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-4-ff8f29471c71> in <module>\r\n----> 1 datamodule.show_predict_batch()\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in show_predict_batch(self, hooks_names, reset)\r\n 225 \"\"\"This function is used to visualize a batch from the predict dataloader.\"\"\"\r\n 226 stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]\r\n--> 227 self._show_batch(stage_name, hooks_names, reset=reset)\r\n 228 \r\n 229 @staticmethod\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in _show_batch(self, stage, func_names, reset)\r\n 203 _ = next(iter_dataloader)\r\n 204 data_fetcher: BaseVisualization = self.data_fetcher\r\n--> 205 data_fetcher._show(stage, func_names)\r\n 206 if reset:\r\n 207 self.data_fetcher.batches[stage] = {}\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in _show(self, running_stage, func_names_list)\r\n 110 \r\n 111 def _show(self, running_stage: RunningStage, func_names_list: List[str]) -> None:\r\n--> 112 self.show(self.batches[running_stage], running_stage, func_names_list)\r\n 113 \r\n 114 def show(self, batch: Dict[str, Any], running_stage: RunningStage, func_names_list: List[str]) -> None:\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in show(self, batch, running_stage, func_names_list)\r\n 124 hook_name = f\"show_{func_name}\"\r\n 125 if _is_overriden(hook_name, self, BaseVisualization):\r\n--> 126 getattr(self, hook_name)(batch[func_name], running_stage)\r\n 127 \r\n 128 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in show_load_sample(self, samples, running_stage)\r\n 144 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):\r\n 145 win_title: str = f\"{running_stage} - show_load_sample\"\r\n--> 146 self._show_images_and_labels(samples, len(samples), win_title)\r\n 147 \r\n 148 def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in _show_images_and_labels(self, data, num_samples, title)\r\n 127 # unpack images and labels\r\n 128 if isinstance(data, list):\r\n--> 129 _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\r\n 130 elif 
isinstance(data, dict):\r\n 131 _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\r\n\r\nKeyError: <DefaultDataKeys.TARGET: 'target'>\r\n```\r\n### Expected behavior\r\n\r\nThe batch should be shown without labels.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom pytorch_lightning.trainer.states import RunningStage\n\nfrom flash.core.data.base_viz import BaseVisualization # for viz\nfrom flash.core.data.callback import BaseDataFetcher\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_source import DefaultDataKeys, DefaultDataSources\nfrom flash.core.data.process import Deserializer, Preprocess\nfrom flash.core.utilities.imports import _MATPLOTLIB_AVAILABLE, _PIL_AVAILABLE, _requires_extras\nfrom flash.image.classification.transforms import default_transforms, train_default_transforms\nfrom flash.image.data import (\n ImageDeserializer,\n ImageFiftyOneDataSource,\n ImageNumpyDataSource,\n ImagePathsDataSource,\n ImageTensorDataSource,\n)\n\nif _MATPLOTLIB_AVAILABLE:\n import matplotlib.pyplot as plt\nelse:\n plt = None\n\nif _PIL_AVAILABLE:\n from PIL import Image\nelse:\n\n class Image:\n Image = None\n\n\nclass ImageClassificationPreprocess(Preprocess):\n\n def __init__(\n self,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n image_size: Tuple[int, int] = (196, 196),\n deserializer: Optional[Deserializer] = None,\n **data_source_kwargs: Any,\n ):\n self.image_size = image_size\n\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n DefaultDataSources.FIFTYONE: ImageFiftyOneDataSource(**data_source_kwargs),\n DefaultDataSources.FILES: ImagePathsDataSource(),\n DefaultDataSources.FOLDERS: ImagePathsDataSource(),\n DefaultDataSources.NUMPY: ImageNumpyDataSource(),\n DefaultDataSources.TENSORS: ImageTensorDataSource(),\n },\n deserializer=deserializer or ImageDeserializer(),\n default_data_source=DefaultDataSources.FILES,\n )\n\n def get_state_dict(self) -> Dict[str, Any]:\n return {**self.transforms, \"image_size\": self.image_size}\n\n @classmethod\n def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool = False):\n return cls(**state_dict)\n\n def default_transforms(self) -> Optional[Dict[str, Callable]]:\n return default_transforms(self.image_size)\n\n def train_default_transforms(self) -> Optional[Dict[str, Callable]]:\n return train_default_transforms(self.image_size)\n\n\nclass ImageClassificationData(DataModule):\n \"\"\"Data module for image classification tasks.\"\"\"\n\n preprocess_cls = ImageClassificationPreprocess\n\n def 
set_block_viz_window(self, value: bool) -> None:\n \"\"\"Setter method to switch on/off matplotlib to pop up windows.\"\"\"\n self.data_fetcher.block_viz_window = value\n\n @staticmethod\n def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:\n return MatplotlibVisualization(*args, **kwargs)\n\n\nclass MatplotlibVisualization(BaseVisualization):\n \"\"\"Process and show the image batch and its associated label using matplotlib.\n \"\"\"\n max_cols: int = 4 # maximum number of columns we accept\n block_viz_window: bool = True # parameter to allow user to block visualisation windows\n\n @staticmethod\n @_requires_extras(\"image\")\n def _to_numpy(img: Union[torch.Tensor, Image.Image]) -> np.ndarray:\n out: np.ndarray\n if isinstance(img, Image.Image):\n out = np.array(img)\n elif isinstance(img, torch.Tensor):\n out = img.squeeze(0).permute(1, 2, 0).cpu().numpy()\n else:\n raise TypeError(f\"Unknown image type. Got: {type(img)}.\")\n return out\n\n @_requires_extras(\"image\")\n def _show_images_and_labels(self, data: List[Any], num_samples: int, title: str):\n # define the image grid\n cols: int = min(num_samples, self.max_cols)\n rows: int = num_samples // cols\n\n # create figure and set title\n fig, axs = plt.subplots(rows, cols)\n fig.suptitle(title)\n\n for i, ax in enumerate(axs.ravel()):\n # unpack images and labels\n if isinstance(data, list):\n _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\n elif isinstance(data, dict):\n _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\n else:\n raise TypeError(f\"Unknown data type. Got: {type(data)}.\")\n # convert images to numpy\n _img: np.ndarray = self._to_numpy(_img)\n if isinstance(_label, torch.Tensor):\n _label = _label.squeeze().tolist()\n # show image and set label as subplot title\n ax.imshow(_img)\n ax.set_title(str(_label))\n ax.axis('off')\n plt.show(block=self.block_viz_window)\n\n def show_load_sample(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_load_sample\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_pre_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_to_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_to_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_post_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_post_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_per_batch_transform(self, batch: List[Any], running_stage):\n win_title: str = f\"{running_stage} - show_per_batch_transform\"\n self._show_images_and_labels(batch[0], batch[0][DefaultDataKeys.INPUT].shape[0], win_title)\n", "path": "flash/image/classification/data.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom pytorch_lightning.trainer.states import RunningStage\n\nfrom flash.core.data.base_viz import BaseVisualization # for viz\nfrom flash.core.data.callback import BaseDataFetcher\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_source import DefaultDataKeys, DefaultDataSources\nfrom flash.core.data.process import Deserializer, Preprocess\nfrom flash.core.utilities.imports import _MATPLOTLIB_AVAILABLE, _PIL_AVAILABLE, _requires_extras\nfrom flash.image.classification.transforms import default_transforms, train_default_transforms\nfrom flash.image.data import (\n ImageDeserializer,\n ImageFiftyOneDataSource,\n ImageNumpyDataSource,\n ImagePathsDataSource,\n ImageTensorDataSource,\n)\n\nif _MATPLOTLIB_AVAILABLE:\n import matplotlib.pyplot as plt\nelse:\n plt = None\n\nif _PIL_AVAILABLE:\n from PIL import Image\nelse:\n\n class Image:\n Image = None\n\n\nclass ImageClassificationPreprocess(Preprocess):\n\n def __init__(\n self,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n image_size: Tuple[int, int] = (196, 196),\n deserializer: Optional[Deserializer] = None,\n **data_source_kwargs: Any,\n ):\n self.image_size = image_size\n\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n DefaultDataSources.FIFTYONE: ImageFiftyOneDataSource(**data_source_kwargs),\n DefaultDataSources.FILES: ImagePathsDataSource(),\n DefaultDataSources.FOLDERS: ImagePathsDataSource(),\n DefaultDataSources.NUMPY: ImageNumpyDataSource(),\n DefaultDataSources.TENSORS: ImageTensorDataSource(),\n },\n deserializer=deserializer or ImageDeserializer(),\n default_data_source=DefaultDataSources.FILES,\n )\n\n def get_state_dict(self) -> Dict[str, Any]:\n return {**self.transforms, \"image_size\": self.image_size}\n\n @classmethod\n def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool = False):\n return cls(**state_dict)\n\n def default_transforms(self) -> Optional[Dict[str, Callable]]:\n return default_transforms(self.image_size)\n\n def train_default_transforms(self) -> Optional[Dict[str, Callable]]:\n return train_default_transforms(self.image_size)\n\n\nclass ImageClassificationData(DataModule):\n \"\"\"Data module for image classification tasks.\"\"\"\n\n preprocess_cls = ImageClassificationPreprocess\n\n def set_block_viz_window(self, value: bool) -> None:\n \"\"\"Setter method to switch on/off matplotlib to pop up windows.\"\"\"\n self.data_fetcher.block_viz_window = value\n\n @staticmethod\n def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:\n return MatplotlibVisualization(*args, **kwargs)\n\n\nclass MatplotlibVisualization(BaseVisualization):\n \"\"\"Process and show the image batch and its associated label using matplotlib.\n \"\"\"\n max_cols: int = 4 # maximum number of columns we accept\n block_viz_window: bool = True # parameter to allow user to block visualisation windows\n\n @staticmethod\n @_requires_extras(\"image\")\n def _to_numpy(img: Union[torch.Tensor, Image.Image]) -> 
np.ndarray:\n out: np.ndarray\n if isinstance(img, Image.Image):\n out = np.array(img)\n elif isinstance(img, torch.Tensor):\n out = img.squeeze(0).permute(1, 2, 0).cpu().numpy()\n else:\n raise TypeError(f\"Unknown image type. Got: {type(img)}.\")\n return out\n\n @_requires_extras(\"image\")\n def _show_images_and_labels(self, data: List[Any], num_samples: int, title: str):\n # define the image grid\n cols: int = min(num_samples, self.max_cols)\n rows: int = num_samples // cols\n\n # create figure and set title\n fig, axs = plt.subplots(rows, cols)\n fig.suptitle(title)\n\n for i, ax in enumerate(axs.ravel()):\n # unpack images and labels\n if isinstance(data, list):\n _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get(DefaultDataKeys.TARGET, \"\")\n elif isinstance(data, dict):\n _img, _label = data[DefaultDataKeys.INPUT][i], data.get(DefaultDataKeys.TARGET, [\"\"] * (i + 1))[i]\n else:\n raise TypeError(f\"Unknown data type. Got: {type(data)}.\")\n # convert images to numpy\n _img: np.ndarray = self._to_numpy(_img)\n if isinstance(_label, torch.Tensor):\n _label = _label.squeeze().tolist()\n # show image and set label as subplot title\n ax.imshow(_img)\n ax.set_title(str(_label))\n ax.axis('off')\n plt.show(block=self.block_viz_window)\n\n def show_load_sample(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_load_sample\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_pre_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_to_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_to_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_post_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_post_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_per_batch_transform(self, batch: List[Any], running_stage):\n win_title: str = f\"{running_stage} - show_per_batch_transform\"\n self._show_images_and_labels(batch[0], batch[0][DefaultDataKeys.INPUT].shape[0], win_title)\n", "path": "flash/image/classification/data.py"}]}
| 3,589 | 227 |
gh_patches_debug_33861 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1660 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Costa Rica page has changed and now errors
```
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 131, in launch_parsers
**parser_kwargs)
File "/home/contrib/parsers/CR.py", line 178, in fetch_production
jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
IndexError: list index out of range
```
https://github.com/tmrowco/electricitymap-contrib/blob/86f06791f2292c8a20e383a4642ced8b16083333/parsers/CR.py#L178
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/CR.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3
4 import logging
5
6 import arrow
7 import pandas as pd
8 import requests
9 from bs4 import BeautifulSoup
10
11 TIMEZONE = 'America/Costa_Rica'
12 DATE_FORMAT = 'DD/MM/YYYY'
13 MONTH_FORMAT = 'MM/YYYY'
14 POWER_PLANTS = {
15 u'Aeroenergía': 'wind',
16 u'Altamira': 'wind',
17 u'Angostura': 'hydro',
18 u'Arenal': 'hydro',
19 u'Balsa Inferior': 'hydro',
20 u'Barranca': 'unknown',
21 u'Barro Morado': 'geothermal',
22 u'Bijagua': 'hydro',
23 u'Birris12': 'hydro',
24 u'Birris3': 'hydro',
25 u'Boca de Pozo': 'hydro',
26 u'CNFL': 'unknown',
27 u'Cachí': 'hydro',
28 u'Campos Azules': 'wind',
29 u'Canalete': 'unknown',
30 u'Cariblanco': 'hydro',
31 u'Carrillos': 'hydro',
32 u'Caño Grande': 'hydro',
33 u'Caño Grande III': 'hydro',
34 u'Chiripa': 'wind',
35 u'Chocosuelas': 'hydro',
36 u'Chucás': 'hydro',
37 u'Cubujuquí': 'hydro',
38 u'Daniel Gutiérrez': 'hydro',
39 u'Dengo': 'hydro',
40 u'Don Pedro': 'hydro',
41 u'Doña Julia': 'hydro',
42 u'Echandi': 'hydro',
43 u'El Angel': 'hydro',
44 u'El Angel Ampliación': 'hydro',
45 u'El Embalse': 'hydro',
46 u'El General': 'hydro',
47 u'El Viejo': 'biomass',
48 u'Garabito': 'oil',
49 u'Garita': 'hydro',
50 u'Guápiles': 'oil',
51 u'Hidrozarcas': 'hydro',
52 u'La Esperanza (CoopeL)': 'hydro',
53 u'La Joya': 'hydro',
54 u'Los Negros': 'hydro',
55 u'Los Negros II': 'hydro',
56 u'Los Santos': 'wind',
57 u'MOVASA': 'wind',
58 u'Matamoros': 'unknown',
59 u'Miravalles I': 'geothermal',
60 u'Miravalles II': 'geothermal',
61 u'Miravalles III': 'geothermal',
62 u'Miravalles V': 'geothermal',
63 u'Moín I': 'oil',
64 u'Moín II': 'oil',
65 u'Moín III': 'oil',
66 u'Orosí': 'wind',
67 u'Orotina': 'unknown',
68 u'Otros': 'unknown',
69 u'PE Mogote': 'wind',
70 u'PEG': 'wind',
71 u'Pailas': 'geothermal',
72 u'Parque Solar Juanilama': 'solar',
73 u'Parque Solar Miravalles': 'solar',
74 u'Peñas Blancas': 'hydro',
75 u'Pirrís': 'hydro',
76 u'Plantas Eólicas': 'wind',
77 u'Platanar': 'hydro',
78 u'Pocosol': 'hydro',
79 u'Poás I y II': 'hydro',
80 u'Reventazón': 'hydro',
81 u'Río Lajas': 'hydro',
82 u'Río Macho': 'hydro',
83 u'San Antonio': 'oil',
84 u'San Lorenzo (C)': 'hydro',
85 u'Sandillal': 'hydro',
86 u'Suerkata': 'hydro',
87 u'Taboga': 'biomass',
88 u'Tacares': 'hydro',
89 u'Tejona': 'wind',
90 u'Tilawind': 'wind',
91 u'Torito': 'hydro',
92 u'Toro I': 'hydro',
93 u'Toro II': 'hydro',
94 u'Toro III': 'hydro',
95 u'Tuis (JASEC)': 'hydro',
96 u'Valle Central': 'wind',
97 u'Vara Blanca': 'hydro',
98 u'Ventanas-Garita': 'hydro',
99 u'Vientos de La Perla': 'wind',
100 u'Vientos de Miramar': 'wind',
101 u'Vientos del Este': 'wind',
102 u'Volcán': 'hydro',
103 }
104
105 CHARACTERISTIC_NAME = 'Angostura'
106
107
108 def empty_record(zone_key):
109 return {
110 'zoneKey': zone_key,
111 'capacity': {},
112 'production': {
113 'biomass': 0.0,
114 'coal': 0.0,
115 'gas': 0.0,
116 'hydro': 0.0,
117 'nuclear': 0.0,
118 'oil': 0.0,
119 'solar': 0.0,
120 'wind': 0.0,
121 'geothermal': 0.0,
122 'unknown': 0.0
123 },
124 'storage': {},
125 'source': 'grupoice.com'
126 }
127
128
129 def df_to_data(zone_key, day, df, logger):
130 df = df.dropna(axis=1, how='any')
131 # Check for empty dataframe
132 if df.shape == (1, 1):
133 return []
134 df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')
135 df = df.iloc[:, :-1]
136
137 results = []
138 unknown_plants = set()
139 hour = 0
140 for column in df:
141 data = empty_record(zone_key)
142 data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime
143 for index, value in df[column].items():
144 source = POWER_PLANTS.get(index)
145 if not source:
146 source = 'unknown'
147 unknown_plants.add(index)
148 data['datetime'] = data_time
149 data['production'][source] += max(0.0, value)
150 hour += 1
151 results.append(data)
152
153 for plant in unknown_plants:
154 logger.warning('{} is not mapped to generation type'.format(plant),
155 extra={'key': zone_key})
156
157 return results
158
159
160 def fetch_production(zone_key='CR', session=None,
161 target_datetime=None, logger=logging.getLogger(__name__)):
162 # ensure we have an arrow object. if no target_datetime is specified, this defaults to now.
163 target_datetime = arrow.get(target_datetime).to(TIMEZONE)
164
165 if target_datetime < arrow.get('2012-07-01'):
166 # data availability limit found by manual trial and error
167 logger.error('CR API does not provide data before 2012-07-01, '
168 '{} was requested'.format(target_datetime),
169 extra={"key": zone_key})
170 return None
171
172 # Do not use existing session as some amount of cache is taking place
173 r = requests.session()
174 url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'
175 response = r.get(url)
176
177 soup = BeautifulSoup(response.text, 'html.parser')
178 jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
179
180 data = [
181 ('formPosdespacho', 'formPosdespacho'),
182 ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),
183 ('formPosdespacho:pickFecha', ''),
184 ('formPosdespacho:j_idt60_selection', ''),
185 ('formPosdespacho:j_idt60_scrollState', '0,1915'),
186 ('javax.faces.ViewState', jsf_view_state),
187 ]
188 response = r.post(url, cookies={}, data=data)
189
190 # tell pandas which table to use by providing CHARACTERISTIC_NAME
191 df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]
192
193 results = df_to_data(zone_key, target_datetime, df, logger)
194
195 return results
196
197
198 def fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):
199 """Requests the last known power exchange (in MW) between two regions
200
201 Arguments:
202 zone_key1 -- the first country code
203 zone_key2 -- the second country code; order of the two codes in params doesn't matter
204 session (optional) -- request session passed in order to re-use an existing session
205
206 Return:
207 A dictionary in the form:
208 {
209 'sortedZoneKeys': 'DK->NO',
210 'datetime': '2017-01-01T00:00:00Z',
211 'netFlow': 0.0,
212 'source': 'mysource.com'
213 }
214
215 where net flow is from DK into NO
216 """
217 if target_datetime:
218 raise NotImplementedError('This parser is not yet able to parse past dates')
219
220 sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
221
222 df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)
223
224 if sorted_zone_keys == 'CR->NI':
225 flow = df['NICR'][0]
226 elif sorted_zone_keys == 'CR->PA':
227 flow = -1 * df['CRPA'][0]
228 else:
229 raise NotImplementedError('This exchange pair is not implemented')
230
231 data = {
232 'datetime': arrow.now(TIMEZONE).datetime,
233 'sortedZoneKeys': sorted_zone_keys,
234 'netFlow': flow,
235 'source': 'enteoperador.org'
236 }
237
238 return data
239
240
241 if __name__ == '__main__':
242 """Main method, never used by the Electricity Map backend, but handy for testing."""
243
244 from pprint import pprint
245
246 print('fetch_production() ->')
247 pprint(fetch_production())
248
249 print('fetch_production(target_datetime=arrow.get("2018-03-13T12:00Z") ->')
250 pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))
251
252 # this should work
253 print('fetch_production(target_datetime=arrow.get("2013-03-13T12:00Z") ->')
254 pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))
255
256 # this should return None
257 print('fetch_production(target_datetime=arrow.get("2007-03-13T12:00Z") ->')
258 pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))
259
260 print('fetch_exchange() ->')
261 print(fetch_exchange())
262
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/CR.py b/parsers/CR.py
--- a/parsers/CR.py
+++ b/parsers/CR.py
@@ -67,6 +67,7 @@
u'Orotina': 'unknown',
u'Otros': 'unknown',
u'PE Mogote': 'wind',
+ u'PE Río Naranjo': 'hydro',
u'PEG': 'wind',
u'Pailas': 'geothermal',
u'Parque Solar Juanilama': 'solar',
@@ -151,7 +152,7 @@
results.append(data)
for plant in unknown_plants:
- logger.warning('{} is not mapped to generation type'.format(plant),
+ logger.warning(u'{} is not mapped to generation type'.format(plant),
extra={'key': zone_key})
return results
@@ -171,21 +172,19 @@
# Do not use existing session as some amount of cache is taking place
r = requests.session()
- url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'
+ url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'
response = r.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
- jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
+ jsf_view_state = soup.find("input", {"name": 'javax.faces.ViewState'})['value']
data = [
- ('formPosdespacho', 'formPosdespacho'),
('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),
('formPosdespacho:pickFecha', ''),
- ('formPosdespacho:j_idt60_selection', ''),
- ('formPosdespacho:j_idt60_scrollState', '0,1915'),
+ ('formPosdespacho_SUBMIT', 1),
('javax.faces.ViewState', jsf_view_state),
]
- response = r.post(url, cookies={}, data=data)
+ response = r.post(url, data=data)
# tell pandas which table to use by providing CHARACTERISTIC_NAME
df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]
|
{"golden_diff": "diff --git a/parsers/CR.py b/parsers/CR.py\n--- a/parsers/CR.py\n+++ b/parsers/CR.py\n@@ -67,6 +67,7 @@\n u'Orotina': 'unknown',\n u'Otros': 'unknown',\n u'PE Mogote': 'wind',\n+ u'PE R\u00edo Naranjo': 'hydro',\n u'PEG': 'wind',\n u'Pailas': 'geothermal',\n u'Parque Solar Juanilama': 'solar',\n@@ -151,7 +152,7 @@\n results.append(data)\n \n for plant in unknown_plants:\n- logger.warning('{} is not mapped to generation type'.format(plant),\n+ logger.warning(u'{} is not mapped to generation type'.format(plant),\n extra={'key': zone_key})\n \n return results\n@@ -171,21 +172,19 @@\n \n # Do not use existing session as some amount of cache is taking place\n r = requests.session()\n- url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n+ url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n response = r.get(url)\n \n soup = BeautifulSoup(response.text, 'html.parser')\n- jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']\n+ jsf_view_state = soup.find(\"input\", {\"name\": 'javax.faces.ViewState'})['value']\n \n data = [\n- ('formPosdespacho', 'formPosdespacho'),\n ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),\n ('formPosdespacho:pickFecha', ''),\n- ('formPosdespacho:j_idt60_selection', ''),\n- ('formPosdespacho:j_idt60_scrollState', '0,1915'),\n+ ('formPosdespacho_SUBMIT', 1),\n ('javax.faces.ViewState', jsf_view_state),\n ]\n- response = r.post(url, cookies={}, data=data)\n+ response = r.post(url, data=data)\n \n # tell pandas which table to use by providing CHARACTERISTIC_NAME\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\n", "issue": "Costa Rica page has changed and now errors\n```\r\nTraceback (most recent call last):\r\n File \"/home/feeder/lib/fetch_data.py\", line 131, in launch_parsers\r\n **parser_kwargs)\r\n File \"/home/contrib/parsers/CR.py\", line 178, in fetch_production\r\n jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']\r\nIndexError: list index out of range\r\n```\r\n\r\nhttps://github.com/tmrowco/electricitymap-contrib/blob/86f06791f2292c8a20e383a4642ced8b16083333/parsers/CR.py#L178\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\nimport logging\n\nimport arrow\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nTIMEZONE = 'America/Costa_Rica'\nDATE_FORMAT = 'DD/MM/YYYY'\nMONTH_FORMAT = 'MM/YYYY'\nPOWER_PLANTS = {\n u'Aeroenerg\u00eda': 'wind',\n u'Altamira': 'wind',\n u'Angostura': 'hydro',\n u'Arenal': 'hydro',\n u'Balsa Inferior': 'hydro',\n u'Barranca': 'unknown',\n u'Barro Morado': 'geothermal',\n u'Bijagua': 'hydro',\n u'Birris12': 'hydro',\n u'Birris3': 'hydro',\n u'Boca de Pozo': 'hydro',\n u'CNFL': 'unknown',\n u'Cach\u00ed': 'hydro',\n u'Campos Azules': 'wind',\n u'Canalete': 'unknown',\n u'Cariblanco': 'hydro',\n u'Carrillos': 'hydro',\n u'Ca\u00f1o Grande': 'hydro',\n u'Ca\u00f1o Grande III': 'hydro',\n u'Chiripa': 'wind',\n u'Chocosuelas': 'hydro',\n u'Chuc\u00e1s': 'hydro',\n u'Cubujuqu\u00ed': 'hydro',\n u'Daniel Guti\u00e9rrez': 'hydro',\n u'Dengo': 'hydro',\n u'Don Pedro': 'hydro',\n u'Do\u00f1a Julia': 'hydro',\n u'Echandi': 'hydro',\n u'El Angel': 'hydro',\n u'El Angel Ampliaci\u00f3n': 'hydro',\n u'El Embalse': 'hydro',\n u'El General': 'hydro',\n u'El Viejo': 'biomass',\n u'Garabito': 'oil',\n u'Garita': 'hydro',\n u'Gu\u00e1piles': 'oil',\n u'Hidrozarcas': 'hydro',\n u'La Esperanza (CoopeL)': 'hydro',\n u'La Joya': 'hydro',\n u'Los 
Negros': 'hydro',\n u'Los Negros II': 'hydro',\n u'Los Santos': 'wind',\n u'MOVASA': 'wind',\n u'Matamoros': 'unknown',\n u'Miravalles I': 'geothermal',\n u'Miravalles II': 'geothermal',\n u'Miravalles III': 'geothermal',\n u'Miravalles V': 'geothermal',\n u'Mo\u00edn I': 'oil',\n u'Mo\u00edn II': 'oil',\n u'Mo\u00edn III': 'oil',\n u'Oros\u00ed': 'wind',\n u'Orotina': 'unknown',\n u'Otros': 'unknown',\n u'PE Mogote': 'wind',\n u'PEG': 'wind',\n u'Pailas': 'geothermal',\n u'Parque Solar Juanilama': 'solar',\n u'Parque Solar Miravalles': 'solar',\n u'Pe\u00f1as Blancas': 'hydro',\n u'Pirr\u00eds': 'hydro',\n u'Plantas E\u00f3licas': 'wind',\n u'Platanar': 'hydro',\n u'Pocosol': 'hydro',\n u'Po\u00e1s I y II': 'hydro',\n u'Reventaz\u00f3n': 'hydro',\n u'R\u00edo Lajas': 'hydro',\n u'R\u00edo Macho': 'hydro',\n u'San Antonio': 'oil',\n u'San Lorenzo (C)': 'hydro',\n u'Sandillal': 'hydro',\n u'Suerkata': 'hydro',\n u'Taboga': 'biomass',\n u'Tacares': 'hydro',\n u'Tejona': 'wind',\n u'Tilawind': 'wind',\n u'Torito': 'hydro',\n u'Toro I': 'hydro',\n u'Toro II': 'hydro',\n u'Toro III': 'hydro',\n u'Tuis (JASEC)': 'hydro',\n u'Valle Central': 'wind',\n u'Vara Blanca': 'hydro',\n u'Ventanas-Garita': 'hydro',\n u'Vientos de La Perla': 'wind',\n u'Vientos de Miramar': 'wind',\n u'Vientos del Este': 'wind',\n u'Volc\u00e1n': 'hydro',\n}\n\nCHARACTERISTIC_NAME = 'Angostura'\n\n\ndef empty_record(zone_key):\n return {\n 'zoneKey': zone_key,\n 'capacity': {},\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {},\n 'source': 'grupoice.com'\n }\n\n\ndef df_to_data(zone_key, day, df, logger):\n df = df.dropna(axis=1, how='any')\n # Check for empty dataframe\n if df.shape == (1, 1):\n return []\n df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')\n df = df.iloc[:, :-1]\n\n results = []\n unknown_plants = set()\n hour = 0\n for column in df:\n data = empty_record(zone_key)\n data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime\n for index, value in df[column].items():\n source = POWER_PLANTS.get(index)\n if not source:\n source = 'unknown'\n unknown_plants.add(index)\n data['datetime'] = data_time\n data['production'][source] += max(0.0, value)\n hour += 1\n results.append(data)\n\n for plant in unknown_plants:\n logger.warning('{} is not mapped to generation type'.format(plant),\n extra={'key': zone_key})\n\n return results\n\n\ndef fetch_production(zone_key='CR', session=None,\n target_datetime=None, logger=logging.getLogger(__name__)):\n # ensure we have an arrow object. 
if no target_datetime is specified, this defaults to now.\n target_datetime = arrow.get(target_datetime).to(TIMEZONE)\n\n if target_datetime < arrow.get('2012-07-01'):\n # data availability limit found by manual trial and error\n logger.error('CR API does not provide data before 2012-07-01, '\n '{} was requested'.format(target_datetime),\n extra={\"key\": zone_key})\n return None\n\n # Do not use existing session as some amount of cache is taking place\n r = requests.session()\n url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n response = r.get(url)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']\n\n data = [\n ('formPosdespacho', 'formPosdespacho'),\n ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),\n ('formPosdespacho:pickFecha', ''),\n ('formPosdespacho:j_idt60_selection', ''),\n ('formPosdespacho:j_idt60_scrollState', '0,1915'),\n ('javax.faces.ViewState', jsf_view_state),\n ]\n response = r.post(url, cookies={}, data=data)\n\n # tell pandas which table to use by providing CHARACTERISTIC_NAME\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\n\n results = df_to_data(zone_key, target_datetime, df, logger)\n\n return results\n\n\ndef fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two regions\n\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)\n\n if sorted_zone_keys == 'CR->NI':\n flow = df['NICR'][0]\n elif sorted_zone_keys == 'CR->PA':\n flow = -1 * df['CRPA'][0]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n data = {\n 'datetime': arrow.now(TIMEZONE).datetime,\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': flow,\n 'source': 'enteoperador.org'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n from pprint import pprint\n\n print('fetch_production() ->')\n pprint(fetch_production())\n\n print('fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))\n\n # this should work\n print('fetch_production(target_datetime=arrow.get(\"2013-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))\n\n # this should return None\n print('fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))\n\n print('fetch_exchange() ->')\n print(fetch_exchange())\n", "path": "parsers/CR.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\nimport logging\n\nimport arrow\nimport pandas as pd\nimport requests\nfrom bs4 import 
BeautifulSoup\n\nTIMEZONE = 'America/Costa_Rica'\nDATE_FORMAT = 'DD/MM/YYYY'\nMONTH_FORMAT = 'MM/YYYY'\nPOWER_PLANTS = {\n u'Aeroenerg\u00eda': 'wind',\n u'Altamira': 'wind',\n u'Angostura': 'hydro',\n u'Arenal': 'hydro',\n u'Balsa Inferior': 'hydro',\n u'Barranca': 'unknown',\n u'Barro Morado': 'geothermal',\n u'Bijagua': 'hydro',\n u'Birris12': 'hydro',\n u'Birris3': 'hydro',\n u'Boca de Pozo': 'hydro',\n u'CNFL': 'unknown',\n u'Cach\u00ed': 'hydro',\n u'Campos Azules': 'wind',\n u'Canalete': 'unknown',\n u'Cariblanco': 'hydro',\n u'Carrillos': 'hydro',\n u'Ca\u00f1o Grande': 'hydro',\n u'Ca\u00f1o Grande III': 'hydro',\n u'Chiripa': 'wind',\n u'Chocosuelas': 'hydro',\n u'Chuc\u00e1s': 'hydro',\n u'Cubujuqu\u00ed': 'hydro',\n u'Daniel Guti\u00e9rrez': 'hydro',\n u'Dengo': 'hydro',\n u'Don Pedro': 'hydro',\n u'Do\u00f1a Julia': 'hydro',\n u'Echandi': 'hydro',\n u'El Angel': 'hydro',\n u'El Angel Ampliaci\u00f3n': 'hydro',\n u'El Embalse': 'hydro',\n u'El General': 'hydro',\n u'El Viejo': 'biomass',\n u'Garabito': 'oil',\n u'Garita': 'hydro',\n u'Gu\u00e1piles': 'oil',\n u'Hidrozarcas': 'hydro',\n u'La Esperanza (CoopeL)': 'hydro',\n u'La Joya': 'hydro',\n u'Los Negros': 'hydro',\n u'Los Negros II': 'hydro',\n u'Los Santos': 'wind',\n u'MOVASA': 'wind',\n u'Matamoros': 'unknown',\n u'Miravalles I': 'geothermal',\n u'Miravalles II': 'geothermal',\n u'Miravalles III': 'geothermal',\n u'Miravalles V': 'geothermal',\n u'Mo\u00edn I': 'oil',\n u'Mo\u00edn II': 'oil',\n u'Mo\u00edn III': 'oil',\n u'Oros\u00ed': 'wind',\n u'Orotina': 'unknown',\n u'Otros': 'unknown',\n u'PE Mogote': 'wind',\n u'PE R\u00edo Naranjo': 'hydro',\n u'PEG': 'wind',\n u'Pailas': 'geothermal',\n u'Parque Solar Juanilama': 'solar',\n u'Parque Solar Miravalles': 'solar',\n u'Pe\u00f1as Blancas': 'hydro',\n u'Pirr\u00eds': 'hydro',\n u'Plantas E\u00f3licas': 'wind',\n u'Platanar': 'hydro',\n u'Pocosol': 'hydro',\n u'Po\u00e1s I y II': 'hydro',\n u'Reventaz\u00f3n': 'hydro',\n u'R\u00edo Lajas': 'hydro',\n u'R\u00edo Macho': 'hydro',\n u'San Antonio': 'oil',\n u'San Lorenzo (C)': 'hydro',\n u'Sandillal': 'hydro',\n u'Suerkata': 'hydro',\n u'Taboga': 'biomass',\n u'Tacares': 'hydro',\n u'Tejona': 'wind',\n u'Tilawind': 'wind',\n u'Torito': 'hydro',\n u'Toro I': 'hydro',\n u'Toro II': 'hydro',\n u'Toro III': 'hydro',\n u'Tuis (JASEC)': 'hydro',\n u'Valle Central': 'wind',\n u'Vara Blanca': 'hydro',\n u'Ventanas-Garita': 'hydro',\n u'Vientos de La Perla': 'wind',\n u'Vientos de Miramar': 'wind',\n u'Vientos del Este': 'wind',\n u'Volc\u00e1n': 'hydro',\n}\n\nCHARACTERISTIC_NAME = 'Angostura'\n\n\ndef empty_record(zone_key):\n return {\n 'zoneKey': zone_key,\n 'capacity': {},\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {},\n 'source': 'grupoice.com'\n }\n\n\ndef df_to_data(zone_key, day, df, logger):\n df = df.dropna(axis=1, how='any')\n # Check for empty dataframe\n if df.shape == (1, 1):\n return []\n df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')\n df = df.iloc[:, :-1]\n\n results = []\n unknown_plants = set()\n hour = 0\n for column in df:\n data = empty_record(zone_key)\n data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime\n for index, value in df[column].items():\n source = POWER_PLANTS.get(index)\n if not source:\n source = 'unknown'\n unknown_plants.add(index)\n data['datetime'] = data_time\n 
data['production'][source] += max(0.0, value)\n hour += 1\n results.append(data)\n\n for plant in unknown_plants:\n logger.warning(u'{} is not mapped to generation type'.format(plant),\n extra={'key': zone_key})\n\n return results\n\n\ndef fetch_production(zone_key='CR', session=None,\n target_datetime=None, logger=logging.getLogger(__name__)):\n # ensure we have an arrow object. if no target_datetime is specified, this defaults to now.\n target_datetime = arrow.get(target_datetime).to(TIMEZONE)\n\n if target_datetime < arrow.get('2012-07-01'):\n # data availability limit found by manual trial and error\n logger.error('CR API does not provide data before 2012-07-01, '\n '{} was requested'.format(target_datetime),\n extra={\"key\": zone_key})\n return None\n\n # Do not use existing session as some amount of cache is taking place\n r = requests.session()\n url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n response = r.get(url)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n jsf_view_state = soup.find(\"input\", {\"name\": 'javax.faces.ViewState'})['value']\n\n data = [\n ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),\n ('formPosdespacho:pickFecha', ''),\n ('formPosdespacho_SUBMIT', 1),\n ('javax.faces.ViewState', jsf_view_state),\n ]\n response = r.post(url, data=data)\n\n # tell pandas which table to use by providing CHARACTERISTIC_NAME\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\n\n results = df_to_data(zone_key, target_datetime, df, logger)\n\n return results\n\n\ndef fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two regions\n\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)\n\n if sorted_zone_keys == 'CR->NI':\n flow = df['NICR'][0]\n elif sorted_zone_keys == 'CR->PA':\n flow = -1 * df['CRPA'][0]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n data = {\n 'datetime': arrow.now(TIMEZONE).datetime,\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': flow,\n 'source': 'enteoperador.org'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n from pprint import pprint\n\n print('fetch_production() ->')\n pprint(fetch_production())\n\n print('fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))\n\n # this should work\n print('fetch_production(target_datetime=arrow.get(\"2013-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))\n\n # this should return None\n print('fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\") ->')\n 
pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))\n\n print('fetch_exchange() ->')\n print(fetch_exchange())\n", "path": "parsers/CR.py"}]}
| 3,505 | 536 |
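The golden diff above swaps the JSF ViewState lookup from a CSS id selector to an attribute-based `find`. As a rough illustration of why the patched form is more robust, here is a minimal sketch; the HTML snippet and the `abc123` value are invented for demonstration and are not part of the dataset record.

```python
# Illustrative sketch; the markup and the value are made up for demonstration.
from bs4 import BeautifulSoup

html = '<input type="hidden" name="javax.faces.ViewState" id="javax.faces.ViewState" value="abc123">'
soup = BeautifulSoup(html, "html.parser")

# Old lookup: a CSS id selector. CSS treats the dots in "#javax.faces.ViewState"
# as class selectors, so this matches nothing here, and indexing [0] on the
# empty result raises an IndexError.
print(soup.select('#javax.faces.ViewState'))  # []

# Patched lookup: match on the "name" attribute, which is unaffected by the
# dots in the identifier.
print(soup.find("input", {"name": "javax.faces.ViewState"})["value"])  # abc123
```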
gh_patches_debug_1349 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3150 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
#3289 Interactive Event
**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung/
**device & browser:** *e.g. Firefox 80.0 (64-bit)*
**Comment/Question:**

01 – The questions were supposed to be 20px, but its also fine for me like that.
02 – The Icons are different and fine, but this one seems a bit off, I have save the ones I did here in case you need them: Nextcloud/Projekte/meinBerlin/Material/CI/Bilder & Grafiken/icons/svg
03 – Shall we put a max number of characters here since the questions should be short? @CarolingerSeilchenspringer
04 – I did the spacing between the questions using 8px but if that was already the meinBerlin spacing guidelines, we leave like that.
05 – The category labels seem a bit far from the question, if possible, could we shorten the spacing here a bit so it gets closer from the question and "far" from the Like button?
06 – Are the moderators supposed to like the question? If yes, its not working for me. :/

07 – If possible can we use here the text style from the notes (14px)?

08 – The statistics seem a bit off compared to the sketch on Zeplin. Same on Mobile.

09 – Selected categories seem a bit off after selecting them, they kind of stay there instead of just being selected. (Do you know what I mean? I can also explain to you better sharing the screen or something).

10 – The Menu on Mobile is different than on Zeplin.

11 – Same here, the infobox is difficult to understand. Also the buttons are off, spacing inbetween them are different.

12 – Small thing here but if possible, the success infobox should be aligned properly to the left.

13 – Can we increase the spacing here a bit between the link and icon?

14 – The Blueprint is not updated, I left the .svg file on Taiga
I just didn't manage to test the Initiator's view cause there is no project open to testing, could you edit the phase from one of yours and send me the link? Cause all the ones I created I'm the moderator as well.
Thanks a lot! Let me know if you need anything :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/dashboard/blueprints.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2
3 from adhocracy4.dashboard.blueprints import ProjectBlueprint
4 from meinberlin.apps.budgeting import phases as budgeting_phases
5 from meinberlin.apps.documents import phases as documents_phases
6 from meinberlin.apps.ideas import phases as ideas_phases
7 from meinberlin.apps.kiezkasse import phases as kiezkasse_phases
8 from meinberlin.apps.livequestions import phases as livequestion_phases
9 from meinberlin.apps.mapideas import phases as mapideas_phases
10 from meinberlin.apps.maptopicprio import phases as maptopicprio_phases
11 from meinberlin.apps.polls import phases as poll_phases
12 from meinberlin.apps.topicprio import phases as topicprio_phases
13
14 blueprints = [
15 ('brainstorming',
16 ProjectBlueprint(
17 title=_('Brainstorming'),
18 description=_(
19 'Collect first ideas for a specific topic and comment on them.'
20 ),
21 content=[
22 ideas_phases.CollectPhase(),
23 ],
24 image='images/brainstorming.svg',
25 settings_model=None,
26 )),
27 ('map-brainstorming',
28 ProjectBlueprint(
29 title=_('Spatial Brainstorming'),
30 description=_(
31 'Collect location specific ideas for a topic and comment on them.'
32 ),
33 content=[
34 mapideas_phases.CollectPhase(),
35 ],
36 image='images/map-brainstorming.svg',
37 settings_model=('a4maps', 'AreaSettings'),
38 )),
39 ('map-idea-collection',
40 ProjectBlueprint(
41 title=_('Spatial Idea Collection'),
42 description=_(
43 'Collect location specific ideas that can be rated and commented.'
44 ),
45 content=[
46 mapideas_phases.CollectFeedbackPhase(),
47 ],
48 image='images/map-idea-collection.svg',
49 settings_model=('a4maps', 'AreaSettings'),
50 )),
51 ('agenda-setting',
52 ProjectBlueprint(
53 title=_('Agenda Setting'),
54 description=_(
55 'With Agenda-Setting it’s possible to identify topics and to '
56 'define mission statements. Anyone can submit topics that can be '
57 'commented and rated.'
58 ),
59 content=[
60 ideas_phases.CollectFeedbackPhase(),
61 ],
62 image='images/agenda-setting.svg',
63 settings_model=None,
64 )),
65 ('text-review',
66 ProjectBlueprint(
67 title=_('Text Review'),
68 description=_(
69 'In the text-review it’s possible to structure draft texts '
70 'that can be commented.'
71 ),
72 content=[
73 documents_phases.CommentPhase(),
74 ],
75 image='images/text-review.svg',
76 settings_model=None,
77 )),
78 ('participatory-budgeting',
79 ProjectBlueprint(
80 title=_('Participatory budgeting'),
81 description=_(
82 'With participatory-budgeting it’s possible to make proposals '
83 'with budget specifications and locate them. Anyone can comment '
84 'and rate on different proposals.'
85 ),
86 content=[
87 budgeting_phases.RequestPhase()
88 ],
89 image='images/participatory-budgeting.svg',
90 settings_model=('a4maps', 'AreaSettings'),
91 )),
92 ('poll',
93 ProjectBlueprint(
94 title=_('Poll'),
95 description=_(
96 'Create a poll with multiple questions and possible answers. '
97 'Anyone can cast votes and comment on the poll.'
98 ),
99 content=[
100 poll_phases.VotingPhase(),
101 ],
102 image='images/poll.svg',
103 settings_model=None,
104 )),
105 ('topic-prioritization',
106 ProjectBlueprint(
107 title=_('Topic Priorization'),
108 description=_(
109 'Comment and prioritize topics.'
110 ),
111 content=[
112 topicprio_phases.PrioritizePhase(),
113 ],
114 image='images/priorization.svg',
115 settings_model=None,
116 )),
117 ('map-topic-prioritization',
118 ProjectBlueprint(
119 title=_('Place Prioritization'),
120 description=_(
121 'Comment and prioritize places located on a map.'
122 ),
123 content=[
124 maptopicprio_phases.PrioritizePhase(),
125 ],
126 image='images/place-priotization.svg',
127 settings_model=('a4maps', 'AreaSettings'),
128 )),
129 ('kiezkasse',
130 ProjectBlueprint(
131 title=_('Kiezkasse'),
132 description=_(
133 'With kiezkasse it’s possible to make proposals '
134 'with budget specifications and locate them. '
135 'The proposals can be commented and rated.'
136 ),
137 content=[
138 kiezkasse_phases.RequestFeedbackPhase(),
139 ],
140 image='images/kiezkasse.svg',
141 settings_model=('a4maps', 'AreaSettings'),
142 )),
143 ('interactive-event',
144 ProjectBlueprint(
145 title=_('Interactive Event'),
146 description=_(
147 'The participants of an event can ask their questions online. '
148 'Other participants can support the question. You as the '
149 'moderator can sort the questions by support or '
150 'characteristic.'
151 ),
152 content=[
153 livequestion_phases.IssuePhase(),
154 ],
155 image='images/text-review.svg',
156 settings_model=None,
157 )),
158 ]
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/dashboard/blueprints.py b/meinberlin/apps/dashboard/blueprints.py
--- a/meinberlin/apps/dashboard/blueprints.py
+++ b/meinberlin/apps/dashboard/blueprints.py
@@ -152,7 +152,7 @@
content=[
livequestion_phases.IssuePhase(),
],
- image='images/text-review.svg',
+ image='images/interactive-event.svg',
settings_model=None,
)),
]
|
{"golden_diff": "diff --git a/meinberlin/apps/dashboard/blueprints.py b/meinberlin/apps/dashboard/blueprints.py\n--- a/meinberlin/apps/dashboard/blueprints.py\n+++ b/meinberlin/apps/dashboard/blueprints.py\n@@ -152,7 +152,7 @@\n content=[\n livequestion_phases.IssuePhase(),\n ],\n- image='images/text-review.svg',\n+ image='images/interactive-event.svg',\n settings_model=None,\n )),\n ]\n", "issue": "#3289 Interactive Event \n**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung/\r\n**device & browser:** *e.g. Firefox 80.0 (64-bit)*\r\n\r\n**Comment/Question:**\r\n\r\n01 \u2013 The questions were supposed to be 20px, but its also fine for me like that. \r\n02 \u2013 The Icons are different and fine, but this one seems a bit off, I have save the ones I did here in case you need them: Nextcloud/Projekte/meinBerlin/Material/CI/Bilder & Grafiken/icons/svg\r\n03 \u2013 Shall we put a max number of characters here since the questions should be short? @CarolingerSeilchenspringer \r\n04 \u2013 I did the spacing between the questions using 8px but if that was already the meinBerlin spacing guidelines, we leave like that. \r\n05 \u2013 The category labels seem a bit far from the question, if possible, could we shorten the spacing here a bit so it gets closer from the question and \"far\" from the Like button? \r\n06 \u2013 Are the moderators supposed to like the question? If yes, its not working for me. :/\r\n\r\n\r\n07 \u2013 If possible can we use here the text style from the notes (14px)? \r\n\r\n\r\n08 \u2013 The statistics seem a bit off compared to the sketch on Zeplin. Same on Mobile.\r\n\r\n\r\n09 \u2013 Selected categories seem a bit off after selecting them, they kind of stay there instead of just being selected. (Do you know what I mean? I can also explain to you better sharing the screen or something).\r\n\r\n\r\n10 \u2013 The Menu on Mobile is different than on Zeplin. \r\n\r\n\r\n11 \u2013 Same here, the infobox is difficult to understand. Also the buttons are off, spacing inbetween them are different.\r\n\r\n\r\n12 \u2013 Small thing here but if possible, the success infobox should be aligned properly to the left. \r\n\r\n\r\n13 \u2013 Can we increase the spacing here a bit between the link and icon? \r\n\r\n\r\n14 \u2013 The Blueprint is not updated, I left the .svg file on Taiga\r\n \r\nI just didn't manage to test the Initiator's view cause there is no project open to testing, could you edit the phase from one of yours and send me the link? Cause all the ones I created I'm the moderator as well. \r\n\r\nThanks a lot! 
Let me know if you need anything :)\r\n\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.kiezkasse import phases as kiezkasse_phases\nfrom meinberlin.apps.livequestions import phases as livequestion_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.maptopicprio import phases as maptopicprio_phases\nfrom meinberlin.apps.polls import phases as poll_phases\nfrom meinberlin.apps.topicprio import phases as topicprio_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectFeedbackPhase(),\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it\u2019s possible to identify topics and to '\n 'define mission statements. Anyone can submit topics that can be '\n 'commented and rated.'\n ),\n content=[\n ideas_phases.CollectFeedbackPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it\u2019s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it\u2019s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'\n ),\n content=[\n budgeting_phases.RequestPhase()\n ],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. 
'\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('topic-prioritization',\n ProjectBlueprint(\n title=_('Topic Priorization'),\n description=_(\n 'Comment and prioritize topics.'\n ),\n content=[\n topicprio_phases.PrioritizePhase(),\n ],\n image='images/priorization.svg',\n settings_model=None,\n )),\n ('map-topic-prioritization',\n ProjectBlueprint(\n title=_('Place Prioritization'),\n description=_(\n 'Comment and prioritize places located on a map.'\n ),\n content=[\n maptopicprio_phases.PrioritizePhase(),\n ],\n image='images/place-priotization.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('kiezkasse',\n ProjectBlueprint(\n title=_('Kiezkasse'),\n description=_(\n 'With kiezkasse it\u2019s possible to make proposals '\n 'with budget specifications and locate them. '\n 'The proposals can be commented and rated.'\n ),\n content=[\n kiezkasse_phases.RequestFeedbackPhase(),\n ],\n image='images/kiezkasse.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('interactive-event',\n ProjectBlueprint(\n title=_('Interactive Event'),\n description=_(\n 'The participants of an event can ask their questions online. '\n 'Other participants can support the question. You as the '\n 'moderator can sort the questions by support or '\n 'characteristic.'\n ),\n content=[\n livequestion_phases.IssuePhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n]\n", "path": "meinberlin/apps/dashboard/blueprints.py"}], "after_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.kiezkasse import phases as kiezkasse_phases\nfrom meinberlin.apps.livequestions import phases as livequestion_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.maptopicprio import phases as maptopicprio_phases\nfrom meinberlin.apps.polls import phases as poll_phases\nfrom meinberlin.apps.topicprio import phases as topicprio_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectFeedbackPhase(),\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it\u2019s possible to identify topics and to '\n 'define mission statements. 
Anyone can submit topics that can be '\n 'commented and rated.'\n ),\n content=[\n ideas_phases.CollectFeedbackPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it\u2019s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it\u2019s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'\n ),\n content=[\n budgeting_phases.RequestPhase()\n ],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. '\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('topic-prioritization',\n ProjectBlueprint(\n title=_('Topic Priorization'),\n description=_(\n 'Comment and prioritize topics.'\n ),\n content=[\n topicprio_phases.PrioritizePhase(),\n ],\n image='images/priorization.svg',\n settings_model=None,\n )),\n ('map-topic-prioritization',\n ProjectBlueprint(\n title=_('Place Prioritization'),\n description=_(\n 'Comment and prioritize places located on a map.'\n ),\n content=[\n maptopicprio_phases.PrioritizePhase(),\n ],\n image='images/place-priotization.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('kiezkasse',\n ProjectBlueprint(\n title=_('Kiezkasse'),\n description=_(\n 'With kiezkasse it\u2019s possible to make proposals '\n 'with budget specifications and locate them. '\n 'The proposals can be commented and rated.'\n ),\n content=[\n kiezkasse_phases.RequestFeedbackPhase(),\n ],\n image='images/kiezkasse.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('interactive-event',\n ProjectBlueprint(\n title=_('Interactive Event'),\n description=_(\n 'The participants of an event can ask their questions online. '\n 'Other participants can support the question. You as the '\n 'moderator can sort the questions by support or '\n 'characteristic.'\n ),\n content=[\n livequestion_phases.IssuePhase(),\n ],\n image='images/interactive-event.svg',\n settings_model=None,\n )),\n]\n", "path": "meinberlin/apps/dashboard/blueprints.py"}]}
| 2,958 | 103 |
gh_patches_debug_10405 | rasdani/github-patches | git_diff | e-valuation__EvaP-340 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A user must never evaluate him/herself
In rare cases a user can be participant and contributor in a course (e.g. participating student and tutor for the exercises at the same time).
The system has to make sure that this user can't answer questions related to him/herself.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/student/views.py`
Content:
```
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.db import transaction
4 from django.shortcuts import get_object_or_404, redirect, render_to_response
5 from django.template import RequestContext
6 from django.utils.datastructures import SortedDict
7 from django.utils.translation import ugettext as _
8
9 from evap.evaluation.auth import login_required
10 from evap.evaluation.models import Course, Semester
11 from evap.evaluation.tools import questionnaires_and_contributions
12 from evap.student.forms import QuestionsForm
13 from evap.student.tools import make_form_identifier
14
15 from datetime import datetime
16
17
18 @login_required
19 def index(request):
20 # retrieve all courses, which the user can evaluate at some point
21 users_courses = Course.objects.filter(
22 participants=request.user
23 ).exclude(
24 voters=request.user
25 )
26 # split up into current and future courses
27 current_courses = users_courses.filter(state='inEvaluation')
28 future_courses = users_courses.filter(state='approved')
29
30 return render_to_response(
31 "student_index.html",
32 dict(current_courses=current_courses,
33 future_courses=future_courses),
34 context_instance=RequestContext(request))
35
36
37 @login_required
38 def vote(request, course_id):
39 # retrieve course and make sure that the user is allowed to vote
40 course = get_object_or_404(Course, id=course_id)
41 if not course.can_user_vote(request.user):
42 raise PermissionDenied
43
44 # build forms
45 forms = SortedDict()
46 for questionnaire, contribution in questionnaires_and_contributions(course):
47 form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)
48 forms[(contribution, questionnaire)] = form
49
50 if all(form.is_valid() for form in forms.values()):
51 # begin vote operation
52 with transaction.commit_on_success():
53 for (contribution, questionnaire), form in forms.items():
54 for question in questionnaire.question_set.all():
55 identifier = make_form_identifier(contribution, questionnaire, question)
56 value = form.cleaned_data.get(identifier)
57
58 if type(value) in [str, unicode]:
59 value = value.strip()
60
61 if value == 6: #no answer
62 value = None
63
64 # store the answer if one was given
65 if value:
66 question.answer_class.objects.create(
67 contribution=contribution,
68 question=question,
69 answer=value)
70
71 # remember that the user voted already
72 course.voters.add(request.user)
73
74 messages.add_message(request, messages.INFO, _("Your vote was recorded."))
75 return redirect('evap.student.views.index')
76 else:
77 return render_to_response(
78 "student_vote.html",
79 dict(forms=forms.values(),
80 course=course),
81 context_instance=RequestContext(request))
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/student/views.py b/evap/student/views.py
--- a/evap/student/views.py
+++ b/evap/student/views.py
@@ -45,6 +45,8 @@
forms = SortedDict()
for questionnaire, contribution in questionnaires_and_contributions(course):
form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)
+ if form.contribution.contributor == request.user:
+ continue # users shall not vote about themselves
forms[(contribution, questionnaire)] = form
if all(form.is_valid() for form in forms.values()):
|
{"golden_diff": "diff --git a/evap/student/views.py b/evap/student/views.py\n--- a/evap/student/views.py\n+++ b/evap/student/views.py\n@@ -45,6 +45,8 @@\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n+ if form.contribution.contributor == request.user:\n+ continue # users shall not vote about themselves\n forms[(contribution, questionnaire)] = form\n \n if all(form.is_valid() for form in forms.values()):\n", "issue": "A user must never evaluate him/herself\nIn rare cases a user can be participant and contributor in a course (e.g. participating student and tutor for the exercises at the same time).\nThe system has to make sure that this user can't answer questions related to him/herself.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import login_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import questionnaires_and_contributions\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom datetime import datetime\n\n\n@login_required\ndef index(request):\n # retrieve all courses, which the user can evaluate at some point\n users_courses = Course.objects.filter(\n participants=request.user\n ).exclude(\n voters=request.user\n )\n # split up into current and future courses\n current_courses = users_courses.filter(state='inEvaluation')\n future_courses = users_courses.filter(state='approved')\n\n return render_to_response(\n \"student_index.html\",\n dict(current_courses=current_courses,\n future_courses=future_courses),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # build forms\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n forms[(contribution, questionnaire)] = form\n\n if all(form.is_valid() for form in forms.values()):\n # begin vote operation\n with transaction.commit_on_success():\n for (contribution, questionnaire), form in forms.items():\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = form.cleaned_data.get(identifier)\n\n if type(value) in [str, unicode]:\n value = value.strip()\n\n if value == 6: #no answer\n value = None\n\n # store the answer if one was given\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n messages.add_message(request, messages.INFO, _(\"Your vote was recorded.\"))\n return redirect('evap.student.views.index')\n else:\n return render_to_response(\n \"student_vote.html\",\n dict(forms=forms.values(),\n course=course),\n 
context_instance=RequestContext(request))\n", "path": "evap/student/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import login_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import questionnaires_and_contributions\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom datetime import datetime\n\n\n@login_required\ndef index(request):\n # retrieve all courses, which the user can evaluate at some point\n users_courses = Course.objects.filter(\n participants=request.user\n ).exclude(\n voters=request.user\n )\n # split up into current and future courses\n current_courses = users_courses.filter(state='inEvaluation')\n future_courses = users_courses.filter(state='approved')\n\n return render_to_response(\n \"student_index.html\",\n dict(current_courses=current_courses,\n future_courses=future_courses),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # build forms\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n if form.contribution.contributor == request.user:\n continue # users shall not vote about themselves\n forms[(contribution, questionnaire)] = form\n\n if all(form.is_valid() for form in forms.values()):\n # begin vote operation\n with transaction.commit_on_success():\n for (contribution, questionnaire), form in forms.items():\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = form.cleaned_data.get(identifier)\n\n if type(value) in [str, unicode]:\n value = value.strip()\n\n if value == 6: #no answer\n value = None\n\n # store the answer if one was given\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n messages.add_message(request, messages.INFO, _(\"Your vote was recorded.\"))\n return redirect('evap.student.views.index')\n else:\n return render_to_response(\n \"student_vote.html\",\n dict(forms=forms.values(),\n course=course),\n context_instance=RequestContext(request))\n", "path": "evap/student/views.py"}]}
| 1,051 | 134 |
gh_patches_debug_49873
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-6285
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
After successful charge, order redirection to success page fails

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/ticketing.py`
Content:
```
1 from datetime import datetime
2
3 from app.api.helpers.db import save_to_db, get_count
4 from app.api.helpers.exceptions import ConflictException
5 from app.api.helpers.files import make_frontend_url
6 from app.api.helpers.mail import send_email_to_attendees
7 from app.api.helpers.notification import send_notif_to_attendees, send_notif_ticket_purchase_organizer
8 from app.api.helpers.order import delete_related_attendees_for_order, create_pdf_tickets_for_holder
9 from app.api.helpers.payment import StripePaymentsManager, PayPalPaymentsManager
10 from app.models import db
11 from app.models.ticket_fee import TicketFees
12 from app.models.ticket_holder import TicketHolder
13
14
15 class TicketingManager(object):
16 """All ticketing and orders related helper functions"""
17
18 @staticmethod
19 def get_order_expiry():
20 return 10
21
22 @staticmethod
23 def match_discount_quantity(discount_code, ticket_holders=None):
24 qty = 0
25 old_holders = get_count(TicketHolder.query.filter(TicketHolder.ticket_id.in_(discount_code.tickets.split(","))))
26
27 for holder in ticket_holders:
28 ticket_holder = TicketHolder.query.filter_by(id=holder).one()
29 if ticket_holder.ticket.id in discount_code.tickets.split(","):
30 qty += 1
31 if (qty + old_holders) <= discount_code.tickets_number and \
32 discount_code.min_quantity <= qty <= discount_code.max_quantity:
33 return True
34
35 return False
36
37 @staticmethod
38 def calculate_update_amount(order):
39 discount = None
40 if order.discount_code_id:
41 discount = order.discount_code
42 # Access code part will be done ticket_holders API
43 amount = 0
44 total_discount = 0
45 fees = TicketFees.query.filter_by(currency=order.event.payment_currency).first()
46
47 for order_ticket in order.order_tickets:
48 with db.session.no_autoflush:
49 if order_ticket.ticket.is_fee_absorbed or not fees:
50 ticket_amount = (order_ticket.ticket.price * order_ticket.quantity)
51 amount += (order_ticket.ticket.price * order_ticket.quantity)
52 else:
53 order_fee = fees.service_fee * (order_ticket.ticket.price * order_ticket.quantity) / 100
54 if order_fee > fees.maximum_fee:
55 ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee
56 amount += (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee
57 else:
58 ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + order_fee
59 amount += (order_ticket.ticket.price * order_ticket.quantity) + order_fee
60
61 if discount and str(order_ticket.ticket.id) in discount.tickets.split(","):
62 if discount.type == "amount":
63 total_discount += discount.value * order_ticket.quantity
64 else:
65 total_discount += discount.value * ticket_amount / 100
66
67 if discount:
68 if discount.type == "amount":
69 order.amount = max(amount - total_discount, 0)
70 elif discount.type == "percent":
71 order.amount = amount - (discount.value * amount / 100.0)
72 else:
73 order.amount = amount
74 save_to_db(order)
75 return order
76
77 @staticmethod
78 def charge_stripe_order_payment(order, token_id):
79 """
80 Charge the user through Stripe
81 :param order: Order for which to charge for
82 :param token_id: Stripe token
83 :return:
84 """
85 # save the stripe token with the order
86 order.stripe_token = token_id
87 save_to_db(order)
88
89 # charge the user
90 try:
91 charge = StripePaymentsManager.capture_payment(order)
92 except ConflictException as e:
93 # payment failed hence expire the order
94 order.status = 'expired'
95 save_to_db(order)
96
97 # delete related attendees to unlock the tickets
98 delete_related_attendees_for_order(order)
99
100 raise e
101
102 # charge.paid is true if the charge succeeded, or was successfully authorized for later capture.
103 if charge.paid:
104 # update the order in the db.
105 order.paid_via = charge.source.object
106 order.brand = charge.source.brand
107 order.exp_month = charge.source.exp_month
108 order.exp_year = charge.source.exp_year
109 order.last4 = charge.source.last4
110 order.transaction_id = charge.id
111 order.status = 'completed'
112 order.completed_at = datetime.utcnow()
113 save_to_db(order)
114
115 # create tickets.
116 create_pdf_tickets_for_holder(order)
117
118 # send email and notifications.
119 send_email_to_attendees(order, current_user.id)
120 send_notif_to_attendees(order, current_user.id)
121
122 order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))
123 for organizer in order.event.organizers:
124 send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,
125 order.id)
126 if order.event.owner:
127 send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,
128 order.event.name, order.id)
129
130 return True, 'Charge successful'
131 else:
132 # payment failed hence expire the order
133 order.status = 'expired'
134 save_to_db(order)
135
136 # delete related attendees to unlock the tickets
137 delete_related_attendees_for_order(order)
138
139 # return the failure message from stripe.
140 return False, charge.failure_message
141
142 @staticmethod
143 def charge_paypal_order_payment(order, paypal_payer_id, paypal_payment_id):
144 """
145 Charge the user through paypal.
146 :param order: Order for which to charge for.
147 :param paypal_payment_id: payment_id
148 :param paypal_payer_id: payer_id
149 :return:
150 """
151
152 # save the paypal payment_id with the order
153 order.paypal_token = paypal_payment_id
154 save_to_db(order)
155
156 # create the transaction.
157 status, error = PayPalPaymentsManager.execute_payment(paypal_payer_id, paypal_payment_id)
158
159 if status:
160 # successful transaction hence update the order details.
161 order.paid_via = 'paypal'
162 order.status = 'completed'
163 order.transaction_id = paypal_payment_id
164 order.completed_at = datetime.utcnow()
165 save_to_db(order)
166
167 # create tickets
168 create_pdf_tickets_for_holder(order)
169
170 # send email and notifications
171 send_email_to_attendees(order, order.user_id)
172 send_notif_to_attendees(order, order.user_id)
173
174 order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))
175 for organizer in order.event.organizers:
176 send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,
177 order.id)
178 if order.event.owner:
179 send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,
180 order.event.name, order.id)
181
182 return True, 'Charge successful'
183 else:
184 # payment failed hence expire the order
185 order.status = 'expired'
186 save_to_db(order)
187
188 # delete related attendees to unlock the tickets
189 delete_related_attendees_for_order(order)
190
191 # return the error message from Paypal
192 return False, error
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/ticketing.py b/app/api/helpers/ticketing.py
--- a/app/api/helpers/ticketing.py
+++ b/app/api/helpers/ticketing.py
@@ -10,6 +10,7 @@
from app.models import db
from app.models.ticket_fee import TicketFees
from app.models.ticket_holder import TicketHolder
+from flask_jwt_extended import current_user
class TicketingManager(object):
|
{"golden_diff": "diff --git a/app/api/helpers/ticketing.py b/app/api/helpers/ticketing.py\n--- a/app/api/helpers/ticketing.py\n+++ b/app/api/helpers/ticketing.py\n@@ -10,6 +10,7 @@\n from app.models import db\n from app.models.ticket_fee import TicketFees\n from app.models.ticket_holder import TicketHolder\n+from flask_jwt_extended import current_user\n \n \n class TicketingManager(object):\n", "issue": "After successful charge, order redirection to success page fails\n\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom app.api.helpers.db import save_to_db, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_to_attendees\nfrom app.api.helpers.notification import send_notif_to_attendees, send_notif_ticket_purchase_organizer\nfrom app.api.helpers.order import delete_related_attendees_for_order, create_pdf_tickets_for_holder\nfrom app.api.helpers.payment import StripePaymentsManager, PayPalPaymentsManager\nfrom app.models import db\nfrom app.models.ticket_fee import TicketFees\nfrom app.models.ticket_holder import TicketHolder\n\n\nclass TicketingManager(object):\n \"\"\"All ticketing and orders related helper functions\"\"\"\n\n @staticmethod\n def get_order_expiry():\n return 10\n\n @staticmethod\n def match_discount_quantity(discount_code, ticket_holders=None):\n qty = 0\n old_holders = get_count(TicketHolder.query.filter(TicketHolder.ticket_id.in_(discount_code.tickets.split(\",\"))))\n\n for holder in ticket_holders:\n ticket_holder = TicketHolder.query.filter_by(id=holder).one()\n if ticket_holder.ticket.id in discount_code.tickets.split(\",\"):\n qty += 1\n if (qty + old_holders) <= discount_code.tickets_number and \\\n discount_code.min_quantity <= qty <= discount_code.max_quantity:\n return True\n\n return False\n\n @staticmethod\n def calculate_update_amount(order):\n discount = None\n if order.discount_code_id:\n discount = order.discount_code\n # Access code part will be done ticket_holders API\n amount = 0\n total_discount = 0\n fees = TicketFees.query.filter_by(currency=order.event.payment_currency).first()\n\n for order_ticket in order.order_tickets:\n with db.session.no_autoflush:\n if order_ticket.ticket.is_fee_absorbed or not fees:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity)\n amount += (order_ticket.ticket.price * order_ticket.quantity)\n else:\n order_fee = fees.service_fee * (order_ticket.ticket.price * order_ticket.quantity) / 100\n if order_fee > fees.maximum_fee:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee\n amount += (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee\n else:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + order_fee\n amount += (order_ticket.ticket.price * order_ticket.quantity) + order_fee\n\n if discount and str(order_ticket.ticket.id) in discount.tickets.split(\",\"):\n if discount.type == \"amount\":\n total_discount += discount.value * order_ticket.quantity\n else:\n total_discount += discount.value * ticket_amount / 100\n\n if discount:\n if discount.type == \"amount\":\n order.amount = max(amount - total_discount, 0)\n elif discount.type == \"percent\":\n order.amount = amount - (discount.value * amount / 100.0)\n else:\n order.amount = amount\n save_to_db(order)\n return order\n\n @staticmethod\n def charge_stripe_order_payment(order, token_id):\n \"\"\"\n Charge the user through Stripe\n :param 
order: Order for which to charge for\n :param token_id: Stripe token\n :return:\n \"\"\"\n # save the stripe token with the order\n order.stripe_token = token_id\n save_to_db(order)\n\n # charge the user\n try:\n charge = StripePaymentsManager.capture_payment(order)\n except ConflictException as e:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n raise e\n\n # charge.paid is true if the charge succeeded, or was successfully authorized for later capture.\n if charge.paid:\n # update the order in the db.\n order.paid_via = charge.source.object\n order.brand = charge.source.brand\n order.exp_month = charge.source.exp_month\n order.exp_year = charge.source.exp_year\n order.last4 = charge.source.last4\n order.transaction_id = charge.id\n order.status = 'completed'\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n # create tickets.\n create_pdf_tickets_for_holder(order)\n\n # send email and notifications.\n send_email_to_attendees(order, current_user.id)\n send_notif_to_attendees(order, current_user.id)\n\n order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))\n for organizer in order.event.organizers:\n send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,\n order.id)\n if order.event.owner:\n send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,\n order.event.name, order.id)\n\n return True, 'Charge successful'\n else:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n # return the failure message from stripe.\n return False, charge.failure_message\n\n @staticmethod\n def charge_paypal_order_payment(order, paypal_payer_id, paypal_payment_id):\n \"\"\"\n Charge the user through paypal.\n :param order: Order for which to charge for.\n :param paypal_payment_id: payment_id\n :param paypal_payer_id: payer_id\n :return:\n \"\"\"\n\n # save the paypal payment_id with the order\n order.paypal_token = paypal_payment_id\n save_to_db(order)\n\n # create the transaction.\n status, error = PayPalPaymentsManager.execute_payment(paypal_payer_id, paypal_payment_id)\n\n if status:\n # successful transaction hence update the order details.\n order.paid_via = 'paypal'\n order.status = 'completed'\n order.transaction_id = paypal_payment_id\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n # create tickets\n create_pdf_tickets_for_holder(order)\n\n # send email and notifications\n send_email_to_attendees(order, order.user_id)\n send_notif_to_attendees(order, order.user_id)\n\n order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))\n for organizer in order.event.organizers:\n send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,\n order.id)\n if order.event.owner:\n send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,\n order.event.name, order.id)\n\n return True, 'Charge successful'\n else:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n # return the error message from Paypal\n return False, error\n", "path": 
"app/api/helpers/ticketing.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom app.api.helpers.db import save_to_db, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_to_attendees\nfrom app.api.helpers.notification import send_notif_to_attendees, send_notif_ticket_purchase_organizer\nfrom app.api.helpers.order import delete_related_attendees_for_order, create_pdf_tickets_for_holder\nfrom app.api.helpers.payment import StripePaymentsManager, PayPalPaymentsManager\nfrom app.models import db\nfrom app.models.ticket_fee import TicketFees\nfrom app.models.ticket_holder import TicketHolder\nfrom flask_jwt_extended import current_user\n\n\nclass TicketingManager(object):\n \"\"\"All ticketing and orders related helper functions\"\"\"\n\n @staticmethod\n def get_order_expiry():\n return 10\n\n @staticmethod\n def match_discount_quantity(discount_code, ticket_holders=None):\n qty = 0\n old_holders = get_count(TicketHolder.query.filter(TicketHolder.ticket_id.in_(discount_code.tickets.split(\",\"))))\n\n for holder in ticket_holders:\n ticket_holder = TicketHolder.query.filter_by(id=holder).one()\n if ticket_holder.ticket.id in discount_code.tickets.split(\",\"):\n qty += 1\n if (qty + old_holders) <= discount_code.tickets_number and \\\n discount_code.min_quantity <= qty <= discount_code.max_quantity:\n return True\n\n return False\n\n @staticmethod\n def calculate_update_amount(order):\n discount = None\n if order.discount_code_id:\n discount = order.discount_code\n # Access code part will be done ticket_holders API\n amount = 0\n total_discount = 0\n fees = TicketFees.query.filter_by(currency=order.event.payment_currency).first()\n\n for order_ticket in order.order_tickets:\n with db.session.no_autoflush:\n if order_ticket.ticket.is_fee_absorbed or not fees:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity)\n amount += (order_ticket.ticket.price * order_ticket.quantity)\n else:\n order_fee = fees.service_fee * (order_ticket.ticket.price * order_ticket.quantity) / 100\n if order_fee > fees.maximum_fee:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee\n amount += (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee\n else:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + order_fee\n amount += (order_ticket.ticket.price * order_ticket.quantity) + order_fee\n\n if discount and str(order_ticket.ticket.id) in discount.tickets.split(\",\"):\n if discount.type == \"amount\":\n total_discount += discount.value * order_ticket.quantity\n else:\n total_discount += discount.value * ticket_amount / 100\n\n if discount:\n if discount.type == \"amount\":\n order.amount = max(amount - total_discount, 0)\n elif discount.type == \"percent\":\n order.amount = amount - (discount.value * amount / 100.0)\n else:\n order.amount = amount\n save_to_db(order)\n return order\n\n @staticmethod\n def charge_stripe_order_payment(order, token_id):\n \"\"\"\n Charge the user through Stripe\n :param order: Order for which to charge for\n :param token_id: Stripe token\n :return:\n \"\"\"\n # save the stripe token with the order\n order.stripe_token = token_id\n save_to_db(order)\n\n # charge the user\n try:\n charge = StripePaymentsManager.capture_payment(order)\n except ConflictException as e:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete 
related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n raise e\n\n # charge.paid is true if the charge succeeded, or was successfully authorized for later capture.\n if charge.paid:\n # update the order in the db.\n order.paid_via = charge.source.object\n order.brand = charge.source.brand\n order.exp_month = charge.source.exp_month\n order.exp_year = charge.source.exp_year\n order.last4 = charge.source.last4\n order.transaction_id = charge.id\n order.status = 'completed'\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n # create tickets.\n create_pdf_tickets_for_holder(order)\n\n # send email and notifications.\n send_email_to_attendees(order, current_user.id)\n send_notif_to_attendees(order, current_user.id)\n\n order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))\n for organizer in order.event.organizers:\n send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,\n order.id)\n if order.event.owner:\n send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,\n order.event.name, order.id)\n\n return True, 'Charge successful'\n else:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n # return the failure message from stripe.\n return False, charge.failure_message\n\n @staticmethod\n def charge_paypal_order_payment(order, paypal_payer_id, paypal_payment_id):\n \"\"\"\n Charge the user through paypal.\n :param order: Order for which to charge for.\n :param paypal_payment_id: payment_id\n :param paypal_payer_id: payer_id\n :return:\n \"\"\"\n\n # save the paypal payment_id with the order\n order.paypal_token = paypal_payment_id\n save_to_db(order)\n\n # create the transaction.\n status, error = PayPalPaymentsManager.execute_payment(paypal_payer_id, paypal_payment_id)\n\n if status:\n # successful transaction hence update the order details.\n order.paid_via = 'paypal'\n order.status = 'completed'\n order.transaction_id = paypal_payment_id\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n # create tickets\n create_pdf_tickets_for_holder(order)\n\n # send email and notifications\n send_email_to_attendees(order, order.user_id)\n send_notif_to_attendees(order, order.user_id)\n\n order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))\n for organizer in order.event.organizers:\n send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,\n order.id)\n if order.event.owner:\n send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,\n order.event.name, order.id)\n\n return True, 'Charge successful'\n else:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n # return the error message from Paypal\n return False, error\n", "path": "app/api/helpers/ticketing.py"}]}
| 2,355 | 93 |
gh_patches_debug_5905
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-746
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecation notice about urllib3[secure]
### Description
pyOpenSSL and urllib3[secure] are deprecated in the upcoming release (1.26.12)
https://github.com/urllib3/urllib3/issues/2680
Removed 'urllib3[secure] < 1.25 ; python_version < "3.5"' and 'urllib3[secure] < 2 ; python_version >= "3.5"'
Closes #746
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import os
5 import sys
6
7 from setuptools import Extension, find_packages, setup
8
9 with open("README.md", "r") as fp:
10 long_description = fp.read()
11
12 packages = find_packages("src")
13 if sys.version_info < (3, 6):
14 packages = [p for p in packages if not p.startswith("scout_apm.async_")]
15
16 compile_extensions = (
17 # Python 3+
18 sys.version_info >= (3,)
19 # Not Jython
20 and not sys.platform.startswith("java")
21 # Not PyPy
22 and "__pypy__" not in sys.builtin_module_names
23 # Not explicitly disabled
24 and (os.environ.get("SCOUT_DISABLE_EXTENSIONS", "") == "")
25 )
26 if compile_extensions:
27 ext_modules = [
28 Extension(
29 name=str("scout_apm.core._objtrace"),
30 sources=[str("src/scout_apm/core/_objtrace.c")],
31 optional=True,
32 )
33 ]
34 else:
35 ext_modules = []
36
37 setup(
38 name="scout_apm",
39 version="2.26.1",
40 description="Scout Application Performance Monitoring Agent",
41 long_description=long_description,
42 long_description_content_type="text/markdown",
43 url="https://github.com/scoutapp/scout_apm_python",
44 project_urls={
45 "Documentation": "https://docs.scoutapm.com/#python-agent",
46 "Changelog": (
47 "https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md"
48 ),
49 },
50 author="Scout",
51 author_email="[email protected]",
52 license="MIT",
53 zip_safe=False,
54 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
55 packages=packages,
56 package_dir={str(""): str("src")},
57 ext_modules=ext_modules,
58 entry_points={
59 "console_scripts": [
60 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
61 ]
62 },
63 install_requires=[
64 'asgiref ; python_version >= "3.5"',
65 'contextvars ; python_version >= "3.6" and python_version < "3.7"',
66 'importlib-metadata ; python_version < "3.8"',
67 "psutil>=5,<6",
68 'urllib3[secure] < 1.25 ; python_version < "3.5"',
69 'urllib3[secure] < 2 ; python_version >= "3.5"',
70 "wrapt>=1.10,<2.0",
71 ],
72 keywords=["apm", "performance monitoring", "development"],
73 classifiers=[
74 "Development Status :: 5 - Production/Stable",
75 "Framework :: Bottle",
76 "Framework :: Django",
77 "Framework :: Django :: 1.8",
78 "Framework :: Django :: 1.9",
79 "Framework :: Django :: 1.10",
80 "Framework :: Django :: 1.11",
81 "Framework :: Django :: 2.0",
82 "Framework :: Django :: 2.1",
83 "Framework :: Django :: 2.2",
84 "Framework :: Django :: 3.0",
85 "Framework :: Django :: 3.1",
86 "Framework :: Django :: 3.2",
87 "Framework :: Django :: 4.0",
88 "Framework :: Flask",
89 "Framework :: Pyramid",
90 "Intended Audience :: Developers",
91 "Topic :: System :: Monitoring",
92 "License :: OSI Approved :: MIT License",
93 "Operating System :: MacOS",
94 "Operating System :: POSIX",
95 "Operating System :: POSIX :: Linux",
96 "Programming Language :: Python :: 2",
97 "Programming Language :: Python :: 2.7",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3.4",
100 "Programming Language :: Python :: 3.5",
101 "Programming Language :: Python :: 3.6",
102 "Programming Language :: Python :: 3.7",
103 "Programming Language :: Python :: 3.8",
104 "Programming Language :: Python :: 3.9",
105 "Programming Language :: Python :: 3.10",
106 ],
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
'importlib-metadata ; python_version < "3.8"',
"psutil>=5,<6",
'urllib3[secure] < 1.25 ; python_version < "3.5"',
- 'urllib3[secure] < 2 ; python_version >= "3.5"',
+ 'urllib3 < 2 ; python_version >= "3.5"',
"wrapt>=1.10,<2.0",
],
keywords=["apm", "performance monitoring", "development"],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n- 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n+ 'urllib3 < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n", "issue": "Deprecation notice about urllib3[secure]\n ### Description\r\n\r\n pyOpenSSL and urllib3[secure] are deprecated in the upcoming release (1.26.12)\r\n https://github.com/urllib3/urllib3/issues/2680\r\n Removed 'urllib3[secure] < 1.25 ; python_version < \"3.5\"' and 'urllib3[secure] < 2 ; python_version >= \"3.5\"'\r\n\r\nCloses #746\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.26.1\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'contextvars ; python_version >= \"3.6\" and python_version < \"3.7\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: 
Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.26.1\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'contextvars ; python_version >= \"3.6\" and python_version < \"3.7\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3 < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating 
System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}]}
| 1,511 | 146 |
gh_patches_debug_36454
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1493
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] If no checks to display in table - should state that
**Describe the bug**
Current status:

(picture taken from here: https://docs.deepchecks.com/dev/user-guide/tabular/auto_tutorials/plot_phishing_urls.html#understanding-the-checks-results)
**Expected behavior**
Used to have a text instead, recover that text
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/core/serialization/suite_result/widget.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing ipywidget serializer for the SuiteResult type."""
12 import typing as t
13 import warnings
14
15 import pandas as pd
16 from ipywidgets import HTML, Tab, VBox, Widget
17
18 from deepchecks.core import check_result as check_types
19 from deepchecks.core import suite
20 from deepchecks.core.serialization.abc import WidgetSerializer
21 from deepchecks.core.serialization.check_result.html import CheckResultSection
22 from deepchecks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer
23 from deepchecks.core.serialization.common import Html as CommonHtml
24 from deepchecks.core.serialization.common import join, normalize_widget_style
25 from deepchecks.core.serialization.dataframe.widget import DataFrameSerializer
26
27 from . import html
28
29 __all__ = ['SuiteResultSerializer']
30
31
32 class SuiteResultSerializer(WidgetSerializer['suite.SuiteResult']):
33 """Serializes any SuiteResult instance into ipywidgets.Widget instance.
34
35 Parameters
36 ----------
37 value : SuiteResult
38 SuiteResult instance that needed to be serialized.
39 """
40
41 def __init__(self, value: 'suite.SuiteResult', **kwargs):
42 if not isinstance(value, suite.SuiteResult):
43 raise TypeError(
44 f'Expected "SuiteResult" but got "{type(value).__name__}"'
45 )
46 self.value = value
47 self._html_serializer = html.SuiteResultSerializer(self.value)
48
49 def serialize(
50 self,
51 output_id: t.Optional[str] = None,
52 **kwargs
53 ) -> VBox:
54 """Serialize a SuiteResult instance into ipywidgets.Widget instance.
55
56 Parameters
57 ----------
58 output_id : Optional[str], default None
59 unique output identifier that will be used to form anchor links
60
61 Returns
62 -------
63 ipywidgets.VBox
64 """
65 tab = Tab()
66 tab.set_title(0, 'Checks With Conditions')
67 tab.set_title(1, 'Checks Without Conditions')
68 tab.set_title(2, 'Checks Without Output')
69
70 tab.children = [
71 self.prepare_results_with_condition_and_display(
72 output_id=output_id, **kwargs
73 ),
74 self.prepare_results_without_condition(
75 output_id=output_id,
76 check_sections=['additional-output'],
77 **kwargs
78 ),
79 self.prepare_failures_list()
80 ]
81
82 style = '<style>.jupyter-widgets.widget-tab > .p-TabBar .p-TabBar-tab {flex: 0 1 auto}</style>'
83
84 return VBox(children=[
85 HTML(value=style),
86 self.prepare_summary(output_id=output_id, **kwargs),
87 tab
88 ])
89
90 def prepare_summary(
91 self,
92 output_id: t.Optional[str] = None,
93 **kwargs
94 ) -> HTML:
95 """Prepare summary widget."""
96 return HTML(value=self._html_serializer.prepare_summary(
97 output_id=output_id,
98 **kwargs
99 ))
100
101 def prepare_conditions_table(
102 self,
103 output_id: t.Optional[str] = None,
104 **kwargs
105 ) -> HTML:
106 """Prepare summary widget."""
107 return normalize_widget_style(HTML(value=self._html_serializer.prepare_conditions_table(
108 output_id=output_id,
109 include_check_name=True,
110 **kwargs
111 )))
112
113 def prepare_failures_list(self) -> HTML:
114 """Prepare failures list widget."""
115 return normalize_widget_style(HTML(
116 value=self._html_serializer.prepare_failures_list()
117 ))
118
119 def prepare_results_without_condition(
120 self,
121 output_id: t.Optional[str] = None,
122 check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,
123 **kwargs
124 ) -> VBox:
125 """Prepare widget that shows results without conditions.
126
127 Parameters
128 ----------
129 output_id : Optional[str], default None
130 unique output identifier that will be used to form anchor links
131 check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
132 sequence of check result sections to include into the output,
133 in case of 'None' all sections will be included
134
135 Returns
136 -------
137 ipywidgets.VBox
138 """
139 results = t.cast(
140 t.List[check_types.CheckResult],
141 self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)
142 )
143 results_without_conditions = [
144 CheckResultWidgetSerializer(it).serialize(
145 output_id=output_id,
146 include=check_sections,
147 **kwargs
148 )
149 for it in results
150 ]
151 return normalize_widget_style(VBox(children=[
152 HTML(value='<h2>Check Without Conditions Output</h2>'),
153 self.prepare_navigation_for_unconditioned_results(output_id),
154 HTML(value=CommonHtml.light_hr),
155 *join(results_without_conditions, HTML(value=CommonHtml.light_hr))
156 ]))
157
158 def prepare_results_with_condition_and_display(
159 self,
160 output_id: t.Optional[str] = None,
161 check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,
162 **kwargs
163 ) -> VBox:
164 """Prepare widget that shows results with conditions and display.
165
166 Parameters
167 ----------
168 output_id : Optional[str], default None
169 unique output identifier that will be used to form anchor links
170 check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
171 sequence of check result sections to include into the output,
172 in case of 'None' all sections will be included
173
174 Returns
175 -------
176 ipywidgets.VBox
177 """
178 results = t.cast(
179 t.List[check_types.CheckResult],
180 self.value.select_results(self.value.results_with_conditions & self.value.results_with_display)
181 )
182 results_with_condition_and_display = [
183 CheckResultWidgetSerializer(it).serialize(
184 output_id=output_id,
185 include=check_sections,
186 **kwargs
187 )
188 for it in results
189 ]
190 output = (
191 join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))
192 if len(results_with_condition_and_display) > 0
193 else (HTML(value='<p>Nothing to show</p>'),)
194 )
195 return normalize_widget_style(VBox(children=[
196 self.prepare_conditions_table(output_id=output_id),
197 HTML(value='<h2>Check With Conditions Output</h2>'),
198 *output
199 ]))
200
201 def prepare_navigation_for_unconditioned_results(
202 self,
203 output_id: t.Optional[str] = None,
204 **kwargs
205 ) -> Widget:
206 """Prepare navigation widget for the tab with unconditioned_results.
207
208 Parameters
209 ----------
210 output_id : Optional[str], default None
211 unique output identifier that will be used to form anchor links
212
213 Returns
214 -------
215 ipywidgets.Widget
216 """
217 data = []
218
219 results = t.cast(
220 t.List[check_types.CheckResult],
221 self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)
222 )
223
224 for check_result in results:
225 check_header = check_result.get_header()
226
227 if output_id:
228 href = f'href="#{check_result.get_check_id(output_id)}"'
229 header = f'<a {href}>{check_header}</a>'
230 else:
231 header = check_header
232
233 summary = check_result.get_metadata(with_doc_link=True)['summary']
234 data.append([header, summary])
235
236 df = pd.DataFrame(
237 data=data,
238 columns=['Check', 'Summary']
239 )
240
241 with warnings.catch_warnings():
242 warnings.simplefilter(action='ignore', category=FutureWarning)
243 return DataFrameSerializer(df.style.hide_index()).serialize()
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepchecks/core/serialization/suite_result/widget.py b/deepchecks/core/serialization/suite_result/widget.py
--- a/deepchecks/core/serialization/suite_result/widget.py
+++ b/deepchecks/core/serialization/suite_result/widget.py
@@ -113,7 +113,7 @@
def prepare_failures_list(self) -> HTML:
"""Prepare failures list widget."""
return normalize_widget_style(HTML(
- value=self._html_serializer.prepare_failures_list()
+ value=self._html_serializer.prepare_failures_list() or '<p>No outputs to show.</p>'
))
def prepare_results_without_condition(
@@ -148,12 +148,19 @@
)
for it in results
]
- return normalize_widget_style(VBox(children=[
- HTML(value='<h2>Check Without Conditions Output</h2>'),
- self.prepare_navigation_for_unconditioned_results(output_id),
- HTML(value=CommonHtml.light_hr),
- *join(results_without_conditions, HTML(value=CommonHtml.light_hr))
- ]))
+ if len(results_without_conditions) > 0:
+ children = (
+ HTML(value='<h2>Check Without Conditions Output</h2>'),
+ self.prepare_navigation_for_unconditioned_results(output_id),
+ HTML(value=CommonHtml.light_hr),
+ *join(results_without_conditions, HTML(value=CommonHtml.light_hr))
+ )
+ else:
+ children = (
+ HTML(value='<p>No outputs to show.</p>'),
+ )
+
+ return normalize_widget_style(VBox(children=children))
def prepare_results_with_condition_and_display(
self,
@@ -187,16 +194,18 @@
)
for it in results
]
- output = (
- join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))
- if len(results_with_condition_and_display) > 0
- else (HTML(value='<p>Nothing to show</p>'),)
- )
- return normalize_widget_style(VBox(children=[
- self.prepare_conditions_table(output_id=output_id),
- HTML(value='<h2>Check With Conditions Output</h2>'),
- *output
- ]))
+
+ if len(results_with_condition_and_display) > 0:
+ children = (
+ self.prepare_conditions_table(output_id=output_id),
+ HTML(value='<h2>Check With Conditions Output</h2>'),
+ *join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))
+ )
+ else:
+ children = (
+ HTML(value='<p>No outputs to show.</p>'),
+ )
+ return normalize_widget_style(VBox(children=children))
def prepare_navigation_for_unconditioned_results(
self,
|
{"golden_diff": "diff --git a/deepchecks/core/serialization/suite_result/widget.py b/deepchecks/core/serialization/suite_result/widget.py\n--- a/deepchecks/core/serialization/suite_result/widget.py\n+++ b/deepchecks/core/serialization/suite_result/widget.py\n@@ -113,7 +113,7 @@\n def prepare_failures_list(self) -> HTML:\n \"\"\"Prepare failures list widget.\"\"\"\n return normalize_widget_style(HTML(\n- value=self._html_serializer.prepare_failures_list()\n+ value=self._html_serializer.prepare_failures_list() or '<p>No outputs to show.</p>'\n ))\n \n def prepare_results_without_condition(\n@@ -148,12 +148,19 @@\n )\n for it in results\n ]\n- return normalize_widget_style(VBox(children=[\n- HTML(value='<h2>Check Without Conditions Output</h2>'),\n- self.prepare_navigation_for_unconditioned_results(output_id),\n- HTML(value=CommonHtml.light_hr),\n- *join(results_without_conditions, HTML(value=CommonHtml.light_hr))\n- ]))\n+ if len(results_without_conditions) > 0:\n+ children = (\n+ HTML(value='<h2>Check Without Conditions Output</h2>'),\n+ self.prepare_navigation_for_unconditioned_results(output_id),\n+ HTML(value=CommonHtml.light_hr),\n+ *join(results_without_conditions, HTML(value=CommonHtml.light_hr))\n+ )\n+ else:\n+ children = (\n+ HTML(value='<p>No outputs to show.</p>'),\n+ )\n+\n+ return normalize_widget_style(VBox(children=children))\n \n def prepare_results_with_condition_and_display(\n self,\n@@ -187,16 +194,18 @@\n )\n for it in results\n ]\n- output = (\n- join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))\n- if len(results_with_condition_and_display) > 0\n- else (HTML(value='<p>Nothing to show</p>'),)\n- )\n- return normalize_widget_style(VBox(children=[\n- self.prepare_conditions_table(output_id=output_id),\n- HTML(value='<h2>Check With Conditions Output</h2>'),\n- *output\n- ]))\n+\n+ if len(results_with_condition_and_display) > 0:\n+ children = (\n+ self.prepare_conditions_table(output_id=output_id),\n+ HTML(value='<h2>Check With Conditions Output</h2>'),\n+ *join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))\n+ )\n+ else:\n+ children = (\n+ HTML(value='<p>No outputs to show.</p>'),\n+ )\n+ return normalize_widget_style(VBox(children=children))\n \n def prepare_navigation_for_unconditioned_results(\n self,\n", "issue": "[BUG] If no checks to display in table - should state that\n**Describe the bug**\r\nCurrent status:\r\n\r\n(picture taken from here: https://docs.deepchecks.com/dev/user-guide/tabular/auto_tutorials/plot_phishing_urls.html#understanding-the-checks-results)\r\n\r\n**Expected behavior**\r\nUsed to have a text instead, recover that text\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing ipywidget serializer for the SuiteResult type.\"\"\"\nimport typing as t\nimport warnings\n\nimport pandas as pd\nfrom ipywidgets import HTML, Tab, VBox, Widget\n\nfrom deepchecks.core import check_result as check_types\nfrom deepchecks.core import suite\nfrom deepchecks.core.serialization.abc import WidgetSerializer\nfrom deepchecks.core.serialization.check_result.html import CheckResultSection\nfrom deepchecks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer\nfrom deepchecks.core.serialization.common import Html as CommonHtml\nfrom deepchecks.core.serialization.common import join, normalize_widget_style\nfrom deepchecks.core.serialization.dataframe.widget import DataFrameSerializer\n\nfrom . import html\n\n__all__ = ['SuiteResultSerializer']\n\n\nclass SuiteResultSerializer(WidgetSerializer['suite.SuiteResult']):\n \"\"\"Serializes any SuiteResult instance into ipywidgets.Widget instance.\n\n Parameters\n ----------\n value : SuiteResult\n SuiteResult instance that needed to be serialized.\n \"\"\"\n\n def __init__(self, value: 'suite.SuiteResult', **kwargs):\n if not isinstance(value, suite.SuiteResult):\n raise TypeError(\n f'Expected \"SuiteResult\" but got \"{type(value).__name__}\"'\n )\n self.value = value\n self._html_serializer = html.SuiteResultSerializer(self.value)\n\n def serialize(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Serialize a SuiteResult instance into ipywidgets.Widget instance.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n tab = Tab()\n tab.set_title(0, 'Checks With Conditions')\n tab.set_title(1, 'Checks Without Conditions')\n tab.set_title(2, 'Checks Without Output')\n\n tab.children = [\n self.prepare_results_with_condition_and_display(\n output_id=output_id, **kwargs\n ),\n self.prepare_results_without_condition(\n output_id=output_id,\n check_sections=['additional-output'],\n **kwargs\n ),\n self.prepare_failures_list()\n ]\n\n style = '<style>.jupyter-widgets.widget-tab > .p-TabBar .p-TabBar-tab {flex: 0 1 auto}</style>'\n\n return VBox(children=[\n HTML(value=style),\n self.prepare_summary(output_id=output_id, **kwargs),\n tab\n ])\n\n def prepare_summary(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> HTML:\n \"\"\"Prepare summary widget.\"\"\"\n return HTML(value=self._html_serializer.prepare_summary(\n output_id=output_id,\n **kwargs\n ))\n\n def prepare_conditions_table(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> HTML:\n \"\"\"Prepare summary widget.\"\"\"\n return normalize_widget_style(HTML(value=self._html_serializer.prepare_conditions_table(\n output_id=output_id,\n include_check_name=True,\n **kwargs\n )))\n\n def prepare_failures_list(self) -> HTML:\n \"\"\"Prepare failures list widget.\"\"\"\n return normalize_widget_style(HTML(\n value=self._html_serializer.prepare_failures_list()\n ))\n\n def prepare_results_without_condition(\n self,\n output_id: t.Optional[str] = None,\n check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Prepare widget that shows results without conditions.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to 
form anchor links\n check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None\n sequence of check result sections to include into the output,\n in case of 'None' all sections will be included\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)\n )\n results_without_conditions = [\n CheckResultWidgetSerializer(it).serialize(\n output_id=output_id,\n include=check_sections,\n **kwargs\n )\n for it in results\n ]\n return normalize_widget_style(VBox(children=[\n HTML(value='<h2>Check Without Conditions Output</h2>'),\n self.prepare_navigation_for_unconditioned_results(output_id),\n HTML(value=CommonHtml.light_hr),\n *join(results_without_conditions, HTML(value=CommonHtml.light_hr))\n ]))\n\n def prepare_results_with_condition_and_display(\n self,\n output_id: t.Optional[str] = None,\n check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Prepare widget that shows results with conditions and display.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None\n sequence of check result sections to include into the output,\n in case of 'None' all sections will be included\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_with_conditions & self.value.results_with_display)\n )\n results_with_condition_and_display = [\n CheckResultWidgetSerializer(it).serialize(\n output_id=output_id,\n include=check_sections,\n **kwargs\n )\n for it in results\n ]\n output = (\n join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))\n if len(results_with_condition_and_display) > 0\n else (HTML(value='<p>Nothing to show</p>'),)\n )\n return normalize_widget_style(VBox(children=[\n self.prepare_conditions_table(output_id=output_id),\n HTML(value='<h2>Check With Conditions Output</h2>'),\n *output\n ]))\n\n def prepare_navigation_for_unconditioned_results(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> Widget:\n \"\"\"Prepare navigation widget for the tab with unconditioned_results.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n\n Returns\n -------\n ipywidgets.Widget\n \"\"\"\n data = []\n\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)\n )\n\n for check_result in results:\n check_header = check_result.get_header()\n\n if output_id:\n href = f'href=\"#{check_result.get_check_id(output_id)}\"'\n header = f'<a {href}>{check_header}</a>'\n else:\n header = check_header\n\n summary = check_result.get_metadata(with_doc_link=True)['summary']\n data.append([header, summary])\n\n df = pd.DataFrame(\n data=data,\n columns=['Check', 'Summary']\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n return DataFrameSerializer(df.style.hide_index()).serialize()\n", "path": "deepchecks/core/serialization/suite_result/widget.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# 
Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing ipywidget serializer for the SuiteResult type.\"\"\"\nimport typing as t\nimport warnings\n\nimport pandas as pd\nfrom ipywidgets import HTML, Tab, VBox, Widget\n\nfrom deepchecks.core import check_result as check_types\nfrom deepchecks.core import suite\nfrom deepchecks.core.serialization.abc import WidgetSerializer\nfrom deepchecks.core.serialization.check_result.html import CheckResultSection\nfrom deepchecks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer\nfrom deepchecks.core.serialization.common import Html as CommonHtml\nfrom deepchecks.core.serialization.common import join, normalize_widget_style\nfrom deepchecks.core.serialization.dataframe.widget import DataFrameSerializer\n\nfrom . import html\n\n__all__ = ['SuiteResultSerializer']\n\n\nclass SuiteResultSerializer(WidgetSerializer['suite.SuiteResult']):\n \"\"\"Serializes any SuiteResult instance into ipywidgets.Widget instance.\n\n Parameters\n ----------\n value : SuiteResult\n SuiteResult instance that needed to be serialized.\n \"\"\"\n\n def __init__(self, value: 'suite.SuiteResult', **kwargs):\n if not isinstance(value, suite.SuiteResult):\n raise TypeError(\n f'Expected \"SuiteResult\" but got \"{type(value).__name__}\"'\n )\n self.value = value\n self._html_serializer = html.SuiteResultSerializer(self.value)\n\n def serialize(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Serialize a SuiteResult instance into ipywidgets.Widget instance.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n tab = Tab()\n tab.set_title(0, 'Checks With Conditions')\n tab.set_title(1, 'Checks Without Conditions')\n tab.set_title(2, 'Checks Without Output')\n\n tab.children = [\n self.prepare_results_with_condition_and_display(\n output_id=output_id, **kwargs\n ),\n self.prepare_results_without_condition(\n output_id=output_id,\n check_sections=['additional-output'],\n **kwargs\n ),\n self.prepare_failures_list()\n ]\n\n style = '<style>.jupyter-widgets.widget-tab > .p-TabBar .p-TabBar-tab {flex: 0 1 auto}</style>'\n\n return VBox(children=[\n HTML(value=style),\n self.prepare_summary(output_id=output_id, **kwargs),\n tab\n ])\n\n def prepare_summary(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> HTML:\n \"\"\"Prepare summary widget.\"\"\"\n return HTML(value=self._html_serializer.prepare_summary(\n output_id=output_id,\n **kwargs\n ))\n\n def prepare_conditions_table(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> HTML:\n \"\"\"Prepare summary widget.\"\"\"\n return normalize_widget_style(HTML(value=self._html_serializer.prepare_conditions_table(\n output_id=output_id,\n include_check_name=True,\n **kwargs\n )))\n\n def prepare_failures_list(self) -> HTML:\n \"\"\"Prepare failures list widget.\"\"\"\n return normalize_widget_style(HTML(\n value=self._html_serializer.prepare_failures_list() or '<p>No outputs to show.</p>'\n ))\n\n def 
prepare_results_without_condition(\n self,\n output_id: t.Optional[str] = None,\n check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Prepare widget that shows results without conditions.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None\n sequence of check result sections to include into the output,\n in case of 'None' all sections will be included\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)\n )\n results_without_conditions = [\n CheckResultWidgetSerializer(it).serialize(\n output_id=output_id,\n include=check_sections,\n **kwargs\n )\n for it in results\n ]\n if len(results_without_conditions) > 0:\n children = (\n HTML(value='<h2>Check Without Conditions Output</h2>'),\n self.prepare_navigation_for_unconditioned_results(output_id),\n HTML(value=CommonHtml.light_hr),\n *join(results_without_conditions, HTML(value=CommonHtml.light_hr))\n )\n else:\n children = (\n HTML(value='<p>No outputs to show.</p>'),\n )\n\n return normalize_widget_style(VBox(children=children))\n\n def prepare_results_with_condition_and_display(\n self,\n output_id: t.Optional[str] = None,\n check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Prepare widget that shows results with conditions and display.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None\n sequence of check result sections to include into the output,\n in case of 'None' all sections will be included\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_with_conditions & self.value.results_with_display)\n )\n results_with_condition_and_display = [\n CheckResultWidgetSerializer(it).serialize(\n output_id=output_id,\n include=check_sections,\n **kwargs\n )\n for it in results\n ]\n\n if len(results_with_condition_and_display) > 0:\n children = (\n self.prepare_conditions_table(output_id=output_id),\n HTML(value='<h2>Check With Conditions Output</h2>'),\n *join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))\n )\n else:\n children = (\n HTML(value='<p>No outputs to show.</p>'),\n )\n return normalize_widget_style(VBox(children=children))\n\n def prepare_navigation_for_unconditioned_results(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> Widget:\n \"\"\"Prepare navigation widget for the tab with unconditioned_results.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n\n Returns\n -------\n ipywidgets.Widget\n \"\"\"\n data = []\n\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)\n )\n\n for check_result in results:\n check_header = check_result.get_header()\n\n if output_id:\n href = f'href=\"#{check_result.get_check_id(output_id)}\"'\n header = f'<a {href}>{check_header}</a>'\n else:\n header = check_header\n\n 
summary = check_result.get_metadata(with_doc_link=True)['summary']\n data.append([header, summary])\n\n df = pd.DataFrame(\n data=data,\n columns=['Check', 'Summary']\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n return DataFrameSerializer(df.style.hide_index()).serialize()\n", "path": "deepchecks/core/serialization/suite_result/widget.py"}]}
| 2,701 | 614 |
gh_patches_debug_31075
|
rasdani/github-patches
|
git_diff
|
databricks__koalas-104
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix pypi description
See https://pypi.org/project/databricks-koalas/
It just pulls in our entire README in markdown format.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 #
4 # Copyright (C) 2019 Databricks, Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19 from setuptools import setup
20
21
22 install_requires = [
23 'pandas>=0.23',
24 'decorator',
25 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
26 ]
27
28 setup(
29 name='databricks-koalas',
30 version='0.0.6',
31 packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',
32 'databricks.koalas.missing'],
33 extras_require={
34 'spark': ['pyspark>=2.4.0'],
35 },
36 install_requires=install_requires,
37 author="Timothy Hunter",
38 author_email="[email protected]",
39 license='http://www.apache.org/licenses/LICENSE-2.0',
40 long_description=open('README.md').read(),
41 )
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,24 +18,46 @@
from setuptools import setup
+DESCRIPTION = "Pandas DataFrame API on Apache Spark"
-install_requires = [
- 'pandas>=0.23',
- 'decorator',
- 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
-]
+LONG_DESCRIPTION = """
+Koalas makes data scientists more productive when interacting with big data,
+by augmenting Apache Spark's Python DataFrame API to be compatible with
+Pandas'.
+
+Pandas is the de facto standard (single-node) dataframe implementation in
+Python, while Spark is the de facto standard for big data processing.
+With this package, data scientists can:
+
+- Be immediately productive with Spark, with no learning curve, if one
+ is already familiar with Pandas.
+- Have a single codebase that works both with Pandas (tests, smaller datasets)
+ and with Spark (distributed datasets).
+"""
setup(
- name='databricks-koalas',
+ name='koalas',
version='0.0.6',
packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',
'databricks.koalas.missing'],
extras_require={
'spark': ['pyspark>=2.4.0'],
},
- install_requires=install_requires,
- author="Timothy Hunter",
- author_email="[email protected]",
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
+ install_requires=[
+ 'pandas>=0.23',
+ 'decorator',
+ 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
+ ],
+ maintainer="Databricks",
+ maintainer_email="[email protected]",
license='http://www.apache.org/licenses/LICENSE-2.0',
- long_description=open('README.md').read(),
+ url="https://github.com/databricks/spark-pandas",
+ project_urls={
+ 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',
+ # 'Documentation': '',
+ 'Source Code': 'https://github.com/databricks/spark-pandas'
+ },
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,24 +18,46 @@\n \n from setuptools import setup\n \n+DESCRIPTION = \"Pandas DataFrame API on Apache Spark\"\n \n-install_requires = [\n- 'pandas>=0.23',\n- 'decorator',\n- 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n-]\n+LONG_DESCRIPTION = \"\"\"\n+Koalas makes data scientists more productive when interacting with big data,\n+by augmenting Apache Spark's Python DataFrame API to be compatible with\n+Pandas'.\n+\n+Pandas is the de facto standard (single-node) dataframe implementation in\n+Python, while Spark is the de facto standard for big data processing.\n+With this package, data scientists can:\n+\n+- Be immediately productive with Spark, with no learning curve, if one\n+ is already familiar with Pandas.\n+- Have a single codebase that works both with Pandas (tests, smaller datasets)\n+ and with Spark (distributed datasets).\n+\"\"\"\n \n setup(\n- name='databricks-koalas',\n+ name='koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n- install_requires=install_requires,\n- author=\"Timothy Hunter\",\n- author_email=\"[email protected]\",\n+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',\n+ install_requires=[\n+ 'pandas>=0.23',\n+ 'decorator',\n+ 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n+ ],\n+ maintainer=\"Databricks\",\n+ maintainer_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n- long_description=open('README.md').read(),\n+ url=\"https://github.com/databricks/spark-pandas\",\n+ project_urls={\n+ 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',\n+ # 'Documentation': '',\n+ 'Source Code': 'https://github.com/databricks/spark-pandas'\n+ },\n+ description=DESCRIPTION,\n+ long_description=LONG_DESCRIPTION,\n )\n", "issue": "Fix pypi description\nSee https://pypi.org/project/databricks-koalas/\r\n\r\nIt just pulls in our entire README in markdown format.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom setuptools import setup\n\n\ninstall_requires = [\n 'pandas>=0.23',\n 'decorator',\n 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n]\n\nsetup(\n name='databricks-koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n install_requires=install_requires,\n author=\"Timothy Hunter\",\n author_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n long_description=open('README.md').read(),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, 
Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom setuptools import setup\n\nDESCRIPTION = \"Pandas DataFrame API on Apache Spark\"\n\nLONG_DESCRIPTION = \"\"\"\nKoalas makes data scientists more productive when interacting with big data,\nby augmenting Apache Spark's Python DataFrame API to be compatible with\nPandas'.\n\nPandas is the de facto standard (single-node) dataframe implementation in\nPython, while Spark is the de facto standard for big data processing.\nWith this package, data scientists can:\n\n- Be immediately productive with Spark, with no learning curve, if one\n is already familiar with Pandas.\n- Have a single codebase that works both with Pandas (tests, smaller datasets)\n and with Spark (distributed datasets).\n\"\"\"\n\nsetup(\n name='koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',\n install_requires=[\n 'pandas>=0.23',\n 'decorator',\n 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n ],\n maintainer=\"Databricks\",\n maintainer_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n url=\"https://github.com/databricks/spark-pandas\",\n project_urls={\n 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',\n # 'Documentation': '',\n 'Source Code': 'https://github.com/databricks/spark-pandas'\n },\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n)\n", "path": "setup.py"}]}
| 705 | 597 |
gh_patches_debug_31276
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-3857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Global tasks like repair or reclaim should probably not run in parallel
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
Global tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/views/repair.py`
Content:
```
1 from drf_spectacular.utils import extend_schema
2 from rest_framework.views import APIView
3
4 from pulpcore.app.response import OperationPostponedResponse
5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer
6 from pulpcore.app.tasks import repair_all_artifacts
7 from pulpcore.tasking.tasks import dispatch
8
9
10 class RepairView(APIView):
11 @extend_schema(
12 description=(
13 "Trigger an asynchronous task that checks for missing "
14 "or corrupted artifacts, and attempts to redownload them."
15 ),
16 summary="Repair Artifact Storage",
17 request=RepairSerializer,
18 responses={202: AsyncOperationResponseSerializer},
19 )
20 def post(self, request):
21 """
22 Repair artifacts.
23 """
24 serializer = RepairSerializer(data=request.data)
25 serializer.is_valid(raise_exception=True)
26
27 verify_checksums = serializer.validated_data["verify_checksums"]
28
29 task = dispatch(repair_all_artifacts, args=[verify_checksums])
30
31 return OperationPostponedResponse(task, request)
32
```
Path: `pulpcore/app/viewsets/reclaim.py`
Content:
```
1 from drf_spectacular.utils import extend_schema
2 from rest_framework.viewsets import ViewSet
3
4 from pulpcore.app.response import OperationPostponedResponse
5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer
6 from pulpcore.app.tasks import reclaim_space
7 from pulpcore.tasking.tasks import dispatch
8
9
10 class ReclaimSpaceViewSet(ViewSet):
11 """
12 Viewset for reclaim disk space endpoint.
13 """
14
15 serializer_class = ReclaimSpaceSerializer
16
17 @extend_schema(
18 description="Trigger an asynchronous space reclaim operation.",
19 responses={202: AsyncOperationResponseSerializer},
20 )
21 def reclaim(self, request):
22 """
23 Triggers an asynchronous space reclaim operation.
24 """
25 serializer = ReclaimSpaceSerializer(data=request.data)
26 serializer.is_valid(raise_exception=True)
27
28 repos = serializer.validated_data.get("repo_hrefs", [])
29 keeplist = serializer.validated_data.get("repo_versions_keeplist", [])
30 reclaim_repo_pks = []
31 keeplist_rv_pks = []
32 for repo in repos:
33 reclaim_repo_pks.append(repo.pk)
34 for rv in keeplist:
35 repos.append(rv.repository)
36 keeplist_rv_pks.append(rv.pk)
37
38 task = dispatch(
39 reclaim_space,
40 shared_resources=repos,
41 kwargs={
42 "repo_pks": reclaim_repo_pks,
43 "keeplist_rv_pks": keeplist_rv_pks,
44 },
45 )
46
47 return OperationPostponedResponse(task, request)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py
--- a/pulpcore/app/views/repair.py
+++ b/pulpcore/app/views/repair.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.views import APIView
from pulpcore.app.response import OperationPostponedResponse
@@ -26,6 +27,9 @@
verify_checksums = serializer.validated_data["verify_checksums"]
- task = dispatch(repair_all_artifacts, args=[verify_checksums])
+ uri = "/api/v3/repair/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py
--- a/pulpcore/app/viewsets/reclaim.py
+++ b/pulpcore/app/viewsets/reclaim.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.viewsets import ViewSet
from pulpcore.app.response import OperationPostponedResponse
@@ -35,8 +36,17 @@
repos.append(rv.repository)
keeplist_rv_pks.append(rv.pk)
+ if repos:
+ exclusive_resources = None
+ else:
+ uri = "/api/v3/repositories/reclaim_space/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ exclusive_resources = [uri]
+
task = dispatch(
reclaim_space,
+ exclusive_resources=exclusive_resources,
shared_resources=repos,
kwargs={
"repo_pks": reclaim_repo_pks,
|
{"golden_diff": "diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py\n--- a/pulpcore/app/views/repair.py\n+++ b/pulpcore/app/views/repair.py\n@@ -1,4 +1,5 @@\n from drf_spectacular.utils import extend_schema\n+from django.conf import settings\n from rest_framework.views import APIView\n \n from pulpcore.app.response import OperationPostponedResponse\n@@ -26,6 +27,9 @@\n \n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n \n- task = dispatch(repair_all_artifacts, args=[verify_checksums])\n+ uri = \"/api/v3/repair/\"\n+ if settings.DOMAIN_ENABLED:\n+ uri = f\"/{request.pulp_domain.name}{uri}\"\n+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])\n \n return OperationPostponedResponse(task, request)\ndiff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py\n--- a/pulpcore/app/viewsets/reclaim.py\n+++ b/pulpcore/app/viewsets/reclaim.py\n@@ -1,4 +1,5 @@\n from drf_spectacular.utils import extend_schema\n+from django.conf import settings\n from rest_framework.viewsets import ViewSet\n \n from pulpcore.app.response import OperationPostponedResponse\n@@ -35,8 +36,17 @@\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n \n+ if repos:\n+ exclusive_resources = None\n+ else:\n+ uri = \"/api/v3/repositories/reclaim_space/\"\n+ if settings.DOMAIN_ENABLED:\n+ uri = f\"/{request.pulp_domain.name}{uri}\"\n+ exclusive_resources = [uri]\n+\n task = dispatch(\n reclaim_space,\n+ exclusive_resources=exclusive_resources,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n", "issue": "Global tasks like repair or reclaim should probably not run in parallel\n**Version**\r\nPlease provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.\r\n\r\n**Describe the bug**\r\nGlobal tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. 
Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer\nfrom pulpcore.app.tasks import repair_all_artifacts\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass RepairView(APIView):\n @extend_schema(\n description=(\n \"Trigger an asynchronous task that checks for missing \"\n \"or corrupted artifacts, and attempts to redownload them.\"\n ),\n summary=\"Repair Artifact Storage\",\n request=RepairSerializer,\n responses={202: AsyncOperationResponseSerializer},\n )\n def post(self, request):\n \"\"\"\n Repair artifacts.\n \"\"\"\n serializer = RepairSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n\n task = dispatch(repair_all_artifacts, args=[verify_checksums])\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/views/repair.py"}, {"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.viewsets import ViewSet\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer\nfrom pulpcore.app.tasks import reclaim_space\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass ReclaimSpaceViewSet(ViewSet):\n \"\"\"\n Viewset for reclaim disk space endpoint.\n \"\"\"\n\n serializer_class = ReclaimSpaceSerializer\n\n @extend_schema(\n description=\"Trigger an asynchronous space reclaim operation.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def reclaim(self, request):\n \"\"\"\n Triggers an asynchronous space reclaim operation.\n \"\"\"\n serializer = ReclaimSpaceSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n repos = serializer.validated_data.get(\"repo_hrefs\", [])\n keeplist = serializer.validated_data.get(\"repo_versions_keeplist\", [])\n reclaim_repo_pks = []\n keeplist_rv_pks = []\n for repo in repos:\n reclaim_repo_pks.append(repo.pk)\n for rv in keeplist:\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n\n task = dispatch(\n reclaim_space,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n \"keeplist_rv_pks\": keeplist_rv_pks,\n },\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/reclaim.py"}], "after_files": [{"content": "from drf_spectacular.utils import extend_schema\nfrom django.conf import settings\nfrom rest_framework.views import APIView\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer\nfrom pulpcore.app.tasks import repair_all_artifacts\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass RepairView(APIView):\n @extend_schema(\n description=(\n \"Trigger an asynchronous task that checks for missing \"\n \"or corrupted artifacts, and attempts to redownload them.\"\n ),\n summary=\"Repair Artifact Storage\",\n request=RepairSerializer,\n responses={202: AsyncOperationResponseSerializer},\n )\n def post(self, request):\n \"\"\"\n Repair artifacts.\n \"\"\"\n serializer = RepairSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n verify_checksums = 
serializer.validated_data[\"verify_checksums\"]\n\n uri = \"/api/v3/repair/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.pulp_domain.name}{uri}\"\n task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/views/repair.py"}, {"content": "from drf_spectacular.utils import extend_schema\nfrom django.conf import settings\nfrom rest_framework.viewsets import ViewSet\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer\nfrom pulpcore.app.tasks import reclaim_space\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass ReclaimSpaceViewSet(ViewSet):\n \"\"\"\n Viewset for reclaim disk space endpoint.\n \"\"\"\n\n serializer_class = ReclaimSpaceSerializer\n\n @extend_schema(\n description=\"Trigger an asynchronous space reclaim operation.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def reclaim(self, request):\n \"\"\"\n Triggers an asynchronous space reclaim operation.\n \"\"\"\n serializer = ReclaimSpaceSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n repos = serializer.validated_data.get(\"repo_hrefs\", [])\n keeplist = serializer.validated_data.get(\"repo_versions_keeplist\", [])\n reclaim_repo_pks = []\n keeplist_rv_pks = []\n for repo in repos:\n reclaim_repo_pks.append(repo.pk)\n for rv in keeplist:\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n\n if repos:\n exclusive_resources = None\n else:\n uri = \"/api/v3/repositories/reclaim_space/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.pulp_domain.name}{uri}\"\n exclusive_resources = [uri]\n\n task = dispatch(\n reclaim_space,\n exclusive_resources=exclusive_resources,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n \"keeplist_rv_pks\": keeplist_rv_pks,\n },\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/reclaim.py"}]}
| 1,123 | 433 |
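The fix above serializes the two global endpoints by handing `dispatch` a pseudo-resource URI as an exclusive resource. A condensed sketch of that locking pattern, reusing only names that appear in the patch (`dispatch`, `settings.DOMAIN_ENABLED`, `request.pulp_domain.name`); the `dispatch_exclusively` helper itself is illustrative and not part of the patch:

```python
from django.conf import settings

from pulpcore.tasking.tasks import dispatch


def dispatch_exclusively(task_func, request, uri, **kwargs):
    """Dispatch a task so that only one instance runs at a time.

    The URI does not have to resolve to a real object; it acts as a lock
    name.  Two tasks dispatched with the same entry in ``exclusive_resources``
    are executed one after the other, which is what the patch relies on for
    /api/v3/repair/ and /api/v3/repositories/reclaim_space/.
    """
    if settings.DOMAIN_ENABLED:
        uri = f"/{request.pulp_domain.name}{uri}"
    return dispatch(task_func, exclusive_resources=[uri], **kwargs)
```

Note that the reclaim viewset only takes the global lock when no repositories are passed; otherwise the affected repositories are declared as `shared_resources`, so repository-scoped reclaims can still run in parallel.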
gh_patches_debug_15423
|
rasdani/github-patches
|
git_diff
|
deis__deis-5006
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Unable to start application with ' (quote) in env variable
Our customer's bug:
**STR**
``` bash
git clone https://github.com/deis/example-python-flask && cd example-python-flask
deis create
git push deis master
echo "MAILER_SUBJECT=ISP cron can't create booklet" > .env
deis config:push
```
**Actual result**
Creating config... .o.
Client return 504 error, after ~30 min(balancer config)
_Controller log_
``` log
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Tagging Docker image 10.21.12.123:5000/forecast:git-1c89b116 as forecast:git-1c89b116
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Building Docker image 10.21.12.123:5000/forecast:v4
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO forecast: 500 Server Error: Internal Server Error ("Syntax error - can't find = in "cron". Must be of the form: name=value")
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO [forecast]: 500 Server Error: Internal Server Error ("Syntax error - can't find = in "cron". Must be of the form: name=value")
```
_deis info_
``` bash
=== forecast Application
updated: 2015-12-14T02:57:36UTC
uuid: 01eacd72-1950-4ec1-a301-4212b59572b6
created: 2015-12-14T02:50:35UTC
url: forecast.web.2gis.local
owner: v.reyder
id: forecast
=== forecast Processes
--- web:
web.1 up (v3)
web.1 down (v4)
=== forecast Domains
```
_Actually config is set_
``` bash
$ deis config
=== forecast Config
MAILER_SUBJECT ISP cron can't create booklet
```
_Registry_
``` bash
s3cmd ls s3://devregistry/registry/repositories/library/forecast/
2015-12-14 02:57 11780 s3://devregistry/registry/repositories/library/forecast/_index_images
2015-12-14 02:56 64 s3://devregistry/registry/repositories/library/forecast/tag_git-1c89b116
2015-12-14 02:57 64 s3://devregistry/registry/repositories/library/forecast/tag_v3
2015-12-14 02:56 147 s3://devregistry/registry/repositories/library/forecast/taggit-1c89b116_json
2015-12-14 02:57 147 s3://devregistry/registry/repositories/library/forecast/tagv3_json
```
**Important: Only v3 tags presents and v4 is not exist**
**Expected result**
Config is set, application is up, and proper tags create in registry.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `controller/registry/dockerclient.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Support the Deis workflow by manipulating and publishing Docker images."""
3
4 from __future__ import unicode_literals
5 import io
6 import logging
7
8 from django.conf import settings
9 from rest_framework.exceptions import PermissionDenied
10 from simpleflock import SimpleFlock
11 import docker
12
13 logger = logging.getLogger(__name__)
14
15
16 class DockerClient(object):
17 """Use the Docker API to pull, tag, build, and push images to deis-registry."""
18
19 FLOCKFILE = '/tmp/controller-pull'
20
21 def __init__(self):
22 self.client = docker.Client(version='auto')
23 self.registry = settings.REGISTRY_HOST + ':' + str(settings.REGISTRY_PORT)
24
25 def publish_release(self, source, config, target, deis_registry):
26 """Update a source Docker image with environment config and publish it to deis-registry."""
27 # get the source repository name and tag
28 src_name, src_tag = docker.utils.parse_repository_tag(source)
29 # get the target repository name and tag
30 name, tag = docker.utils.parse_repository_tag(target)
31 # strip any "http://host.domain:port" prefix from the target repository name,
32 # since we always publish to the Deis registry
33 name = strip_prefix(name)
34
35 # pull the source image from the registry
36 # NOTE: this relies on an implementation detail of deis-builder, that
37 # the image has been uploaded already to deis-registry
38 if deis_registry:
39 repo = "{}/{}".format(self.registry, src_name)
40 else:
41 repo = src_name
42 self.pull(repo, src_tag)
43
44 # tag the image locally without the repository URL
45 image = "{}:{}".format(repo, src_tag)
46 self.tag(image, src_name, tag=src_tag)
47
48 # build a Docker image that adds a "last-mile" layer of environment
49 config.update({'DEIS_APP': name, 'DEIS_RELEASE': tag})
50 self.build(source, config, name, tag)
51
52 # push the image to deis-registry
53 self.push("{}/{}".format(self.registry, name), tag)
54
55 def build(self, source, config, repo, tag):
56 """Add a "last-mile" layer of environment config to a Docker image for deis-registry."""
57 check_blacklist(repo)
58 env = ' '.join("{}='{}'".format(
59 k, v.encode('unicode-escape').replace("'", "\\'")) for k, v in config.viewitems())
60 dockerfile = "FROM {}\nENV {}".format(source, env)
61 f = io.BytesIO(dockerfile.encode('utf-8'))
62 target_repo = "{}/{}:{}".format(self.registry, repo, tag)
63 logger.info("Building Docker image {}".format(target_repo))
64 with SimpleFlock(self.FLOCKFILE, timeout=1200):
65 stream = self.client.build(fileobj=f, tag=target_repo, stream=True, rm=True)
66 log_output(stream)
67
68 def pull(self, repo, tag):
69 """Pull a Docker image into the local storage graph."""
70 check_blacklist(repo)
71 logger.info("Pulling Docker image {}:{}".format(repo, tag))
72 with SimpleFlock(self.FLOCKFILE, timeout=1200):
73 stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)
74 log_output(stream)
75
76 def push(self, repo, tag):
77 """Push a local Docker image to a registry."""
78 logger.info("Pushing Docker image {}:{}".format(repo, tag))
79 stream = self.client.push(repo, tag=tag, stream=True, insecure_registry=True)
80 log_output(stream)
81
82 def tag(self, image, repo, tag):
83 """Tag a local Docker image with a new name and tag."""
84 check_blacklist(repo)
85 logger.info("Tagging Docker image {} as {}:{}".format(image, repo, tag))
86 if not self.client.tag(image, repo, tag=tag, force=True):
87 raise docker.errors.DockerException("tagging failed")
88
89
90 def check_blacklist(repo):
91 """Check a Docker repository name for collision with deis/* components."""
92 blacklisted = [ # NOTE: keep this list up to date!
93 'builder', 'cache', 'controller', 'database', 'logger', 'logspout',
94 'publisher', 'registry', 'router', 'store-admin', 'store-daemon',
95 'store-gateway', 'store-metadata', 'store-monitor',
96 ]
97 if any("deis/{}".format(c) in repo for c in blacklisted):
98 raise PermissionDenied("Repository name {} is not allowed".format(repo))
99
100
101 def log_output(stream):
102 """Log a stream at DEBUG level, and raise DockerException if it contains "error"."""
103 for chunk in stream:
104 logger.debug(chunk)
105 # error handling requires looking at the response body
106 if '"error"' in chunk.lower():
107 raise docker.errors.DockerException(chunk)
108
109
110 def strip_prefix(name):
111 """Strip the schema and host:port from a Docker repository name."""
112 paths = name.split('/')
113 return '/'.join(p for p in paths if p and '.' not in p and ':' not in p)
114
115
116 def publish_release(source, config, target, deis_registry):
117
118 client = DockerClient()
119 return client.publish_release(source, config, target, deis_registry)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/controller/registry/dockerclient.py b/controller/registry/dockerclient.py
--- a/controller/registry/dockerclient.py
+++ b/controller/registry/dockerclient.py
@@ -55,8 +55,8 @@
def build(self, source, config, repo, tag):
"""Add a "last-mile" layer of environment config to a Docker image for deis-registry."""
check_blacklist(repo)
- env = ' '.join("{}='{}'".format(
- k, v.encode('unicode-escape').replace("'", "\\'")) for k, v in config.viewitems())
+ env = ' '.join('{}="{}"'.format(
+ k, v.encode('unicode-escape').replace('"', '\\"')) for k, v in config.viewitems())
dockerfile = "FROM {}\nENV {}".format(source, env)
f = io.BytesIO(dockerfile.encode('utf-8'))
target_repo = "{}/{}:{}".format(self.registry, repo, tag)
|
{"golden_diff": "diff --git a/controller/registry/dockerclient.py b/controller/registry/dockerclient.py\n--- a/controller/registry/dockerclient.py\n+++ b/controller/registry/dockerclient.py\n@@ -55,8 +55,8 @@\n def build(self, source, config, repo, tag):\n \"\"\"Add a \"last-mile\" layer of environment config to a Docker image for deis-registry.\"\"\"\n check_blacklist(repo)\n- env = ' '.join(\"{}='{}'\".format(\n- k, v.encode('unicode-escape').replace(\"'\", \"\\\\'\")) for k, v in config.viewitems())\n+ env = ' '.join('{}=\"{}\"'.format(\n+ k, v.encode('unicode-escape').replace('\"', '\\\\\"')) for k, v in config.viewitems())\n dockerfile = \"FROM {}\\nENV {}\".format(source, env)\n f = io.BytesIO(dockerfile.encode('utf-8'))\n target_repo = \"{}/{}:{}\".format(self.registry, repo, tag)\n", "issue": "[bug] Unable to start application with ' (quote) in env variable\nOur customer's bug:\n**STR**\n\n``` bash\ngit clone https://github.com/deis/example-python-flask && cd example-python-flask\ndeis create\ngit push deis master\necho \"MAILER_SUBJECT=ISP cron can't create booklet\" > .env\ndeis config:push\n```\n\n**Actual result**\nCreating config... .o.\nClient return 504 error, after ~30 min(balancer config)\n_Controller log_\n\n``` log\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Tagging Docker image 10.21.12.123:5000/forecast:git-1c89b116 as forecast:git-1c89b116\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Building Docker image 10.21.12.123:5000/forecast:v4\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO forecast: 500 Server Error: Internal Server Error (\"Syntax error - can't find = in \"cron\". Must be of the form: name=value\")\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO [forecast]: 500 Server Error: Internal Server Error (\"Syntax error - can't find = in \"cron\". 
Must be of the form: name=value\")\n```\n\n_deis info_\n\n``` bash\n=== forecast Application\nupdated: 2015-12-14T02:57:36UTC\nuuid: 01eacd72-1950-4ec1-a301-4212b59572b6\ncreated: 2015-12-14T02:50:35UTC\nurl: forecast.web.2gis.local\nowner: v.reyder\nid: forecast\n\n=== forecast Processes\n--- web:\nweb.1 up (v3)\nweb.1 down (v4)\n\n=== forecast Domains\n```\n\n_Actually config is set_\n\n``` bash\n$ deis config\n=== forecast Config\nMAILER_SUBJECT ISP cron can't create booklet\n```\n\n_Registry_\n\n``` bash\ns3cmd ls s3://devregistry/registry/repositories/library/forecast/\n2015-12-14 02:57 11780 s3://devregistry/registry/repositories/library/forecast/_index_images\n2015-12-14 02:56 64 s3://devregistry/registry/repositories/library/forecast/tag_git-1c89b116\n2015-12-14 02:57 64 s3://devregistry/registry/repositories/library/forecast/tag_v3\n2015-12-14 02:56 147 s3://devregistry/registry/repositories/library/forecast/taggit-1c89b116_json\n2015-12-14 02:57 147 s3://devregistry/registry/repositories/library/forecast/tagv3_json\n```\n\n**Important: Only v3 tags presents and v4 is not exist**\n\n**Expected result**\nConfig is set, application is up, and proper tags create in registry.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Support the Deis workflow by manipulating and publishing Docker images.\"\"\"\n\nfrom __future__ import unicode_literals\nimport io\nimport logging\n\nfrom django.conf import settings\nfrom rest_framework.exceptions import PermissionDenied\nfrom simpleflock import SimpleFlock\nimport docker\n\nlogger = logging.getLogger(__name__)\n\n\nclass DockerClient(object):\n \"\"\"Use the Docker API to pull, tag, build, and push images to deis-registry.\"\"\"\n\n FLOCKFILE = '/tmp/controller-pull'\n\n def __init__(self):\n self.client = docker.Client(version='auto')\n self.registry = settings.REGISTRY_HOST + ':' + str(settings.REGISTRY_PORT)\n\n def publish_release(self, source, config, target, deis_registry):\n \"\"\"Update a source Docker image with environment config and publish it to deis-registry.\"\"\"\n # get the source repository name and tag\n src_name, src_tag = docker.utils.parse_repository_tag(source)\n # get the target repository name and tag\n name, tag = docker.utils.parse_repository_tag(target)\n # strip any \"http://host.domain:port\" prefix from the target repository name,\n # since we always publish to the Deis registry\n name = strip_prefix(name)\n\n # pull the source image from the registry\n # NOTE: this relies on an implementation detail of deis-builder, that\n # the image has been uploaded already to deis-registry\n if deis_registry:\n repo = \"{}/{}\".format(self.registry, src_name)\n else:\n repo = src_name\n self.pull(repo, src_tag)\n\n # tag the image locally without the repository URL\n image = \"{}:{}\".format(repo, src_tag)\n self.tag(image, src_name, tag=src_tag)\n\n # build a Docker image that adds a \"last-mile\" layer of environment\n config.update({'DEIS_APP': name, 'DEIS_RELEASE': tag})\n self.build(source, config, name, tag)\n\n # push the image to deis-registry\n self.push(\"{}/{}\".format(self.registry, name), tag)\n\n def build(self, source, config, repo, tag):\n \"\"\"Add a \"last-mile\" layer of environment config to a Docker image for deis-registry.\"\"\"\n check_blacklist(repo)\n env = ' '.join(\"{}='{}'\".format(\n k, v.encode('unicode-escape').replace(\"'\", \"\\\\'\")) for k, v in config.viewitems())\n dockerfile = \"FROM {}\\nENV {}\".format(source, env)\n f = io.BytesIO(dockerfile.encode('utf-8'))\n 
target_repo = \"{}/{}:{}\".format(self.registry, repo, tag)\n logger.info(\"Building Docker image {}\".format(target_repo))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.build(fileobj=f, tag=target_repo, stream=True, rm=True)\n log_output(stream)\n\n def pull(self, repo, tag):\n \"\"\"Pull a Docker image into the local storage graph.\"\"\"\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)\n\n def push(self, repo, tag):\n \"\"\"Push a local Docker image to a registry.\"\"\"\n logger.info(\"Pushing Docker image {}:{}\".format(repo, tag))\n stream = self.client.push(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)\n\n def tag(self, image, repo, tag):\n \"\"\"Tag a local Docker image with a new name and tag.\"\"\"\n check_blacklist(repo)\n logger.info(\"Tagging Docker image {} as {}:{}\".format(image, repo, tag))\n if not self.client.tag(image, repo, tag=tag, force=True):\n raise docker.errors.DockerException(\"tagging failed\")\n\n\ndef check_blacklist(repo):\n \"\"\"Check a Docker repository name for collision with deis/* components.\"\"\"\n blacklisted = [ # NOTE: keep this list up to date!\n 'builder', 'cache', 'controller', 'database', 'logger', 'logspout',\n 'publisher', 'registry', 'router', 'store-admin', 'store-daemon',\n 'store-gateway', 'store-metadata', 'store-monitor',\n ]\n if any(\"deis/{}\".format(c) in repo for c in blacklisted):\n raise PermissionDenied(\"Repository name {} is not allowed\".format(repo))\n\n\ndef log_output(stream):\n \"\"\"Log a stream at DEBUG level, and raise DockerException if it contains \"error\".\"\"\"\n for chunk in stream:\n logger.debug(chunk)\n # error handling requires looking at the response body\n if '\"error\"' in chunk.lower():\n raise docker.errors.DockerException(chunk)\n\n\ndef strip_prefix(name):\n \"\"\"Strip the schema and host:port from a Docker repository name.\"\"\"\n paths = name.split('/')\n return '/'.join(p for p in paths if p and '.' 
not in p and ':' not in p)\n\n\ndef publish_release(source, config, target, deis_registry):\n\n client = DockerClient()\n return client.publish_release(source, config, target, deis_registry)\n", "path": "controller/registry/dockerclient.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Support the Deis workflow by manipulating and publishing Docker images.\"\"\"\n\nfrom __future__ import unicode_literals\nimport io\nimport logging\n\nfrom django.conf import settings\nfrom rest_framework.exceptions import PermissionDenied\nfrom simpleflock import SimpleFlock\nimport docker\n\nlogger = logging.getLogger(__name__)\n\n\nclass DockerClient(object):\n \"\"\"Use the Docker API to pull, tag, build, and push images to deis-registry.\"\"\"\n\n FLOCKFILE = '/tmp/controller-pull'\n\n def __init__(self):\n self.client = docker.Client(version='auto')\n self.registry = settings.REGISTRY_HOST + ':' + str(settings.REGISTRY_PORT)\n\n def publish_release(self, source, config, target, deis_registry):\n \"\"\"Update a source Docker image with environment config and publish it to deis-registry.\"\"\"\n # get the source repository name and tag\n src_name, src_tag = docker.utils.parse_repository_tag(source)\n # get the target repository name and tag\n name, tag = docker.utils.parse_repository_tag(target)\n # strip any \"http://host.domain:port\" prefix from the target repository name,\n # since we always publish to the Deis registry\n name = strip_prefix(name)\n\n # pull the source image from the registry\n # NOTE: this relies on an implementation detail of deis-builder, that\n # the image has been uploaded already to deis-registry\n if deis_registry:\n repo = \"{}/{}\".format(self.registry, src_name)\n else:\n repo = src_name\n self.pull(repo, src_tag)\n\n # tag the image locally without the repository URL\n image = \"{}:{}\".format(repo, src_tag)\n self.tag(image, src_name, tag=src_tag)\n\n # build a Docker image that adds a \"last-mile\" layer of environment\n config.update({'DEIS_APP': name, 'DEIS_RELEASE': tag})\n self.build(source, config, name, tag)\n\n # push the image to deis-registry\n self.push(\"{}/{}\".format(self.registry, name), tag)\n\n def build(self, source, config, repo, tag):\n \"\"\"Add a \"last-mile\" layer of environment config to a Docker image for deis-registry.\"\"\"\n check_blacklist(repo)\n env = ' '.join('{}=\"{}\"'.format(\n k, v.encode('unicode-escape').replace('\"', '\\\\\"')) for k, v in config.viewitems())\n dockerfile = \"FROM {}\\nENV {}\".format(source, env)\n f = io.BytesIO(dockerfile.encode('utf-8'))\n target_repo = \"{}/{}:{}\".format(self.registry, repo, tag)\n logger.info(\"Building Docker image {}\".format(target_repo))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.build(fileobj=f, tag=target_repo, stream=True, rm=True)\n log_output(stream)\n\n def pull(self, repo, tag):\n \"\"\"Pull a Docker image into the local storage graph.\"\"\"\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)\n\n def push(self, repo, tag):\n \"\"\"Push a local Docker image to a registry.\"\"\"\n logger.info(\"Pushing Docker image {}:{}\".format(repo, tag))\n stream = self.client.push(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)\n\n def tag(self, image, repo, tag):\n \"\"\"Tag a local Docker image with a new name and tag.\"\"\"\n 
check_blacklist(repo)\n logger.info(\"Tagging Docker image {} as {}:{}\".format(image, repo, tag))\n if not self.client.tag(image, repo, tag=tag, force=True):\n raise docker.errors.DockerException(\"tagging failed\")\n\n\ndef check_blacklist(repo):\n \"\"\"Check a Docker repository name for collision with deis/* components.\"\"\"\n blacklisted = [ # NOTE: keep this list up to date!\n 'builder', 'cache', 'controller', 'database', 'logger', 'logspout',\n 'publisher', 'registry', 'router', 'store-admin', 'store-daemon',\n 'store-gateway', 'store-metadata', 'store-monitor',\n ]\n if any(\"deis/{}\".format(c) in repo for c in blacklisted):\n raise PermissionDenied(\"Repository name {} is not allowed\".format(repo))\n\n\ndef log_output(stream):\n \"\"\"Log a stream at DEBUG level, and raise DockerException if it contains \"error\".\"\"\"\n for chunk in stream:\n logger.debug(chunk)\n # error handling requires looking at the response body\n if '\"error\"' in chunk.lower():\n raise docker.errors.DockerException(chunk)\n\n\ndef strip_prefix(name):\n \"\"\"Strip the schema and host:port from a Docker repository name.\"\"\"\n paths = name.split('/')\n return '/'.join(p for p in paths if p and '.' not in p and ':' not in p)\n\n\ndef publish_release(source, config, target, deis_registry):\n\n client = DockerClient()\n return client.publish_release(source, config, target, deis_registry)\n", "path": "controller/registry/dockerclient.py"}]}
| 2,498 | 215 |
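The root cause in the record above is the quoting of the generated `ENV` line: wrapping values in single quotes breaks as soon as a value contains an apostrophe, which is why the build failed with `Syntax error - can't find = in "cron"`. A small Python 3 illustration of the two quoting strategies (adapted from the Python 2 code in the patch; the `unicode-escape` step is omitted and the image name is made up):

```python
config = {"MAILER_SUBJECT": "ISP cron can't create booklet"}

# Old behaviour: single-quoted values.  The apostrophe in the value closes the
# quote early, which is what produced the reported Docker build error.
old_env = " ".join(
    "{}='{}'".format(k, v.replace("'", "\\'")) for k, v in config.items()
)
# -> MAILER_SUBJECT='ISP cron can\'t create booklet'

# Patched behaviour: double-quoted values, escaping embedded double quotes.
new_env = " ".join(
    '{}="{}"'.format(k, v.replace('"', '\\"')) for k, v in config.items()
)
# -> MAILER_SUBJECT="ISP cron can't create booklet"

print("FROM example/app:git-1c89b116\nENV " + new_env)
```

Double quotes were presumably chosen because an embedded apostrophe is far more likely in free-text config values than an embedded double quote.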
gh_patches_debug_6402
|
rasdani/github-patches
|
git_diff
|
engnadeau__pybotics-425
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strip links/badges/images from README for PyPi
- While the new PyPi Warehouse supports markdown, images/badges/links are slow to load and will not work if they use relative paths (e.g., https://test.pypi.org/project/pybotics/201803222157/#description)
- These elements should be stripped from the README prior to upload
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Setup module."""
2 import logging
3 from pathlib import Path
4
5 from setuptools import find_packages, setup # type: ignore
6
7
8 def main() -> None:
9 """Run setup."""
10 # run setup
11 setup(name='pybotics',
12 packages=find_packages(include=['pybotics']),
13 url='https://github.com/nnadeau/pybotics',
14 license='MIT',
15 author='Nicholas Nadeau',
16 author_email='[email protected]',
17 description='Python Toolbox for Robotics',
18 long_description=get_readme(),
19 long_description_content_type='text/markdown',
20 use_scm_version=True,
21 setup_requires=['setuptools_scm'],
22 install_requires=get_requirements(), # type: ignore
23 tests_require=['pytest'],
24 classifiers=[
25 'Development Status :: 4 - Beta',
26 'Intended Audience :: Developers',
27 'Intended Audience :: Education',
28 'Intended Audience :: End Users/Desktop',
29 'Intended Audience :: Manufacturing',
30 'Intended Audience :: Science/Research',
31 'Topic :: Education',
32 'Topic :: Scientific/Engineering',
33 'Topic :: Scientific/Engineering :: Artificial Intelligence',
34 'Topic :: Scientific/Engineering :: Human Machine Interfaces',
35 'Topic :: Scientific/Engineering :: Mathematics',
36 'Topic :: Scientific/Engineering :: Physics',
37 'Topic :: Utilities',
38 'License :: OSI Approved :: MIT License',
39 'Programming Language :: Python :: 3 :: Only',
40 'Programming Language :: Python :: 3',
41 'Programming Language :: Python :: 3.4',
42 'Programming Language :: Python :: 3.5',
43 'Programming Language :: Python :: 3.6',
44 ],
45 keywords='python robot robotics research '
46 'automation kinematics geometry')
47
48
49 def get_readme() -> str:
50 """Get README text."""
51 # description
52 readme_path = Path(__file__).parent / 'README.md'
53 logging.info('README path: {}'.format(readme_path.resolve()))
54 with open(str(readme_path)) as f:
55 readme = f.read()
56 return readme
57
58
59 # don't want to import typing... so ignore
60 def get_requirements(): # type: ignore
61 """Get requirements list."""
62 # requirements
63 requirements_path = Path(__file__).parent / 'requirements.txt'
64 logging.info('Requirements path: {}'.format(requirements_path.resolve()))
65 with open(str(requirements_path)) as f:
66 requirements = f.read().splitlines()
67 for i, req in enumerate(requirements):
68 requirements[i] = req.split()[0]
69 return requirements
70
71
72 if __name__ == '__main__':
73 logging.basicConfig(level=logging.INFO)
74 main()
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
long_description=get_readme(),
long_description_content_type='text/markdown',
use_scm_version=True,
- setup_requires=['setuptools_scm'],
+ setup_requires=['setuptools', 'setuptools_scm'],
install_requires=get_requirements(), # type: ignore
tests_require=['pytest'],
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n- setup_requires=['setuptools_scm'],\n+ setup_requires=['setuptools', 'setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n", "issue": "Strip links/badges/images from README for PyPi\n- While the new PyPi Warehouse supports markdown, images/badges/links are slow to load and will not work if they use relative paths (e.g., https://test.pypi.org/project/pybotics/201803222157/#description)\r\n- These elements should be stripped from the README prior to upload\n", "before_files": [{"content": "\"\"\"Setup module.\"\"\"\nimport logging\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup # type: ignore\n\n\ndef main() -> None:\n \"\"\"Run setup.\"\"\"\n # run setup\n setup(name='pybotics',\n packages=find_packages(include=['pybotics']),\n url='https://github.com/nnadeau/pybotics',\n license='MIT',\n author='Nicholas Nadeau',\n author_email='[email protected]',\n description='Python Toolbox for Robotics',\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'Topic :: Education',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='python robot robotics research '\n 'automation kinematics geometry')\n\n\ndef get_readme() -> str:\n \"\"\"Get README text.\"\"\"\n # description\n readme_path = Path(__file__).parent / 'README.md'\n logging.info('README path: {}'.format(readme_path.resolve()))\n with open(str(readme_path)) as f:\n readme = f.read()\n return readme\n\n\n# don't want to import typing... 
so ignore\ndef get_requirements(): # type: ignore\n \"\"\"Get requirements list.\"\"\"\n # requirements\n requirements_path = Path(__file__).parent / 'requirements.txt'\n logging.info('Requirements path: {}'.format(requirements_path.resolve()))\n with open(str(requirements_path)) as f:\n requirements = f.read().splitlines()\n for i, req in enumerate(requirements):\n requirements[i] = req.split()[0]\n return requirements\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n main()\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Setup module.\"\"\"\nimport logging\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup # type: ignore\n\n\ndef main() -> None:\n \"\"\"Run setup.\"\"\"\n # run setup\n setup(name='pybotics',\n packages=find_packages(include=['pybotics']),\n url='https://github.com/nnadeau/pybotics',\n license='MIT',\n author='Nicholas Nadeau',\n author_email='[email protected]',\n description='Python Toolbox for Robotics',\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n setup_requires=['setuptools', 'setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'Topic :: Education',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='python robot robotics research '\n 'automation kinematics geometry')\n\n\ndef get_readme() -> str:\n \"\"\"Get README text.\"\"\"\n # description\n readme_path = Path(__file__).parent / 'README.md'\n logging.info('README path: {}'.format(readme_path.resolve()))\n with open(str(readme_path)) as f:\n readme = f.read()\n return readme\n\n\n# don't want to import typing... so ignore\ndef get_requirements(): # type: ignore\n \"\"\"Get requirements list.\"\"\"\n # requirements\n requirements_path = Path(__file__).parent / 'requirements.txt'\n logging.info('Requirements path: {}'.format(requirements_path.resolve()))\n with open(str(requirements_path)) as f:\n requirements = f.read().splitlines()\n for i, req in enumerate(requirements):\n requirements[i] = req.split()[0]\n return requirements\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n main()\n", "path": "setup.py"}]}
| 1,043 | 101 |
gh_patches_debug_57184
|
rasdani/github-patches
|
git_diff
|
beeware__toga-410
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Menu items broken on Cocoa backend: fails with object has no attribute '_menu_items'
I believe this is a regression from 1d41d3833eb4b8785faf8eb7850f3feec4650350 / #373. The changes `appDelegate.interface` from the `App` instance to `App.interface`. Unfortunately, selecting a menu item still expects `appDelegate.interface` to be the `App` instance, see below where it calls `self.interface._menu_items`:
https://github.com/pybee/toga/blob/ad91f1a65a109b670256028e31c887dc18a4876d/src/cocoa/toga_cocoa/app.py#L71-L75
## Expected Behavior
The action successfully runs.
## Current Behavior
The action doesn't run, you get an error in the terminal:
```
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 234, in 'calling callback function'
File "/Users/pcloke/.virtualenvs/toga/lib/python3.6/site-packages/rubicon/objc/runtime.py", line 1033, in _objc_method
result = f(py_self, *args)
File "/Users/pcloke/toga/src/cocoa/toga_cocoa/app.py", line 73, in selectMenuItem_
cmd = self.interface._menu_items[sender]
AttributeError: 'TogaDemo' object has no attribute '_menu_items'
```
## Steps to reproduce
1. Run the toga demo app (`cd toga && python -m toga_demo`)
2. Click on one of the "Commands" > "Action 1"
## Your Environment
* Python 3.6.
* macOS High Sierra
* Toga Target: cocoa
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cocoa/toga_cocoa/app.py`
Content:
```
1 import asyncio
2 import os
3 import sys
4
5 import toga
6 from rubicon.objc.eventloop import EventLoopPolicy, CocoaLifecycle
7
8 from .libs import *
9 from .window import Window
10
11
12 class MainWindow(Window):
13 def on_close(self):
14 self.interface.app.exit()
15
16
17 class AppDelegate(NSObject):
18 @objc_method
19 def applicationDidFinishLaunching_(self, notification):
20 self.native.activateIgnoringOtherApps(True)
21
22 @objc_method
23 def applicationOpenUntitledFile_(self, sender) -> bool:
24 # FIXME This should be all we need; but for some reason, application types
25 # aren't being registered correctly..
26 # NSDocumentController.sharedDocumentController().openDocument_(None)
27
28 # ...so we do this instead.
29 panel = NSOpenPanel.openPanel()
30 # print("Open documents of type", NSDocumentController.sharedDocumentController().defaultType)
31
32 fileTypes = NSMutableArray.alloc().init()
33 for filetype in self.interface.document_types:
34 fileTypes.addObject(filetype)
35
36 NSDocumentController.sharedDocumentController.runModalOpenPanel(panel, forTypes=fileTypes)
37
38 # print("Untitled File opened?", panel.URLs)
39 self.application_openFiles_(None, panel.URLs)
40
41 return True
42
43 @objc_method
44 def addDocument_(self, document) -> None:
45 # print("Add Document", document)
46 super().addDocument_(document)
47
48 @objc_method
49 def applicationShouldOpenUntitledFile_(self, sender) -> bool:
50 return True
51
52 @objc_method
53 def application_openFiles_(self, app, filenames) -> None:
54 for i in range(0, len(filenames)):
55 filename = filenames.objectAtIndex(i)
56 if isinstance(filename, str):
57 fileURL = NSURL.fileURLWithPath(filename)
58
59 elif filename.objc_class.name == 'NSURL':
60 # This case only exists because we aren't using the
61 # DocumentController to display the file open dialog.
62 # If we were, *all* filenames passed in would be
63 # string paths.
64 fileURL = filename
65 else:
66 return
67
68 self.interface.open_document(fileURL.absoluteString)
69 # NSDocumentController.sharedDocumentController().openDocumentWithContentsOfURL_display_completionHandler_(fileURL, True, None)
70
71 @objc_method
72 def selectMenuItem_(self, sender) -> None:
73 cmd = self.interface._menu_items[sender]
74 if cmd.action:
75 cmd.action(None)
76
77
78 class App:
79 _MAIN_WINDOW_CLASS = MainWindow
80
81 def __init__(self, interface):
82 self.interface = interface
83 self.interface._impl = self
84
85 asyncio.set_event_loop_policy(EventLoopPolicy())
86 self.loop = asyncio.get_event_loop()
87
88 def create(self):
89 self.native = NSApplication.sharedApplication
90 self.native.setActivationPolicy(NSApplicationActivationPolicyRegular)
91
92 self.native.setApplicationIconImage_(self.interface.icon.bind(self.interface.factory).native)
93
94 self.resource_path = os.path.dirname(os.path.dirname(NSBundle.mainBundle.bundlePath))
95
96 appDelegate = AppDelegate.alloc().init()
97 appDelegate.interface = self.interface
98 appDelegate.native = self.native
99 self.native.setDelegate_(appDelegate)
100
101 app_name = self.interface.name
102
103 self.interface.commands.add(
104 toga.Command(None, 'About ' + app_name, group=toga.Group.APP),
105 toga.Command(None, 'Preferences', group=toga.Group.APP),
106 # Quit should always be the last item, in a section on it's own
107 toga.Command(lambda s: self.exit(), 'Quit ' + app_name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),
108
109 toga.Command(None, 'Visit homepage', group=toga.Group.HELP)
110 )
111
112 # Call user code to populate the main window
113 self.interface.startup()
114
115 # Create the lookup table of menu items,
116 # then force the creation of the menus.
117 self._menu_items = {}
118 self.create_menus()
119
120 def open_document(self, fileURL):
121 '''Add a new document to this app.'''
122 print("STUB: If you want to handle opening documents, implement App.open_document(fileURL)")
123
124 def create_menus(self):
125 # Only create the menu if the menu item index has been created.
126 if hasattr(self, '_menu_items'):
127 self._menu_items = {}
128 menubar = NSMenu.alloc().initWithTitle('MainMenu')
129 submenu = None
130 for cmd in self.interface.commands:
131 if cmd == toga.GROUP_BREAK:
132 menubar.setSubmenu(submenu, forItem=menuItem)
133 submenu = None
134 elif cmd == toga.SECTION_BREAK:
135 submenu.addItem_(NSMenuItem.separatorItem())
136 else:
137 if submenu is None:
138 menuItem = menubar.addItemWithTitle(cmd.group.label, action=None, keyEquivalent='')
139 submenu = NSMenu.alloc().initWithTitle(cmd.group.label)
140 submenu.setAutoenablesItems(False)
141
142 item = NSMenuItem.alloc().initWithTitle(
143 cmd.label,
144 action=SEL('selectMenuItem:'),
145 keyEquivalent=cmd.shortcut if cmd.shortcut else ''
146 )
147
148 cmd._widgets.append(item)
149 self._menu_items[item] = cmd
150
151 # This line may appear redundant, but it triggers the logic
152 # to force the enabled status on the underlying widgets.
153 cmd.enabled = cmd.enabled
154 submenu.addItem(item)
155
156 if submenu:
157 menubar.setSubmenu(submenu, forItem=menuItem)
158
159 # Set the menu for the app.
160 self.native.mainMenu = menubar
161
162 def main_loop(self):
163 # Stimulate the build of the app
164 self.create()
165
166 self.loop.run_forever(lifecycle=CocoaLifecycle(self.native))
167
168 def exit(self):
169 self.native.terminate(None)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cocoa/toga_cocoa/app.py b/src/cocoa/toga_cocoa/app.py
--- a/src/cocoa/toga_cocoa/app.py
+++ b/src/cocoa/toga_cocoa/app.py
@@ -70,7 +70,7 @@
@objc_method
def selectMenuItem_(self, sender) -> None:
- cmd = self.interface._menu_items[sender]
+ cmd = self.interface._impl._menu_items[sender]
if cmd.action:
cmd.action(None)
|
{"golden_diff": "diff --git a/src/cocoa/toga_cocoa/app.py b/src/cocoa/toga_cocoa/app.py\n--- a/src/cocoa/toga_cocoa/app.py\n+++ b/src/cocoa/toga_cocoa/app.py\n@@ -70,7 +70,7 @@\n \n @objc_method\n def selectMenuItem_(self, sender) -> None:\n- cmd = self.interface._menu_items[sender]\n+ cmd = self.interface._impl._menu_items[sender]\n if cmd.action:\n cmd.action(None)\n", "issue": "Menu items broken on Cocoa backend: fails with object has no attribute '_menu_items'\nI believe this is a regression from 1d41d3833eb4b8785faf8eb7850f3feec4650350 / #373. The changes `appDelegate.interface` from the `App` instance to `App.interface`. Unfortunately, selecting a menu item still expects `appDelegate.interface` to be the `App` instance, see below where it calls `self.interface._menu_items`:\r\n\r\nhttps://github.com/pybee/toga/blob/ad91f1a65a109b670256028e31c887dc18a4876d/src/cocoa/toga_cocoa/app.py#L71-L75\r\n\r\n## Expected Behavior\r\nThe action successfully runs.\r\n\r\n## Current Behavior\r\nThe action doesn't run, you get an error in the terminal:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"_ctypes/callbacks.c\", line 234, in 'calling callback function'\r\n File \"/Users/pcloke/.virtualenvs/toga/lib/python3.6/site-packages/rubicon/objc/runtime.py\", line 1033, in _objc_method\r\n result = f(py_self, *args)\r\n File \"/Users/pcloke/toga/src/cocoa/toga_cocoa/app.py\", line 73, in selectMenuItem_\r\n cmd = self.interface._menu_items[sender]\r\nAttributeError: 'TogaDemo' object has no attribute '_menu_items'\r\n```\r\n\r\n## Steps to reproduce\r\n\r\n1. Run the toga demo app (`cd toga && python -m toga_demo`)\r\n2. Click on one of the \"Commands\" > \"Action 1\"\r\n\r\n## Your Environment\r\n\r\n* Python 3.6.\r\n* macOS High Sierra\r\n* Toga Target: cocoa\n", "before_files": [{"content": "import asyncio\nimport os\nimport sys\n\nimport toga\nfrom rubicon.objc.eventloop import EventLoopPolicy, CocoaLifecycle\n\nfrom .libs import *\nfrom .window import Window\n\n\nclass MainWindow(Window):\n def on_close(self):\n self.interface.app.exit()\n\n\nclass AppDelegate(NSObject):\n @objc_method\n def applicationDidFinishLaunching_(self, notification):\n self.native.activateIgnoringOtherApps(True)\n\n @objc_method\n def applicationOpenUntitledFile_(self, sender) -> bool:\n # FIXME This should be all we need; but for some reason, application types\n # aren't being registered correctly..\n # NSDocumentController.sharedDocumentController().openDocument_(None)\n\n # ...so we do this instead.\n panel = NSOpenPanel.openPanel()\n # print(\"Open documents of type\", NSDocumentController.sharedDocumentController().defaultType)\n\n fileTypes = NSMutableArray.alloc().init()\n for filetype in self.interface.document_types:\n fileTypes.addObject(filetype)\n\n NSDocumentController.sharedDocumentController.runModalOpenPanel(panel, forTypes=fileTypes)\n\n # print(\"Untitled File opened?\", panel.URLs)\n self.application_openFiles_(None, panel.URLs)\n\n return True\n\n @objc_method\n def addDocument_(self, document) -> None:\n # print(\"Add Document\", document)\n super().addDocument_(document)\n\n @objc_method\n def applicationShouldOpenUntitledFile_(self, sender) -> bool:\n return True\n\n @objc_method\n def application_openFiles_(self, app, filenames) -> None:\n for i in range(0, len(filenames)):\n filename = filenames.objectAtIndex(i)\n if isinstance(filename, str):\n fileURL = NSURL.fileURLWithPath(filename)\n\n elif filename.objc_class.name == 'NSURL':\n # This case only exists because we aren't using 
the\n # DocumentController to display the file open dialog.\n # If we were, *all* filenames passed in would be\n # string paths.\n fileURL = filename\n else:\n return\n\n self.interface.open_document(fileURL.absoluteString)\n # NSDocumentController.sharedDocumentController().openDocumentWithContentsOfURL_display_completionHandler_(fileURL, True, None)\n\n @objc_method\n def selectMenuItem_(self, sender) -> None:\n cmd = self.interface._menu_items[sender]\n if cmd.action:\n cmd.action(None)\n\n\nclass App:\n _MAIN_WINDOW_CLASS = MainWindow\n\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n asyncio.set_event_loop_policy(EventLoopPolicy())\n self.loop = asyncio.get_event_loop()\n\n def create(self):\n self.native = NSApplication.sharedApplication\n self.native.setActivationPolicy(NSApplicationActivationPolicyRegular)\n\n self.native.setApplicationIconImage_(self.interface.icon.bind(self.interface.factory).native)\n\n self.resource_path = os.path.dirname(os.path.dirname(NSBundle.mainBundle.bundlePath))\n\n appDelegate = AppDelegate.alloc().init()\n appDelegate.interface = self.interface\n appDelegate.native = self.native\n self.native.setDelegate_(appDelegate)\n\n app_name = self.interface.name\n\n self.interface.commands.add(\n toga.Command(None, 'About ' + app_name, group=toga.Group.APP),\n toga.Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n toga.Command(lambda s: self.exit(), 'Quit ' + app_name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n\n toga.Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n # Call user code to populate the main window\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._menu_items = {}\n self.create_menus()\n\n def open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_menu_items'):\n self._menu_items = {}\n menubar = NSMenu.alloc().initWithTitle('MainMenu')\n submenu = None\n for cmd in self.interface.commands:\n if cmd == toga.GROUP_BREAK:\n menubar.setSubmenu(submenu, forItem=menuItem)\n submenu = None\n elif cmd == toga.SECTION_BREAK:\n submenu.addItem_(NSMenuItem.separatorItem())\n else:\n if submenu is None:\n menuItem = menubar.addItemWithTitle(cmd.group.label, action=None, keyEquivalent='')\n submenu = NSMenu.alloc().initWithTitle(cmd.group.label)\n submenu.setAutoenablesItems(False)\n\n item = NSMenuItem.alloc().initWithTitle(\n cmd.label,\n action=SEL('selectMenuItem:'),\n keyEquivalent=cmd.shortcut if cmd.shortcut else ''\n )\n\n cmd._widgets.append(item)\n self._menu_items[item] = cmd\n\n # This line may appear redundant, but it triggers the logic\n # to force the enabled status on the underlying widgets.\n cmd.enabled = cmd.enabled\n submenu.addItem(item)\n\n if submenu:\n menubar.setSubmenu(submenu, forItem=menuItem)\n\n # Set the menu for the app.\n self.native.mainMenu = menubar\n\n def main_loop(self):\n # Stimulate the build of the app\n self.create()\n\n self.loop.run_forever(lifecycle=CocoaLifecycle(self.native))\n\n def exit(self):\n self.native.terminate(None)\n", "path": "src/cocoa/toga_cocoa/app.py"}], "after_files": [{"content": "import asyncio\nimport os\nimport sys\n\nimport toga\nfrom 
rubicon.objc.eventloop import EventLoopPolicy, CocoaLifecycle\n\nfrom .libs import *\nfrom .window import Window\n\n\nclass MainWindow(Window):\n def on_close(self):\n self.interface.app.exit()\n\n\nclass AppDelegate(NSObject):\n @objc_method\n def applicationDidFinishLaunching_(self, notification):\n self.native.activateIgnoringOtherApps(True)\n\n @objc_method\n def applicationOpenUntitledFile_(self, sender) -> bool:\n # FIXME This should be all we need; but for some reason, application types\n # aren't being registered correctly..\n # NSDocumentController.sharedDocumentController().openDocument_(None)\n\n # ...so we do this instead.\n panel = NSOpenPanel.openPanel()\n # print(\"Open documents of type\", NSDocumentController.sharedDocumentController().defaultType)\n\n fileTypes = NSMutableArray.alloc().init()\n for filetype in self.interface.document_types:\n fileTypes.addObject(filetype)\n\n NSDocumentController.sharedDocumentController.runModalOpenPanel(panel, forTypes=fileTypes)\n\n # print(\"Untitled File opened?\", panel.URLs)\n self.application_openFiles_(None, panel.URLs)\n\n return True\n\n @objc_method\n def addDocument_(self, document) -> None:\n # print(\"Add Document\", document)\n super().addDocument_(document)\n\n @objc_method\n def applicationShouldOpenUntitledFile_(self, sender) -> bool:\n return True\n\n @objc_method\n def application_openFiles_(self, app, filenames) -> None:\n for i in range(0, len(filenames)):\n filename = filenames.objectAtIndex(i)\n if isinstance(filename, str):\n fileURL = NSURL.fileURLWithPath(filename)\n\n elif filename.objc_class.name == 'NSURL':\n # This case only exists because we aren't using the\n # DocumentController to display the file open dialog.\n # If we were, *all* filenames passed in would be\n # string paths.\n fileURL = filename\n else:\n return\n\n self.interface.open_document(fileURL.absoluteString)\n # NSDocumentController.sharedDocumentController().openDocumentWithContentsOfURL_display_completionHandler_(fileURL, True, None)\n\n @objc_method\n def selectMenuItem_(self, sender) -> None:\n cmd = self.interface._impl._menu_items[sender]\n if cmd.action:\n cmd.action(None)\n\n\nclass App:\n _MAIN_WINDOW_CLASS = MainWindow\n\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n asyncio.set_event_loop_policy(EventLoopPolicy())\n self.loop = asyncio.get_event_loop()\n\n def create(self):\n self.native = NSApplication.sharedApplication\n self.native.setActivationPolicy(NSApplicationActivationPolicyRegular)\n\n self.native.setApplicationIconImage_(self.interface.icon.bind(self.interface.factory).native)\n\n self.resource_path = os.path.dirname(os.path.dirname(NSBundle.mainBundle.bundlePath))\n\n appDelegate = AppDelegate.alloc().init()\n appDelegate.interface = self.interface\n appDelegate.native = self.native\n self.native.setDelegate_(appDelegate)\n\n app_name = self.interface.name\n\n self.interface.commands.add(\n toga.Command(None, 'About ' + app_name, group=toga.Group.APP),\n toga.Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n toga.Command(lambda s: self.exit(), 'Quit ' + app_name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n\n toga.Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n # Call user code to populate the main window\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._menu_items = {}\n self.create_menus()\n\n def 
open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_menu_items'):\n self._menu_items = {}\n menubar = NSMenu.alloc().initWithTitle('MainMenu')\n submenu = None\n for cmd in self.interface.commands:\n if cmd == toga.GROUP_BREAK:\n menubar.setSubmenu(submenu, forItem=menuItem)\n submenu = None\n elif cmd == toga.SECTION_BREAK:\n submenu.addItem_(NSMenuItem.separatorItem())\n else:\n if submenu is None:\n menuItem = menubar.addItemWithTitle(cmd.group.label, action=None, keyEquivalent='')\n submenu = NSMenu.alloc().initWithTitle(cmd.group.label)\n submenu.setAutoenablesItems(False)\n\n item = NSMenuItem.alloc().initWithTitle(\n cmd.label,\n action=SEL('selectMenuItem:'),\n keyEquivalent=cmd.shortcut if cmd.shortcut else ''\n )\n\n cmd._widgets.append(item)\n self._menu_items[item] = cmd\n\n # This line may appear redundant, but it triggers the logic\n # to force the enabled status on the underlying widgets.\n cmd.enabled = cmd.enabled\n submenu.addItem(item)\n\n if submenu:\n menubar.setSubmenu(submenu, forItem=menuItem)\n\n # Set the menu for the app.\n self.native.mainMenu = menubar\n\n def main_loop(self):\n # Stimulate the build of the app\n self.create()\n\n self.loop.run_forever(lifecycle=CocoaLifecycle(self.native))\n\n def exit(self):\n self.native.terminate(None)\n", "path": "src/cocoa/toga_cocoa/app.py"}]}
| 2,311 | 114 |
gh_patches_debug_32678
|
rasdani/github-patches
|
git_diff
|
raspiblitz__raspiblitz-1227
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move BlitzTUI log to RAM disk
The RAM disk (`/var/cache/raspiblitz/`) is a nice way to avoid reads and writes to either the SD card or the external disk for non-persistent data. Several things can be moved; the BlitzTUI log should be a prime example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `home.admin/BlitzTUI/blitztui/version.py`
Content:
```
1 """ Store the version here so:
2 # 1) we don't load dependencies by storing it in __init__.py
3 # 2) we can import it in setup.py for the same reason
4 # 3) we can import it into your module module
5 """
6
7 __version_info__ = ('0', '47', '0')
8 __version__ = '.'.join(__version_info__)
9
```
Path: `home.admin/BlitzTUI/blitztui/file_logger.py`
Content:
```
1 import json
2 import logging
3 import logging.config
4 import os
5 import sys
6
7 IS_WIN32_ENV = sys.platform == "win32"
8
9
10 def setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level="INFO"):
11 """Setup logging configuration"""
12 path = default_path
13 if os.path.exists(path):
14 with open(path, 'rt') as f:
15 config = json.load(f)
16 logging.config.dictConfig(config)
17
18 else: # if $default_path does not exist use the following default log setup
19
20 if IS_WIN32_ENV:
21 log_file = "blitz-tui.log"
22 else:
23 log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
24
25 default_config_as_dict = dict(
26 version=1,
27 disable_existing_loggers=False,
28 formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},
29 'extended': {
30 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},
31 handlers={'console': {'class': 'logging.StreamHandler',
32 'level': 'INFO',
33 'formatter': 'extended',
34 'stream': 'ext://sys.stdout'},
35 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',
36 'level': log_level,
37 'formatter': 'extended',
38 'filename': log_file,
39 'maxBytes': 10485760,
40 'backupCount': 0,
41 'encoding': 'utf8'}},
42 loggers={'infoblitz': {'level': 'DEBUG',
43 'handlers': ['console', 'file_handler'],
44 'propagate': 'no'}},
45 root={'level': 'DEBUG', 'handlers': ['console', 'file_handler']}
46 )
47
48 logging.config.dictConfig(default_config_as_dict)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/home.admin/BlitzTUI/blitztui/file_logger.py b/home.admin/BlitzTUI/blitztui/file_logger.py
--- a/home.admin/BlitzTUI/blitztui/file_logger.py
+++ b/home.admin/BlitzTUI/blitztui/file_logger.py
@@ -1,3 +1,4 @@
+import getpass
import json
import logging
import logging.config
@@ -20,7 +21,14 @@
if IS_WIN32_ENV:
log_file = "blitz-tui.log"
else:
- log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
+ if os.path.isdir('/var/cache/raspiblitz'):
+ try:
+ os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))
+ except FileExistsError:
+ pass
+ log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))
+ else:
+            log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
 
default_config_as_dict = dict(
version=1,
@@ -36,8 +44,8 @@
'level': log_level,
'formatter': 'extended',
'filename': log_file,
- 'maxBytes': 10485760,
- 'backupCount': 0,
+ 'maxBytes': 2*1024*1024, # 2 MB
+ 'backupCount': 1,
'encoding': 'utf8'}},
loggers={'infoblitz': {'level': 'DEBUG',
'handlers': ['console', 'file_handler'],
diff --git a/home.admin/BlitzTUI/blitztui/version.py b/home.admin/BlitzTUI/blitztui/version.py
--- a/home.admin/BlitzTUI/blitztui/version.py
+++ b/home.admin/BlitzTUI/blitztui/version.py
@@ -4,5 +4,5 @@
# 3) we can import it into your module module
"""
-__version_info__ = ('0', '47', '0')
+__version_info__ = ('0', '48', '1')
__version__ = '.'.join(__version_info__)
|
{"golden_diff": "diff --git a/home.admin/BlitzTUI/blitztui/file_logger.py b/home.admin/BlitzTUI/blitztui/file_logger.py\n--- a/home.admin/BlitzTUI/blitztui/file_logger.py\n+++ b/home.admin/BlitzTUI/blitztui/file_logger.py\n@@ -1,3 +1,4 @@\n+import getpass\n import json\n import logging\n import logging.config\n@@ -20,7 +21,14 @@\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n- log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n+ if os.path.isdir('/var/cache/raspiblitz'):\n+ try:\n+ os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))\n+ except FileExistsError:\n+ pass\n+ log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))\n+ else:\n+ log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n \n default_config_as_dict = dict(\n version=1,\n@@ -36,8 +44,8 @@\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n- 'maxBytes': 10485760,\n- 'backupCount': 0,\n+ 'maxBytes': 2*1024*1024, # 2 MB\n+ 'backupCount': 1,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\ndiff --git a/home.admin/BlitzTUI/blitztui/version.py b/home.admin/BlitzTUI/blitztui/version.py\n--- a/home.admin/BlitzTUI/blitztui/version.py\n+++ b/home.admin/BlitzTUI/blitztui/version.py\n@@ -4,5 +4,5 @@\n # 3) we can import it into your module module\n \"\"\"\n \n-__version_info__ = ('0', '47', '0')\n+__version_info__ = ('0', '48', '1')\n __version__ = '.'.join(__version_info__)\n", "issue": "Move BlitzTUI log to RAM disk\nThe RAM disk (`/var/cache/raspiblitz/`) is a nice way to avoid reads and writes to either the SD card or the external disk for non-persistent data. Several things can be moved; the BlitzTUI log should be a prime example.\n", "before_files": [{"content": "\"\"\" Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module module\n\"\"\"\n\n__version_info__ = ('0', '47', '0')\n__version__ = '.'.join(__version_info__)\n", "path": "home.admin/BlitzTUI/blitztui/version.py"}, {"content": "import json\nimport logging\nimport logging.config\nimport os\nimport sys\n\nIS_WIN32_ENV = sys.platform == \"win32\"\n\n\ndef setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level=\"INFO\"):\n \"\"\"Setup logging configuration\"\"\"\n path = default_path\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\n else: # if $default_path does not exist use the following default log setup\n\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n\n default_config_as_dict = dict(\n version=1,\n disable_existing_loggers=False,\n formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},\n 'extended': {\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},\n handlers={'console': {'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'extended',\n 'stream': 'ext://sys.stdout'},\n 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n 'maxBytes': 10485760,\n 'backupCount': 0,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\n 'propagate': 'no'}},\n root={'level': 
'DEBUG', 'handlers': ['console', 'file_handler']}\n )\n\n logging.config.dictConfig(default_config_as_dict)\n", "path": "home.admin/BlitzTUI/blitztui/file_logger.py"}], "after_files": [{"content": "\"\"\" Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module module\n\"\"\"\n\n__version_info__ = ('0', '48', '1')\n__version__ = '.'.join(__version_info__)\n", "path": "home.admin/BlitzTUI/blitztui/version.py"}, {"content": "import getpass\nimport json\nimport logging\nimport logging.config\nimport os\nimport sys\n\nIS_WIN32_ENV = sys.platform == \"win32\"\n\n\ndef setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level=\"INFO\"):\n \"\"\"Setup logging configuration\"\"\"\n path = default_path\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\n else: # if $default_path does not exist use the following default log setup\n\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n if os.path.isdir('/var/cache/raspiblitz'):\n try:\n os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))\n except FileExistsError:\n pass\n log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))\n else:\n log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n\n default_config_as_dict = dict(\n version=1,\n disable_existing_loggers=False,\n formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},\n 'extended': {\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},\n handlers={'console': {'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'extended',\n 'stream': 'ext://sys.stdout'},\n 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n 'maxBytes': 2*1024*1024, # 2 MB\n 'backupCount': 1,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\n 'propagate': 'no'}},\n root={'level': 'DEBUG', 'handlers': ['console', 'file_handler']}\n )\n\n logging.config.dictConfig(default_config_as_dict)\n", "path": "home.admin/BlitzTUI/blitztui/file_logger.py"}]}
| 951 | 523 |
gh_patches_debug_3076
|
rasdani/github-patches
|
git_diff
|
huggingface__optimum-334
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for GPU optimization of xlm-roberta model type
### System Info
```shell
optimum==1.3.0
python==3.8.13
Ubuntu 18.04
```
### Who can help?
@philschmid
### Information
- [X] The official example scripts
- [x] My own modified scripts
### Tasks
- [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)
- [ ] My own task or dataset (give details below)
### Reproduction
Hi,
Thanks for this very nice tool for simple and rapid ONNX optimization of Transformer models.
I would like to GPU optimize this multilingual cross-encoder model : `cross-encoder/mmarco-mMiniLMv2-L12-H384-v1` using the code provided in a blog post by @philschmid:
```
from optimum.onnxruntime import ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig
# create ORTOptimizer and define optimization configuration
optimizer = ORTOptimizer.from_pretrained(model_id, feature=model.pipeline_task)
optimization_config = OptimizationConfig(optimization_level=99, # enable all optimizations
optimize_for_gpu=True,
fp16=True
)
# apply the optimization configuration to the model
optimizer.export(
onnx_model_path=onnx_path / "model.onnx",
onnx_optimized_model_output_path=onnx_path / "model-optimized.onnx",
optimization_config=optimization_config,
)
```
But got the following error:
```
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb Cellule 15 in <cell line: 12>()
[6](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=5) optimization_config = OptimizationConfig(optimization_level=99, # enable all optimizations
[7](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=6) optimize_for_gpu=True,
[8](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=7) fp16=True
[9](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=8) )
[11](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=10) # apply the optimization configuration to the model
---> [12](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=11) optimizer.export(
[13](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=12) onnx_model_path=onnx_path / "model.onnx",
[14](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=13) onnx_optimized_model_output_path=onnx_path / "model-optimized.onnx",
[15](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=14) optimization_config=optimization_config,
[16](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=15) )
File ~/anaconda3/envs/haystack-gpu-fresh/lib/python3.8/site-packages/optimum/onnxruntime/optimization.py:125, in ORTOptimizer.export(self, onnx_model_path, onnx_optimized_model_output_path, optimization_config, use_external_data_format)
122 if not onnx_model_path.exists():
123 export(self.preprocessor, self.model, self._onnx_config, self.opset, onnx_model_path)
--> 125 ORTConfigManager.check_supported_model_or_raise(self._model_type)
126 num_heads = getattr(self.model.config, ORTConfigManager.get_num_heads_name(self._model_type))
127 hidden_size = getattr(self.model.config, ORTConfigManager.get_hidden_size_name(self._model_type))
File ~/anaconda3/envs/haystack-gpu-fresh/lib/python3.8/site-packages/optimum/onnxruntime/utils.py:110, in ORTConfigManager.check_supported_model_or_raise(cls, model_type)
107 @classmethod
108 def check_supported_model_or_raise(cls, model_type: str) -> bool:
109 if model_type not in cls._conf:
...
111 f"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. "
112 f"If you want to support {model_type} please propose a PR or open up an issue."
113 )
KeyError: "xlm-roberta model type is not supported yet. Only ['bert', 'albert', 'camembert', 'codegen', 'distilbert', 'deberta', 'deberta-v2', 'electra', 'roberta', 'bart', 'gpt2', 'gpt_neo'] are supported. If you want to support xlm-roberta please propose a PR or open up an issue."
```
Would it be possible to add support for this model type?
Thanks!
### Expected behavior
Would it be possible to add support for `xlm-roberta` model type?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optimum/onnxruntime/utils.py`
Content:
```
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from enum import Enum
15 from pathlib import Path
16 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
17
18 import torch
19 from transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
20 from transformers.utils import logging
21
22 import onnx
23 import onnxruntime as ort
24
25 from ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss
26
27
28 logger = logging.get_logger(__name__)
29
30 ONNX_WEIGHTS_NAME = "model.onnx"
31 OPTIMIZED_ONNX_WEIGHTS_NAME = "optimized_model.onnx"
32 QUANTIZED_ONNX_WEIGHTS_NAME = "q8_model.onnx"
33
34 ONNX_ENCODER_NAME = "encoder_model.onnx"
35 ONNX_DECODER_NAME = "decoder_model.onnx"
36 ONNX_DECODER_WITH_PAST_NAME = "decoder_with_past_model.onnx"
37
38
39 def _is_gpu_available():
40 """
41 checks if a gpu is available.
42 """
43 available_providers = ort.get_available_providers()
44 if "CUDAExecutionProvider" in available_providers and torch.cuda.is_available():
45 return True
46 else:
47 return False
48
49
50 class ORTConfigManager:
51 """
52 A class that contains all the information needed by ONNX Runtime optimization for a given model type.
53
54 Attributes:
55 _conf (`Dict[str, tuple]`):
56 A dictionary mapping each supported model type to a tuple containing the number of attention heads
57 and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.
58 """
59
60 _conf = {
61 "bert": ("num_attention_heads", "hidden_size", "bert"),
62 "albert": ("num_attention_heads", "hidden_size", "bert"),
63 "camembert": ("num_attention_heads", "hidden_size", "bert"),
64 "codegen": ("n_head", "n_embd", "gpt2"),
65 "distilbert": ("n_heads", "dim", "bert"),
66 "deberta": ("num_attention_heads", "hidden_size", "bert"),
67 "deberta-v2": ("num_attention_heads", "hidden_size", "bert"),
68 "electra": ("num_attention_heads", "hidden_size", "bert"),
69 "roberta": ("num_attention_heads", "hidden_size", "bert"),
70 "bart": ("encoder_attention_heads", "d_model", "bart"),
71 "gpt2": ("n_head", "n_embd", "gpt2"),
72 "gpt_neo": ("num_heads", "hidden_size", "gpt2"),
73 }
74
75 @classmethod
76 def get_num_heads_name(cls, model_type: str) -> str:
77 num_heads = "num_attention_heads"
78 try:
79 num_heads = cls._conf[model_type][0]
80 except KeyError:
81 logger.warning(
82 f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to "
83 f"access the number of heads defined in the config is set to `{num_heads}`."
84 )
85 return num_heads
86
87 @classmethod
88 def get_hidden_size_name(cls, model_type: str) -> str:
89 hidden_size = "hidden_size"
90 try:
91 hidden_size = cls._conf[model_type][1]
92 except KeyError:
93 logger.warning(
94 f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to "
95 f"access the hidden size defined in the config is set to `{hidden_size}`."
96 )
97 return hidden_size
98
99 @classmethod
100 def get_model_ort_type(cls, model_type: str) -> str:
101 try:
102 model_type = cls._conf[model_type][2]
103 except KeyError:
104 logger.warning(f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.")
105 return model_type
106
107 @classmethod
108 def check_supported_model_or_raise(cls, model_type: str) -> bool:
109 if model_type not in cls._conf:
110 raise KeyError(
111 f"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. "
112 f"If you want to support {model_type} please propose a PR or open up an issue."
113 )
114
115
116 def generate_identified_filename(filename, identifier):
117 return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
118
119
120 def fix_atenops_to_gather(model_path):
121 # Fix broken ATenOp nodes back to Gather nodes.
122 model = onnx.load(model_path)
123 onnx.checker.check_model(model)
124
125 nodes = model.graph.node
126
127 for node in nodes:
128 if node.op_type in ["ATenOp", "ATen"]:
129 logger.info(f"----Start fixing node: {node.name}----")
130 op_num = node.name.split("_")[-1]
131 new_node = onnx.helper.make_node(
132 "Gather",
133 name="Gather_" + op_num,
134 inputs=[node.input[0], node.input[1]],
135 outputs=node.output,
136 )
137
138 model.graph.node.remove(node)
139 model.graph.node.insert(int(op_num), new_node)
140
141 onnx.checker.check_model(model)
142 onnx.save(model, model_path)
143
144
145 def wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:
146 if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):
147 return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)
148 elif isinstance(onnx_config, OnnxConfigWithPast):
149 return OnnxConfigWithPastAndLoss(onnx_config)
150 else:
151 return OnnxConfigWithLoss(onnx_config)
152
153
154 def get_device_for_provider(provider: str) -> torch.device:
155 """
156 Gets the PyTorch device (CPU/CUDA) associated with an ONNX Runtime provider.
157 """
158 return torch.device("cuda") if provider == "CUDAExecutionProvider" else torch.device("cpu")
159
160
161 def get_provider_for_device(device: torch.device) -> str:
162 """
163 Gets the ONNX Runtime provider associated with the PyTorch device (CPU/CUDA).
164 """
165 return "CUDAExecutionProvider" if device.type.lower() == "cuda" else "CPUExecutionProvider"
166
167
168 class ORTQuantizableOperator(Enum):
169 # Common ops
170 Gather = "Gather"
171 Transpose = "Transpose"
172 EmbedLayerNormalizationQuant = "EmbedLayerNormalization"
173
174 # QLinearOps
175 Conv = "Conv"
176 MatMul = "MatMul"
177 Add = "Add"
178 Mul = "Mul"
179 Relu = "Relu"
180 Clip = "Clip"
181 LeakyRelu = "LeakyRelu"
182 Sigmoid = "Sigmoid"
183 MaxPool = "MaxPool"
184 GlobalAveragePool = "GlobalAveragePool"
185 Split = "Split"
186 Pad = "Pad"
187 Reshape = "Reshape"
188 Squeeze = "Squeeze"
189 Unsqueeze = "Unsqueeze"
190 Resize = "Resize"
191 AveragePool = "AveragePool"
192 Concat = "Concat"
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py
--- a/optimum/onnxruntime/utils.py
+++ b/optimum/onnxruntime/utils.py
@@ -70,6 +70,7 @@
"bart": ("encoder_attention_heads", "d_model", "bart"),
"gpt2": ("n_head", "n_embd", "gpt2"),
"gpt_neo": ("num_heads", "hidden_size", "gpt2"),
+ "xlm-roberta": ("num_attention_heads", "hidden_size", "bert"),
     }
 
     @classmethod
|
{"golden_diff": "diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py\n--- a/optimum/onnxruntime/utils.py\n+++ b/optimum/onnxruntime/utils.py\n@@ -70,6 +70,7 @@\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n+ \"xlm-roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n }\n \n @classmethod\n", "issue": "Support for GPU optimization of xlm-roberta model type\n### System Info\n\n```shell\noptimum==1.3.0\r\npython==3.8.13\r\nUbuntu 18.04\n```\n\n\n### Who can help?\n\n@philschmid \n\n### Information\n\n- [X] The official example scripts\n- [x] My own modified scripts\n\n### Tasks\n\n- [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)\n- [ ] My own task or dataset (give details below)\n\n### Reproduction\n\nHi,\r\n\r\nThanks for this very nice tool for simple and rapid ONNX optimization of Transformer models.\r\n\r\nI would like to GPU optimize this multilingual cross-encoder model : `cross-encoder/mmarco-mMiniLMv2-L12-H384-v1` using the code provided in a blog post by @philschmid:\r\n\r\n```\r\nfrom optimum.onnxruntime import ORTOptimizer\r\nfrom optimum.onnxruntime.configuration import OptimizationConfig\r\n\r\n# create ORTOptimizer and define optimization configuration\r\noptimizer = ORTOptimizer.from_pretrained(model_id, feature=model.pipeline_task)\r\noptimization_config = OptimizationConfig(optimization_level=99, # enable all optimizations\r\n optimize_for_gpu=True,\r\n fp16=True\r\n )\r\n\r\n# apply the optimization configuration to the model\r\noptimizer.export(\r\n onnx_model_path=onnx_path / \"model.onnx\",\r\n onnx_optimized_model_output_path=onnx_path / \"model-optimized.onnx\",\r\n optimization_config=optimization_config,\r\n)\r\n```\r\n\r\nBut got the following error:\r\n```\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb Cellule 15 in <cell line: 12>()\r\n [6](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=5) optimization_config = OptimizationConfig(optimization_level=99, # enable all optimizations\r\n [7](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=6) optimize_for_gpu=True,\r\n [8](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=7) fp16=True\r\n [9](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=8) )\r\n [11](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=10) # apply the optimization configuration to the model\r\n---> [12](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=11) optimizer.export(\r\n [13](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=12) onnx_model_path=onnx_path / \"model.onnx\",\r\n [14](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=13) onnx_optimized_model_output_path=onnx_path / \"model-optimized.onnx\",\r\n 
[15](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=14) optimization_config=optimization_config,\r\n [16](vscode-notebook-cell:/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb#ch0000014?line=15) )\r\n\r\nFile ~/anaconda3/envs/haystack-gpu-fresh/lib/python3.8/site-packages/optimum/onnxruntime/optimization.py:125, in ORTOptimizer.export(self, onnx_model_path, onnx_optimized_model_output_path, optimization_config, use_external_data_format)\r\n 122 if not onnx_model_path.exists():\r\n 123 export(self.preprocessor, self.model, self._onnx_config, self.opset, onnx_model_path)\r\n--> 125 ORTConfigManager.check_supported_model_or_raise(self._model_type)\r\n 126 num_heads = getattr(self.model.config, ORTConfigManager.get_num_heads_name(self._model_type))\r\n 127 hidden_size = getattr(self.model.config, ORTConfigManager.get_hidden_size_name(self._model_type))\r\n\r\nFile ~/anaconda3/envs/haystack-gpu-fresh/lib/python3.8/site-packages/optimum/onnxruntime/utils.py:110, in ORTConfigManager.check_supported_model_or_raise(cls, model_type)\r\n 107 @classmethod\r\n 108 def check_supported_model_or_raise(cls, model_type: str) -> bool:\r\n 109 if model_type not in cls._conf:\r\n...\r\n 111 f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\r\n 112 f\"If you want to support {model_type} please propose a PR or open up an issue.\"\r\n 113 )\r\n\r\nKeyError: \"xlm-roberta model type is not supported yet. Only ['bert', 'albert', 'camembert', 'codegen', 'distilbert', 'deberta', 'deberta-v2', 'electra', 'roberta', 'bart', 'gpt2', 'gpt_neo'] are supported. If you want to support xlm-roberta please propose a PR or open up an issue.\"\r\n```\r\n\r\nWould it be possible to add support for this model type?\r\n\r\nThanks!\n\n### Expected behavior\n\nWould it be possible to add support for `xlm-roberta` model type?\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom transformers.utils import logging\n\nimport onnx\nimport onnxruntime as ort\n\nfrom ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss\n\n\nlogger = logging.get_logger(__name__)\n\nONNX_WEIGHTS_NAME = \"model.onnx\"\nOPTIMIZED_ONNX_WEIGHTS_NAME = \"optimized_model.onnx\"\nQUANTIZED_ONNX_WEIGHTS_NAME = \"q8_model.onnx\"\n\nONNX_ENCODER_NAME = \"encoder_model.onnx\"\nONNX_DECODER_NAME = \"decoder_model.onnx\"\nONNX_DECODER_WITH_PAST_NAME = \"decoder_with_past_model.onnx\"\n\n\ndef _is_gpu_available():\n \"\"\"\n checks if a gpu is available.\n \"\"\"\n available_providers = ort.get_available_providers()\n if \"CUDAExecutionProvider\" in available_providers and torch.cuda.is_available():\n return True\n else:\n return False\n\n\nclass ORTConfigManager:\n \"\"\"\n A class that contains all the information needed by ONNX Runtime optimization for a given model type.\n\n Attributes:\n _conf (`Dict[str, tuple]`):\n A dictionary mapping each supported model type to a tuple containing the number of attention heads\n and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.\n \"\"\"\n\n _conf = {\n \"bert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"codegen\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n \"deberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"deberta-v2\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n }\n\n @classmethod\n def get_num_heads_name(cls, model_type: str) -> str:\n num_heads = \"num_attention_heads\"\n try:\n num_heads = cls._conf[model_type][0]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the number of heads defined in the config is set to `{num_heads}`.\"\n )\n return num_heads\n\n @classmethod\n def get_hidden_size_name(cls, model_type: str) -> str:\n hidden_size = \"hidden_size\"\n try:\n hidden_size = cls._conf[model_type][1]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. 
The default value to \"\n f\"access the hidden size defined in the config is set to `{hidden_size}`.\"\n )\n return hidden_size\n\n @classmethod\n def get_model_ort_type(cls, model_type: str) -> str:\n try:\n model_type = cls._conf[model_type][2]\n except KeyError:\n logger.warning(f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.\")\n return model_type\n\n @classmethod\n def check_supported_model_or_raise(cls, model_type: str) -> bool:\n if model_type not in cls._conf:\n raise KeyError(\n f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\n f\"If you want to support {model_type} please propose a PR or open up an issue.\"\n )\n\n\ndef generate_identified_filename(filename, identifier):\n return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)\n\n\ndef fix_atenops_to_gather(model_path):\n # Fix broken ATenOp nodes back to Gather nodes.\n model = onnx.load(model_path)\n onnx.checker.check_model(model)\n\n nodes = model.graph.node\n\n for node in nodes:\n if node.op_type in [\"ATenOp\", \"ATen\"]:\n logger.info(f\"----Start fixing node: {node.name}----\")\n op_num = node.name.split(\"_\")[-1]\n new_node = onnx.helper.make_node(\n \"Gather\",\n name=\"Gather_\" + op_num,\n inputs=[node.input[0], node.input[1]],\n outputs=node.output,\n )\n\n model.graph.node.remove(node)\n model.graph.node.insert(int(op_num), new_node)\n\n onnx.checker.check_model(model)\n onnx.save(model, model_path)\n\n\ndef wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:\n if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):\n return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)\n elif isinstance(onnx_config, OnnxConfigWithPast):\n return OnnxConfigWithPastAndLoss(onnx_config)\n else:\n return OnnxConfigWithLoss(onnx_config)\n\n\ndef get_device_for_provider(provider: str) -> torch.device:\n \"\"\"\n Gets the PyTorch device (CPU/CUDA) associated with an ONNX Runtime provider.\n \"\"\"\n return torch.device(\"cuda\") if provider == \"CUDAExecutionProvider\" else torch.device(\"cpu\")\n\n\ndef get_provider_for_device(device: torch.device) -> str:\n \"\"\"\n Gets the ONNX Runtime provider associated with the PyTorch device (CPU/CUDA).\n \"\"\"\n return \"CUDAExecutionProvider\" if device.type.lower() == \"cuda\" else \"CPUExecutionProvider\"\n\n\nclass ORTQuantizableOperator(Enum):\n # Common ops\n Gather = \"Gather\"\n Transpose = \"Transpose\"\n EmbedLayerNormalizationQuant = \"EmbedLayerNormalization\"\n\n # QLinearOps\n Conv = \"Conv\"\n MatMul = \"MatMul\"\n Add = \"Add\"\n Mul = \"Mul\"\n Relu = \"Relu\"\n Clip = \"Clip\"\n LeakyRelu = \"LeakyRelu\"\n Sigmoid = \"Sigmoid\"\n MaxPool = \"MaxPool\"\n GlobalAveragePool = \"GlobalAveragePool\"\n Split = \"Split\"\n Pad = \"Pad\"\n Reshape = \"Reshape\"\n Squeeze = \"Squeeze\"\n Unsqueeze = \"Unsqueeze\"\n Resize = \"Resize\"\n AveragePool = \"AveragePool\"\n Concat = \"Concat\"\n", "path": "optimum/onnxruntime/utils.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom transformers.utils import logging\n\nimport onnx\nimport onnxruntime as ort\n\nfrom ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss\n\n\nlogger = logging.get_logger(__name__)\n\nONNX_WEIGHTS_NAME = \"model.onnx\"\nOPTIMIZED_ONNX_WEIGHTS_NAME = \"optimized_model.onnx\"\nQUANTIZED_ONNX_WEIGHTS_NAME = \"q8_model.onnx\"\n\nONNX_ENCODER_NAME = \"encoder_model.onnx\"\nONNX_DECODER_NAME = \"decoder_model.onnx\"\nONNX_DECODER_WITH_PAST_NAME = \"decoder_with_past_model.onnx\"\n\n\ndef _is_gpu_available():\n \"\"\"\n checks if a gpu is available.\n \"\"\"\n available_providers = ort.get_available_providers()\n if \"CUDAExecutionProvider\" in available_providers and torch.cuda.is_available():\n return True\n else:\n return False\n\n\nclass ORTConfigManager:\n \"\"\"\n A class that contains all the information needed by ONNX Runtime optimization for a given model type.\n\n Attributes:\n _conf (`Dict[str, tuple]`):\n A dictionary mapping each supported model type to a tuple containing the number of attention heads\n and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.\n \"\"\"\n\n _conf = {\n \"bert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"codegen\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n \"deberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"deberta-v2\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n \"xlm-roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n }\n\n @classmethod\n def get_num_heads_name(cls, model_type: str) -> str:\n num_heads = \"num_attention_heads\"\n try:\n num_heads = cls._conf[model_type][0]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the number of heads defined in the config is set to `{num_heads}`.\"\n )\n return num_heads\n\n @classmethod\n def get_hidden_size_name(cls, model_type: str) -> str:\n hidden_size = \"hidden_size\"\n try:\n hidden_size = cls._conf[model_type][1]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. 
The default value to \"\n f\"access the hidden size defined in the config is set to `{hidden_size}`.\"\n )\n return hidden_size\n\n @classmethod\n def get_model_ort_type(cls, model_type: str) -> str:\n try:\n model_type = cls._conf[model_type][2]\n except KeyError:\n logger.warning(f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.\")\n return model_type\n\n @classmethod\n def check_supported_model_or_raise(cls, model_type: str) -> bool:\n if model_type not in cls._conf:\n raise KeyError(\n f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\n f\"If you want to support {model_type} please propose a PR or open up an issue.\"\n )\n\n\ndef generate_identified_filename(filename, identifier):\n return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)\n\n\ndef fix_atenops_to_gather(model_path):\n # Fix broken ATenOp nodes back to Gather nodes.\n model = onnx.load(model_path)\n onnx.checker.check_model(model)\n\n nodes = model.graph.node\n\n for node in nodes:\n if node.op_type in [\"ATenOp\", \"ATen\"]:\n logger.info(f\"----Start fixing node: {node.name}----\")\n op_num = node.name.split(\"_\")[-1]\n new_node = onnx.helper.make_node(\n \"Gather\",\n name=\"Gather_\" + op_num,\n inputs=[node.input[0], node.input[1]],\n outputs=node.output,\n )\n\n model.graph.node.remove(node)\n model.graph.node.insert(int(op_num), new_node)\n\n onnx.checker.check_model(model)\n onnx.save(model, model_path)\n\n\ndef wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:\n if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):\n return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)\n elif isinstance(onnx_config, OnnxConfigWithPast):\n return OnnxConfigWithPastAndLoss(onnx_config)\n else:\n return OnnxConfigWithLoss(onnx_config)\n\n\ndef get_device_for_provider(provider: str) -> torch.device:\n \"\"\"\n Gets the PyTorch device (CPU/CUDA) associated with an ONNX Runtime provider.\n \"\"\"\n return torch.device(\"cuda\") if provider == \"CUDAExecutionProvider\" else torch.device(\"cpu\")\n\n\ndef get_provider_for_device(device: torch.device) -> str:\n \"\"\"\n Gets the ONNX Runtime provider associated with the PyTorch device (CPU/CUDA).\n \"\"\"\n return \"CUDAExecutionProvider\" if device.type.lower() == \"cuda\" else \"CPUExecutionProvider\"\n\n\nclass ORTQuantizableOperator(Enum):\n # Common ops\n Gather = \"Gather\"\n Transpose = \"Transpose\"\n EmbedLayerNormalizationQuant = \"EmbedLayerNormalization\"\n\n # QLinearOps\n Conv = \"Conv\"\n MatMul = \"MatMul\"\n Add = \"Add\"\n Mul = \"Mul\"\n Relu = \"Relu\"\n Clip = \"Clip\"\n LeakyRelu = \"LeakyRelu\"\n Sigmoid = \"Sigmoid\"\n MaxPool = \"MaxPool\"\n GlobalAveragePool = \"GlobalAveragePool\"\n Split = \"Split\"\n Pad = \"Pad\"\n Reshape = \"Reshape\"\n Squeeze = \"Squeeze\"\n Unsqueeze = \"Unsqueeze\"\n Resize = \"Resize\"\n AveragePool = \"AveragePool\"\n Concat = \"Concat\"\n", "path": "optimum/onnxruntime/utils.py"}]}
| 4,006 | 136 |
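A minimal usage sketch for the patched `ORTConfigManager` in the record above — assuming its golden diff (which adds `xlm-roberta` to `_conf`) is applied; the import path follows the record's `optimum/onnxruntime/utils.py`, and the expected return values follow directly from the `_conf` entry added by the patch:

```python
# Sketch only: exercises the ORTConfigManager lookups shown in the record above.
from optimum.onnxruntime.utils import ORTConfigManager

# Raises KeyError before the patch, passes once "xlm-roberta" is added to _conf.
ORTConfigManager.check_supported_model_or_raise("xlm-roberta")

print(ORTConfigManager.get_num_heads_name("xlm-roberta"))    # "num_attention_heads"
print(ORTConfigManager.get_hidden_size_name("xlm-roberta"))  # "hidden_size"
print(ORTConfigManager.get_model_ort_type("xlm-roberta"))    # "bert"
```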
| gh_patches_debug_38796 | rasdani/github-patches | git_diff | facebookresearch__xformers-263 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[feat] Add smooth relu to the fused linear layer (triton) activations
# 🚀 Feature
Should be super easy to add [in there](https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py), would be interesting to see it benchmarked down the line
## Motivation
See [this](https://ai.googleblog.com/2022/04/reproducibility-in-deep-learning-and.html) and [that](https://arxiv.org/abs/2202.06499)
## Pitch
- easy thing to add
- triton should be fairly efficient there, vs. other options (naive pytorch)
## Alternatives
Not doing it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xformers/components/activations.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 from enum import Enum
8 from typing import Optional
9
10 import torch
11 from torch import nn
12
13
14 class Activation(str, Enum):
15 SquaredReLU = "squared_relu"
16 GeLU = "gelu"
17 LeakyReLU = "leaky_relu"
18 ReLU = "relu"
19
20
21 # For unit testing / parity comparisons, probably not the fastest way
22 class SquaredReLU(nn.Module):
23 def __init__(self) -> None:
24 super().__init__()
25
26 def forward(self, x: torch.Tensor) -> torch.Tensor:
27 x_ = torch.nn.functional.relu(x)
28 return x_ * x_
29
30
31 class Passthrough(nn.Module):
32 def __init__(self) -> None:
33 super().__init__()
34
35 def forward(self, x: torch.Tensor) -> torch.Tensor:
36 return x
37
38
39 def build_activation(activation: Optional[Activation]):
40 if not activation:
41 return Passthrough()
42
43 return {
44 Activation.ReLU: nn.ReLU,
45 Activation.GeLU: nn.GELU,
46 Activation.LeakyReLU: nn.LeakyReLU,
47 Activation.SquaredReLU: SquaredReLU,
48 }[activation]()
49
```
Path: `xformers/triton/k_activations.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import math
7 from typing import Optional
8
9 import triton
10 import triton.language as tl
11
12 from xformers.components import Activation
13
14 _kAlpha = math.sqrt(2.0 / math.pi)
15
16
17 def get_triton_activation_kernel(activation: Optional[Activation]):
18 return (
19 {
20 Activation.ReLU: relu,
21 Activation.LeakyReLU: leaky_relu,
22 Activation.GeLU: gelu,
23 Activation.SquaredReLU: squared_relu,
24 }[activation]
25 if activation
26 else None
27 )
28
29
30 def get_triton_activation_bwd_kernel(activation: Optional[Activation]):
31 return (
32 {
33 Activation.ReLU: relu_grad,
34 Activation.LeakyReLU: leaky_relu_grad,
35 Activation.GeLU: gelu_grad,
36 Activation.SquaredReLU: squared_relu_grad,
37 }[activation]
38 if activation
39 else None
40 )
41
42
43 @triton.jit
44 def tanh(x):
45 # Tanh is just a scaled sigmoid
46 return 2 * tl.sigmoid(2 * x) - 1
47
48
49 @triton.jit
50 def cosh(x):
51 exp_x = tl.exp(x)
52 return (exp_x + 1.0 / exp_x) * 0.5
53
54
55 # a Triton implementation of the most used activations
56 # See for instance http://arxiv.org/abs/1606.08415 for an overview
57
58 # ReLU
59 @triton.jit
60 def relu(x):
61 """
62 ReLU_ activation function
63
64 .. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
65 """
66 zero = 0.0
67 return tl.where(x >= 0, x, zero.to(x.dtype))
68
69
70 @triton.jit
71 def relu_grad(x):
72 # ReLU is different from other activations
73 # in that it does not require the input to retrospectively compute its gradient
74 # here the input is the downstream gradient, and we return the upstream gradient directly
75 zero = 0.0
76 one = 1.0
77 return tl.where(x >= 0, one.to(x.dtype), zero.to(x.dtype))
78
79
80 @triton.jit
81 def squared_relu(x):
82 """
83 Squared ReLU activation, as proposed in the Primer_ paper.
84
85 .. _Primer: https://arxiv.org/abs/2109.08668
86 """
87 x_ = relu(x)
88 return (x_ * x_).to(x.dtype)
89
90
91 @triton.jit
92 def squared_relu_grad(x):
93 return tl.where(x >= 0, 2.0 * x, 0.0)
94
95
96 # Leaky ReLU
97 @triton.jit
98 def leaky_relu(x):
99 """
100 LeakyReLU_ activation
101
102 .. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
103 """
104 scale = 0.01 + 0.0
105 scale = scale.to(x.dtype)
106 return tl.where(x >= 0, x, scale * x)
107
108
109 @triton.jit
110 def leaky_relu_grad(x):
111 min_grad = 0.01
112 max_grad = 1
113
114 min_grad = min_grad.to(x.dtype)
115 max_grad = max_grad.to(x.dtype)
116
117 return tl.where(x >= 0, max_grad, min_grad)
118
119
120 @triton.jit
121 def gelu(x):
122 """
123 GeLU_ activation - Gaussian error linear unit
124
125 .. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
126 """
127 return 0.5 * x * (1 + tanh(_kAlpha * (x + 0.044715 * x * x * x)))
128
129
130 @triton.jit
131 def gelu_grad(x):
132 # CREDITS: Fast implementation proposed in
133 # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30
134 tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))
135 return 0.5 * x * (
136 (1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
137 ) + 0.5 * (1 + tanh_out)
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xformers/components/activations.py b/xformers/components/activations.py
--- a/xformers/components/activations.py
+++ b/xformers/components/activations.py
@@ -16,6 +16,7 @@
GeLU = "gelu"
LeakyReLU = "leaky_relu"
ReLU = "relu"
+ SmeLU = "smelu"
# For unit testing / parity comparisons, probably not the fastest way
@@ -28,6 +29,24 @@
return x_ * x_
+class SmeLU(nn.Module):
+ def __init__(self, beta: float = 2.0) -> None:
+ super().__init__()
+ self.beta = beta
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ relu = torch.where(
+ x >= self.beta,
+ x,
+ torch.tensor([0.0], device=x.device, dtype=x.dtype),
+ )
+ return torch.where(
+ torch.abs(x) <= self.beta,
+ ((x + self.beta) ** 2).type_as(x) / (4.0 * self.beta),
+ relu,
+ )
+
+
class Passthrough(nn.Module):
def __init__(self) -> None:
super().__init__()
@@ -45,4 +64,5 @@
Activation.GeLU: nn.GELU,
Activation.LeakyReLU: nn.LeakyReLU,
Activation.SquaredReLU: SquaredReLU,
+ Activation.SmeLU: SmeLU,
}[activation]()
diff --git a/xformers/triton/k_activations.py b/xformers/triton/k_activations.py
--- a/xformers/triton/k_activations.py
+++ b/xformers/triton/k_activations.py
@@ -21,6 +21,7 @@
Activation.LeakyReLU: leaky_relu,
Activation.GeLU: gelu,
Activation.SquaredReLU: squared_relu,
+ Activation.SmeLU: smelu,
}[activation]
if activation
else None
@@ -34,6 +35,7 @@
Activation.LeakyReLU: leaky_relu_grad,
Activation.GeLU: gelu_grad,
Activation.SquaredReLU: squared_relu_grad,
+ Activation.SmeLU: smelu_grad,
}[activation]
if activation
else None
@@ -135,3 +137,32 @@
return 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
+
+
+@triton.jit
+def smelu(x):
+ """
+ SmeLU_ activation - Smooth ReLU with beta=2.0
+
+ .. _SmeLU: https://arxiv.org/pdf/2202.06499.pdf
+ """
+ zero = 0.0
+ four = 4.0
+ two = 2.0
+ beta = two.to(x.dtype)
+
+ output = (x + beta) * (x + beta) / (four.to(x.dtype) * beta)
+ relu = tl.where(x >= beta, x, zero.to(x.dtype))
+ return tl.where(tl.abs(x) <= beta, output, relu)
+
+
+@triton.jit
+def smelu_grad(x):
+ zero = 0.0
+ one = 1.0
+ two = 2.0
+ beta = two.to(x.dtype)
+
+ grad = (beta + x) / (two.to(x.dtype) * beta)
+ relu_grad = tl.where(x >= beta, one.to(x.dtype), zero.to(x.dtype))
+ return tl.where(tl.abs(x) <= beta, grad, relu_grad)
|
{"golden_diff": "diff --git a/xformers/components/activations.py b/xformers/components/activations.py\n--- a/xformers/components/activations.py\n+++ b/xformers/components/activations.py\n@@ -16,6 +16,7 @@\n GeLU = \"gelu\"\n LeakyReLU = \"leaky_relu\"\n ReLU = \"relu\"\n+ SmeLU = \"smelu\"\n \n \n # For unit testing / parity comparisons, probably not the fastest way\n@@ -28,6 +29,24 @@\n return x_ * x_\n \n \n+class SmeLU(nn.Module):\n+ def __init__(self, beta: float = 2.0) -> None:\n+ super().__init__()\n+ self.beta = beta\n+\n+ def forward(self, x: torch.Tensor) -> torch.Tensor:\n+ relu = torch.where(\n+ x >= self.beta,\n+ x,\n+ torch.tensor([0.0], device=x.device, dtype=x.dtype),\n+ )\n+ return torch.where(\n+ torch.abs(x) <= self.beta,\n+ ((x + self.beta) ** 2).type_as(x) / (4.0 * self.beta),\n+ relu,\n+ )\n+\n+\n class Passthrough(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n@@ -45,4 +64,5 @@\n Activation.GeLU: nn.GELU,\n Activation.LeakyReLU: nn.LeakyReLU,\n Activation.SquaredReLU: SquaredReLU,\n+ Activation.SmeLU: SmeLU,\n }[activation]()\ndiff --git a/xformers/triton/k_activations.py b/xformers/triton/k_activations.py\n--- a/xformers/triton/k_activations.py\n+++ b/xformers/triton/k_activations.py\n@@ -21,6 +21,7 @@\n Activation.LeakyReLU: leaky_relu,\n Activation.GeLU: gelu,\n Activation.SquaredReLU: squared_relu,\n+ Activation.SmeLU: smelu,\n }[activation]\n if activation\n else None\n@@ -34,6 +35,7 @@\n Activation.LeakyReLU: leaky_relu_grad,\n Activation.GeLU: gelu_grad,\n Activation.SquaredReLU: squared_relu_grad,\n+ Activation.SmeLU: smelu_grad,\n }[activation]\n if activation\n else None\n@@ -135,3 +137,32 @@\n return 0.5 * x * (\n (1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)\n ) + 0.5 * (1 + tanh_out)\n+\n+\[email protected]\n+def smelu(x):\n+ \"\"\"\n+ SmeLU_ activation - Smooth ReLU with beta=2.0\n+\n+ .. _SmeLU: https://arxiv.org/pdf/2202.06499.pdf\n+ \"\"\"\n+ zero = 0.0\n+ four = 4.0\n+ two = 2.0\n+ beta = two.to(x.dtype)\n+\n+ output = (x + beta) * (x + beta) / (four.to(x.dtype) * beta)\n+ relu = tl.where(x >= beta, x, zero.to(x.dtype))\n+ return tl.where(tl.abs(x) <= beta, output, relu)\n+\n+\[email protected]\n+def smelu_grad(x):\n+ zero = 0.0\n+ one = 1.0\n+ two = 2.0\n+ beta = two.to(x.dtype)\n+\n+ grad = (beta + x) / (two.to(x.dtype) * beta)\n+ relu_grad = tl.where(x >= beta, one.to(x.dtype), zero.to(x.dtype))\n+ return tl.where(tl.abs(x) <= beta, grad, relu_grad)\n", "issue": "[feat] Add smooth relu to the fused linear layer (triton) activations\n# \ud83d\ude80 Feature\r\nShould be super easy to add [in there](https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py), would be interesting to see it benchmarked down the line \r\n\r\n## Motivation\r\nSee [this](https://ai.googleblog.com/2022/04/reproducibility-in-deep-learning-and.html) and [that](https://arxiv.org/abs/2202.06499)\r\n\r\n## Pitch\r\n- easy thing to add\r\n- triton should be fairly efficient there, vs. other options (naive pytorch)\r\n\r\n## Alternatives\r\nNot doing it\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom enum import Enum\nfrom typing import Optional\n\nimport torch\nfrom torch import nn\n\n\nclass Activation(str, Enum):\n SquaredReLU = \"squared_relu\"\n GeLU = \"gelu\"\n LeakyReLU = \"leaky_relu\"\n ReLU = \"relu\"\n\n\n# For unit testing / parity comparisons, probably not the fastest way\nclass SquaredReLU(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x_ = torch.nn.functional.relu(x)\n return x_ * x_\n\n\nclass Passthrough(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return x\n\n\ndef build_activation(activation: Optional[Activation]):\n if not activation:\n return Passthrough()\n\n return {\n Activation.ReLU: nn.ReLU,\n Activation.GeLU: nn.GELU,\n Activation.LeakyReLU: nn.LeakyReLU,\n Activation.SquaredReLU: SquaredReLU,\n }[activation]()\n", "path": "xformers/components/activations.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Optional\n\nimport triton\nimport triton.language as tl\n\nfrom xformers.components import Activation\n\n_kAlpha = math.sqrt(2.0 / math.pi)\n\n\ndef get_triton_activation_kernel(activation: Optional[Activation]):\n return (\n {\n Activation.ReLU: relu,\n Activation.LeakyReLU: leaky_relu,\n Activation.GeLU: gelu,\n Activation.SquaredReLU: squared_relu,\n }[activation]\n if activation\n else None\n )\n\n\ndef get_triton_activation_bwd_kernel(activation: Optional[Activation]):\n return (\n {\n Activation.ReLU: relu_grad,\n Activation.LeakyReLU: leaky_relu_grad,\n Activation.GeLU: gelu_grad,\n Activation.SquaredReLU: squared_relu_grad,\n }[activation]\n if activation\n else None\n )\n\n\[email protected]\ndef tanh(x):\n # Tanh is just a scaled sigmoid\n return 2 * tl.sigmoid(2 * x) - 1\n\n\[email protected]\ndef cosh(x):\n exp_x = tl.exp(x)\n return (exp_x + 1.0 / exp_x) * 0.5\n\n\n# a Triton implementation of the most used activations\n# See for instance http://arxiv.org/abs/1606.08415 for an overview\n\n# ReLU\[email protected]\ndef relu(x):\n \"\"\"\n ReLU_ activation function\n\n .. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html\n \"\"\"\n zero = 0.0\n return tl.where(x >= 0, x, zero.to(x.dtype))\n\n\[email protected]\ndef relu_grad(x):\n # ReLU is different from other activations\n # in that it does not require the input to retrospectively compute its gradient\n # here the input is the downstream gradient, and we return the upstream gradient directly\n zero = 0.0\n one = 1.0\n return tl.where(x >= 0, one.to(x.dtype), zero.to(x.dtype))\n\n\[email protected]\ndef squared_relu(x):\n \"\"\"\n Squared ReLU activation, as proposed in the Primer_ paper.\n\n .. _Primer: https://arxiv.org/abs/2109.08668\n \"\"\"\n x_ = relu(x)\n return (x_ * x_).to(x.dtype)\n\n\[email protected]\ndef squared_relu_grad(x):\n return tl.where(x >= 0, 2.0 * x, 0.0)\n\n\n# Leaky ReLU\[email protected]\ndef leaky_relu(x):\n \"\"\"\n LeakyReLU_ activation\n\n .. 
_LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html\n \"\"\"\n scale = 0.01 + 0.0\n scale = scale.to(x.dtype)\n return tl.where(x >= 0, x, scale * x)\n\n\[email protected]\ndef leaky_relu_grad(x):\n min_grad = 0.01\n max_grad = 1\n\n min_grad = min_grad.to(x.dtype)\n max_grad = max_grad.to(x.dtype)\n\n return tl.where(x >= 0, max_grad, min_grad)\n\n\[email protected]\ndef gelu(x):\n \"\"\"\n GeLU_ activation - Gaussian error linear unit\n\n .. _GeLU: https://arxiv.org/pdf/1606.08415.pdf\n \"\"\"\n return 0.5 * x * (1 + tanh(_kAlpha * (x + 0.044715 * x * x * x)))\n\n\[email protected]\ndef gelu_grad(x):\n # CREDITS: Fast implementation proposed in\n # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30\n tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))\n return 0.5 * x * (\n (1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)\n ) + 0.5 * (1 + tanh_out)\n", "path": "xformers/triton/k_activations.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom enum import Enum\nfrom typing import Optional\n\nimport torch\nfrom torch import nn\n\n\nclass Activation(str, Enum):\n SquaredReLU = \"squared_relu\"\n GeLU = \"gelu\"\n LeakyReLU = \"leaky_relu\"\n ReLU = \"relu\"\n SmeLU = \"smelu\"\n\n\n# For unit testing / parity comparisons, probably not the fastest way\nclass SquaredReLU(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x_ = torch.nn.functional.relu(x)\n return x_ * x_\n\n\nclass SmeLU(nn.Module):\n def __init__(self, beta: float = 2.0) -> None:\n super().__init__()\n self.beta = beta\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n relu = torch.where(\n x >= self.beta,\n x,\n torch.tensor([0.0], device=x.device, dtype=x.dtype),\n )\n return torch.where(\n torch.abs(x) <= self.beta,\n ((x + self.beta) ** 2).type_as(x) / (4.0 * self.beta),\n relu,\n )\n\n\nclass Passthrough(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return x\n\n\ndef build_activation(activation: Optional[Activation]):\n if not activation:\n return Passthrough()\n\n return {\n Activation.ReLU: nn.ReLU,\n Activation.GeLU: nn.GELU,\n Activation.LeakyReLU: nn.LeakyReLU,\n Activation.SquaredReLU: SquaredReLU,\n Activation.SmeLU: SmeLU,\n }[activation]()\n", "path": "xformers/components/activations.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Optional\n\nimport triton\nimport triton.language as tl\n\nfrom xformers.components import Activation\n\n_kAlpha = math.sqrt(2.0 / math.pi)\n\n\ndef get_triton_activation_kernel(activation: Optional[Activation]):\n return (\n {\n Activation.ReLU: relu,\n Activation.LeakyReLU: leaky_relu,\n Activation.GeLU: gelu,\n Activation.SquaredReLU: squared_relu,\n Activation.SmeLU: smelu,\n }[activation]\n if activation\n else None\n )\n\n\ndef get_triton_activation_bwd_kernel(activation: Optional[Activation]):\n return (\n {\n Activation.ReLU: relu_grad,\n Activation.LeakyReLU: leaky_relu_grad,\n Activation.GeLU: gelu_grad,\n Activation.SquaredReLU: squared_relu_grad,\n Activation.SmeLU: smelu_grad,\n }[activation]\n if activation\n else None\n )\n\n\[email protected]\ndef tanh(x):\n # Tanh is just a scaled sigmoid\n return 2 * tl.sigmoid(2 * x) - 1\n\n\[email protected]\ndef cosh(x):\n exp_x = tl.exp(x)\n return (exp_x + 1.0 / exp_x) * 0.5\n\n\n# a Triton implementation of the most used activations\n# See for instance http://arxiv.org/abs/1606.08415 for an overview\n\n# ReLU\[email protected]\ndef relu(x):\n \"\"\"\n ReLU_ activation function\n\n .. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html\n \"\"\"\n zero = 0.0\n return tl.where(x >= 0, x, zero.to(x.dtype))\n\n\[email protected]\ndef relu_grad(x):\n # ReLU is different from other activations\n # in that it does not require the input to retrospectively compute its gradient\n # here the input is the downstream gradient, and we return the upstream gradient directly\n zero = 0.0\n one = 1.0\n return tl.where(x >= 0, one.to(x.dtype), zero.to(x.dtype))\n\n\[email protected]\ndef squared_relu(x):\n \"\"\"\n Squared ReLU activation, as proposed in the Primer_ paper.\n\n .. _Primer: https://arxiv.org/abs/2109.08668\n \"\"\"\n x_ = relu(x)\n return (x_ * x_).to(x.dtype)\n\n\[email protected]\ndef squared_relu_grad(x):\n return tl.where(x >= 0, 2.0 * x, 0.0)\n\n\n# Leaky ReLU\[email protected]\ndef leaky_relu(x):\n \"\"\"\n LeakyReLU_ activation\n\n .. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html\n \"\"\"\n scale = 0.01 + 0.0\n scale = scale.to(x.dtype)\n return tl.where(x >= 0, x, scale * x)\n\n\[email protected]\ndef leaky_relu_grad(x):\n min_grad = 0.01\n max_grad = 1\n\n min_grad = min_grad.to(x.dtype)\n max_grad = max_grad.to(x.dtype)\n\n return tl.where(x >= 0, max_grad, min_grad)\n\n\[email protected]\ndef gelu(x):\n \"\"\"\n GeLU_ activation - Gaussian error linear unit\n\n .. _GeLU: https://arxiv.org/pdf/1606.08415.pdf\n \"\"\"\n return 0.5 * x * (1 + tanh(_kAlpha * (x + 0.044715 * x * x * x)))\n\n\[email protected]\ndef gelu_grad(x):\n # CREDITS: Fast implementation proposed in\n # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30\n tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))\n return 0.5 * x * (\n (1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)\n ) + 0.5 * (1 + tanh_out)\n\n\[email protected]\ndef smelu(x):\n \"\"\"\n SmeLU_ activation - Smooth ReLU with beta=2.0\n\n .. 
_SmeLU: https://arxiv.org/pdf/2202.06499.pdf\n \"\"\"\n zero = 0.0\n four = 4.0\n two = 2.0\n beta = two.to(x.dtype)\n\n output = (x + beta) * (x + beta) / (four.to(x.dtype) * beta)\n relu = tl.where(x >= beta, x, zero.to(x.dtype))\n return tl.where(tl.abs(x) <= beta, output, relu)\n\n\[email protected]\ndef smelu_grad(x):\n zero = 0.0\n one = 1.0\n two = 2.0\n beta = two.to(x.dtype)\n\n grad = (beta + x) / (two.to(x.dtype) * beta)\n relu_grad = tl.where(x >= beta, one.to(x.dtype), zero.to(x.dtype))\n return tl.where(tl.abs(x) <= beta, grad, relu_grad)\n", "path": "xformers/triton/k_activations.py"}]}
| 2,166 | 886 |
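A small eager-mode sanity check for the SmeLU addition above — assuming the golden diff is applied; `Activation.SmeLU` and `build_activation` come from the patched `xformers/components/activations.py`, and the expected piecewise values follow from the default `beta=2.0`:

```python
# Sketch only: checks the eager SmeLU module added in the golden diff above.
import torch
from xformers.components.activations import Activation, build_activation

act = build_activation(Activation.SmeLU)          # returns SmeLU(beta=2.0)
x = torch.tensor([-4.0, -2.0, 0.0, 2.0, 4.0])
# Expected: 0 for x <= -2, (x + 2)**2 / 8 on [-2, 2], x for x >= 2
print(act(x))                                     # tensor([0.0, 0.0, 0.5, 2.0, 4.0])
```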
| gh_patches_debug_5208 | rasdani/github-patches | git_diff | ansible__ansible-11146 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CLI become options are ultimately ignored
At this point I am not exactly sure where this is happening, however the become options provided on the CLI are ultimately ignored.
I have however determined that when `ConnectionInformation` is initiated, the attributes are properly set via the `set_options` method. Immediately afterwards, `set_play` is executed and the options are set to `None`.
Commenting out the call to `set_play`, the attributes on `ConnectionInformation` remain correct, but by the time that `make_become_cmd` is executed, `self.become` has been set to `False`.
Other than `set_play` overwriting the variables when it probably shouldn't, I haven't been able to track down what else is setting `ConnectionInformation.become` to `False` before `make_become_cmd`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/playbook/become.py`
Content:
```
1 # (c) 2012-2014, Michael DeHaan <[email protected]>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17
18 # Make coding more python3-ish
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 from ansible import constants as C
23 from ansible.errors import AnsibleError, AnsibleParserError
24 from ansible.playbook.attribute import Attribute, FieldAttribute
25 #from ansible.utils.display import deprecated
26
27 class Become:
28
29 # Privlege escalation
30 _become = FieldAttribute(isa='bool', default=False)
31 _become_method = FieldAttribute(isa='string')
32 _become_user = FieldAttribute(isa='string')
33 _become_pass = FieldAttribute(isa='string')
34
35 def __init__(self):
36 return super(Become, self).__init__()
37
38 def _detect_privilege_escalation_conflict(self, ds):
39
40 # Fail out if user specifies conflicting privilege escalations
41 has_become = 'become' in ds or 'become_user'in ds
42 has_sudo = 'sudo' in ds or 'sudo_user' in ds
43 has_su = 'su' in ds or 'su_user' in ds
44
45 if has_become:
46 msg = 'The become params ("become", "become_user") and'
47 if has_sudo:
48 raise AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg)
49 elif has_su:
50 raise AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg)
51 elif has_sudo and has_su:
52 raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
53
54 def _preprocess_data_become(self, ds):
55 """Preprocess the playbook data for become attributes
56
57 This is called from the Base object's preprocess_data() method which
58 in turn is called pretty much anytime any sort of playbook object
59 (plays, tasks, blocks, etc) are created.
60 """
61
62 self._detect_privilege_escalation_conflict(ds)
63
64 # Setting user implies setting become/sudo/su to true
65 if 'become_user' in ds and not ds.get('become', False):
66 ds['become'] = True
67
68 # Privilege escalation, backwards compatibility for sudo/su
69 if 'sudo' in ds or 'sudo_user' in ds:
70 ds['become_method'] = 'sudo'
71 if 'sudo' in ds:
72 ds['become'] = ds['sudo']
73 del ds['sudo']
74 else:
75 ds['become'] = True
76 if 'sudo_user' in ds:
77 ds['become_user'] = ds['sudo_user']
78 del ds['sudo_user']
79
80 #deprecated("Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)")
81
82 elif 'su' in ds or 'su_user' in ds:
83 ds['become_method'] = 'su'
84 if 'su' in ds:
85 ds['become'] = ds['su']
86 del ds['su']
87 else:
88 ds['become'] = True
89 if 'su_user' in ds:
90 ds['become_user'] = ds['su_user']
91 del ds['su_user']
92
93 #deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
94
95 # if we are becoming someone else, but some fields are unset,
96 # make sure they're initialized to the default config values
97 if ds.get('become', False):
98 if ds.get('become_method', None) is None:
99 ds['become_method'] = C.DEFAULT_BECOME_METHOD
100 if ds.get('become_user', None) is None:
101 ds['become_user'] = C.DEFAULT_BECOME_USER
102
103 return ds
104
105 def _get_attr_become(self):
106 '''
107 Override for the 'become' getattr fetcher, used from Base.
108 '''
109 if hasattr(self, '_get_parent_attribute'):
110 return self._get_parent_attribute('become')
111 else:
112 return self._attributes['become']
113
114 def _get_attr_become_method(self):
115 '''
116 Override for the 'become_method' getattr fetcher, used from Base.
117 '''
118 if hasattr(self, '_get_parent_attribute'):
119 return self._get_parent_attribute('become_method')
120 else:
121 return self._attributes['become_method']
122
123 def _get_attr_become_user(self):
124 '''
125 Override for the 'become_user' getattr fetcher, used from Base.
126 '''
127 if hasattr(self, '_get_parent_attribute'):
128 return self._get_parent_attribute('become_user')
129 else:
130 return self._attributes['become_user']
131
132 def _get_attr_become_password(self):
133 '''
134 Override for the 'become_password' getattr fetcher, used from Base.
135 '''
136 if hasattr(self, '_get_parent_attribute'):
137 return self._get_parent_attribute('become_password')
138 else:
139 return self._attributes['become_password']
140
141
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py
--- a/lib/ansible/playbook/become.py
+++ b/lib/ansible/playbook/become.py
@@ -27,7 +27,7 @@
class Become:
# Privlege escalation
- _become = FieldAttribute(isa='bool', default=False)
+ _become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
|
{"golden_diff": "diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py\n--- a/lib/ansible/playbook/become.py\n+++ b/lib/ansible/playbook/become.py\n@@ -27,7 +27,7 @@\n class Become:\n \n # Privlege escalation\n- _become = FieldAttribute(isa='bool', default=False)\n+ _become = FieldAttribute(isa='bool')\n _become_method = FieldAttribute(isa='string')\n _become_user = FieldAttribute(isa='string')\n _become_pass = FieldAttribute(isa='string')\n", "issue": "CLI become options are ultimately ignored\nAt this point I am not exactly sure where this is happening, however the become options provided on the CLI are ultimately ignored.\n\nI have however determined that when `ConnectionInformation` is initiated, that the attributes are properly set via the 'set_options`method. Immediately afterwards,`set_play`is executed and the options are set to`None`.\n\nCommenting out the call to `set_play`, the attributes on `ConnectionInformation` remain correct, but by the time that `make_become_cmd` is executed, `self.become` has been set to `False`.\n\nOther than `set_play` overwriting the variables when it probably shouldn't, I haven't been able to track down what else is setting `ConnectionInformation.become` to `False` before `make_become_cmd`.\n\n", "before_files": [{"content": "# (c) 2012-2014, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError, AnsibleParserError\nfrom ansible.playbook.attribute import Attribute, FieldAttribute\n#from ansible.utils.display import deprecated\n\nclass Become:\n\n # Privlege escalation\n _become = FieldAttribute(isa='bool', default=False)\n _become_method = FieldAttribute(isa='string')\n _become_user = FieldAttribute(isa='string')\n _become_pass = FieldAttribute(isa='string')\n\n def __init__(self):\n return super(Become, self).__init__()\n\n def _detect_privilege_escalation_conflict(self, ds):\n\n # Fail out if user specifies conflicting privilege escalations\n has_become = 'become' in ds or 'become_user'in ds\n has_sudo = 'sudo' in ds or 'sudo_user' in ds\n has_su = 'su' in ds or 'su_user' in ds\n\n if has_become:\n msg = 'The become params (\"become\", \"become_user\") and'\n if has_sudo:\n raise AnsibleParserError('%s sudo params (\"sudo\", \"sudo_user\") cannot be used together' % msg)\n elif has_su:\n raise AnsibleParserError('%s su params (\"su\", \"su_user\") cannot be used together' % msg)\n elif has_sudo and has_su:\n raise AnsibleParserError('sudo params (\"sudo\", \"sudo_user\") and su params (\"su\", \"su_user\") cannot be used together')\n\n def _preprocess_data_become(self, ds):\n \"\"\"Preprocess the playbook data for become attributes\n\n This is called from the Base object's preprocess_data() method which\n in turn is called pretty much anytime any sort of playbook object\n (plays, tasks, blocks, etc) are created.\n \"\"\"\n\n self._detect_privilege_escalation_conflict(ds)\n\n # Setting user implies setting become/sudo/su to true\n if 'become_user' in ds and not ds.get('become', False):\n ds['become'] = True\n\n # Privilege escalation, backwards compatibility for sudo/su\n if 'sudo' in ds or 'sudo_user' in ds:\n ds['become_method'] = 'sudo'\n if 'sudo' in ds:\n ds['become'] = ds['sudo']\n del ds['sudo']\n else:\n ds['become'] = True\n if 'sudo_user' in ds:\n ds['become_user'] = ds['sudo_user']\n del ds['sudo_user']\n\n #deprecated(\"Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)\")\n\n elif 'su' in ds or 'su_user' in ds:\n ds['become_method'] = 'su'\n if 'su' in ds:\n ds['become'] = ds['su']\n del ds['su']\n else:\n ds['become'] = True\n if 'su_user' in ds:\n ds['become_user'] = ds['su_user']\n del ds['su_user']\n\n #deprecated(\"Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)\")\n\n # if we are becoming someone else, but some fields are unset,\n # make sure they're initialized to the default config values\n if ds.get('become', False):\n if ds.get('become_method', None) is None:\n ds['become_method'] = C.DEFAULT_BECOME_METHOD\n if ds.get('become_user', None) is None:\n ds['become_user'] = C.DEFAULT_BECOME_USER\n\n return ds\n\n def _get_attr_become(self):\n '''\n Override for the 'become' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become')\n else:\n return self._attributes['become']\n\n def _get_attr_become_method(self):\n '''\n Override for the 'become_method' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become_method')\n else:\n return self._attributes['become_method']\n\n def 
_get_attr_become_user(self):\n '''\n Override for the 'become_user' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become_user')\n else:\n return self._attributes['become_user']\n\n def _get_attr_become_password(self):\n '''\n Override for the 'become_password' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become_password')\n else:\n return self._attributes['become_password']\n\n\n", "path": "lib/ansible/playbook/become.py"}], "after_files": [{"content": "# (c) 2012-2014, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError, AnsibleParserError\nfrom ansible.playbook.attribute import Attribute, FieldAttribute\n#from ansible.utils.display import deprecated\n\nclass Become:\n\n # Privlege escalation\n _become = FieldAttribute(isa='bool')\n _become_method = FieldAttribute(isa='string')\n _become_user = FieldAttribute(isa='string')\n _become_pass = FieldAttribute(isa='string')\n\n def __init__(self):\n return super(Become, self).__init__()\n\n def _detect_privilege_escalation_conflict(self, ds):\n\n # Fail out if user specifies conflicting privilege escalations\n has_become = 'become' in ds or 'become_user'in ds\n has_sudo = 'sudo' in ds or 'sudo_user' in ds\n has_su = 'su' in ds or 'su_user' in ds\n\n if has_become:\n msg = 'The become params (\"become\", \"become_user\") and'\n if has_sudo:\n raise AnsibleParserError('%s sudo params (\"sudo\", \"sudo_user\") cannot be used together' % msg)\n elif has_su:\n raise AnsibleParserError('%s su params (\"su\", \"su_user\") cannot be used together' % msg)\n elif has_sudo and has_su:\n raise AnsibleParserError('sudo params (\"sudo\", \"sudo_user\") and su params (\"su\", \"su_user\") cannot be used together')\n\n def _preprocess_data_become(self, ds):\n \"\"\"Preprocess the playbook data for become attributes\n\n This is called from the Base object's preprocess_data() method which\n in turn is called pretty much anytime any sort of playbook object\n (plays, tasks, blocks, etc) are created.\n \"\"\"\n\n self._detect_privilege_escalation_conflict(ds)\n\n # Setting user implies setting become/sudo/su to true\n if 'become_user' in ds and not ds.get('become', False):\n ds['become'] = True\n\n # Privilege escalation, backwards compatibility for sudo/su\n if 'sudo' in ds or 'sudo_user' in ds:\n ds['become_method'] = 'sudo'\n if 'sudo' in ds:\n ds['become'] = ds['sudo']\n del ds['sudo']\n else:\n ds['become'] = True\n if 'sudo_user' in ds:\n ds['become_user'] = ds['sudo_user']\n del ds['sudo_user']\n\n #deprecated(\"Instead of sudo/sudo_user, use become/become_user and set 
become_method to 'sudo' (default)\")\n\n elif 'su' in ds or 'su_user' in ds:\n ds['become_method'] = 'su'\n if 'su' in ds:\n ds['become'] = ds['su']\n del ds['su']\n else:\n ds['become'] = True\n if 'su_user' in ds:\n ds['become_user'] = ds['su_user']\n del ds['su_user']\n\n #deprecated(\"Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)\")\n\n # if we are becoming someone else, but some fields are unset,\n # make sure they're initialized to the default config values\n if ds.get('become', False):\n if ds.get('become_method', None) is None:\n ds['become_method'] = C.DEFAULT_BECOME_METHOD\n if ds.get('become_user', None) is None:\n ds['become_user'] = C.DEFAULT_BECOME_USER\n\n return ds\n\n def _get_attr_become(self):\n '''\n Override for the 'become' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become')\n else:\n return self._attributes['become']\n\n def _get_attr_become_method(self):\n '''\n Override for the 'become_method' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become_method')\n else:\n return self._attributes['become_method']\n\n def _get_attr_become_user(self):\n '''\n Override for the 'become_user' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become_user')\n else:\n return self._attributes['become_user']\n\n def _get_attr_become_password(self):\n '''\n Override for the 'become_password' getattr fetcher, used from Base.\n '''\n if hasattr(self, '_get_parent_attribute'):\n return self._get_parent_attribute('become_password')\n else:\n return self._attributes['become_password']\n\n\n", "path": "lib/ansible/playbook/become.py"}]}
| 2,048 | 139 |
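The one-line fix above works because dropping `default=False` leaves the play-level `become` unset (`None`) rather than an explicit `False`, so become options supplied on the CLI are no longer shadowed during option merging. A plain-Python illustration of that precedence pattern (not Ansible internals, just the idea):

```python
# Sketch only: why a hard attribute default shadows CLI options during option merging.
def resolve_become(play_value, cli_value, config_default=False):
    if play_value is not None:      # an explicit play value always wins
        return play_value
    if cli_value is not None:       # otherwise fall back to the CLI flag
        return cli_value
    return config_default           # finally, the configured default

print(resolve_become(play_value=False, cli_value=True))  # False: default=False eats --become
print(resolve_become(play_value=None,  cli_value=True))  # True:  no default, CLI respected
```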
| gh_patches_debug_23733 | rasdani/github-patches | git_diff | e2nIEE__pandapower-857 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ConstControl: Modifying scaling of all loads based on input dataframe
Hello,
I am trying to give an input dataframe to control the scaling of all loads for each time_step in a timeseries simulation
At the end of the simulation, the res_bus[p_mw] is correctly varying according to the given dataframe.
Nevertheless, the results of bus voltages and lines loading appear constant; it sounds like the power flow results don't take into account the load scaling.
This is how I create the controller:
```python
def create_controllers(net, ds):
ConstControl(net, element='load', variable='scaling', element_index=net.load.index,
data_source=ds, profile_name=["Load_p"])
```
where `Load_p` is the dataframe column with the scaling factors.
I have the feeling that this approach should work, so I may be doing something wrong.
Do you have any suggestions about how to handle it?
Best regards,
Michele
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandapower/control/controller/const_control.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6 import numpy as np
7 from pandas import Index
8 from pandapower.control.basic_controller import Controller
9
10 try:
11 import pplog as logging
12 except ImportError:
13 import logging
14
15 logger = logging.getLogger(__name__)
16
17
18 class ConstControl(Controller):
19 """
20 Class representing a generic time series controller for a specified element and variable
21 Control strategy: "No Control" -> just updates timeseries
22
23 INPUT:
24
25 **net** (attrdict) - The net in which the controller resides
26
27 **element** - element table ('sgen', 'load' etc.)
28
29 **variable** - variable ('p_mw', 'q_mvar', 'vm_pu', 'tap_pos' etc.)
30
31 **element_index** (int[]) - IDs of the controlled elements
32
33 **data_source** (obj) - The data source that provides profile data
34
35 **profile_name** (str[]) - The profile names of the elements in the data source
36
37
38 OPTIONAL:
39
40 **scale_factor** (real, 1.0) - Scaling factor for time series input values
41
42 **in_service** (bool, True) - Indicates if the controller is currently in_service
43
44 **recycle** (bool, True) - Re-use of internal-data in a time series loop.
45
46 **drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
47
48 .. note:: If multiple elements are represented with one controller, the data source must have integer columns. At the moment, only the DFData format is tested for the multiple const control.
49 """
50
51 def __init__(self, net, element, variable, element_index, profile_name=None, data_source=None,
52 scale_factor=1.0, in_service=True, recycle=True, order=0, level=0,
53 drop_same_existing_ctrl=False, set_q_from_cosphi=False, matching_params=None, initial_run=False,
54 **kwargs):
55 # just calling init of the parent
56 if matching_params is None:
57 matching_params = {"element": element, "variable": variable,
58 "element_index": element_index}
59 super().__init__(net, in_service=in_service, recycle=recycle, order=order, level=level,
60 drop_same_existing_ctrl=drop_same_existing_ctrl,
61 matching_params=matching_params, initial_run = initial_run,
62 **kwargs)
63 self.matching_params = {"element": element, "variable": variable,
64 "element_index": element_index}
65
66 # data source for time series values
67 self.data_source = data_source
68 # ids of sgens or loads
69 self.element_index = element_index
70 # element type
71 self.element = element
72 self.variable = variable
73 self.values = None
74 self.profile_name = profile_name
75 self.scale_factor = scale_factor
76 if set_q_from_cosphi:
77 logger.error("Parameter set_q_from_cosphi deprecated!")
78 raise ValueError
79 self.applied = False
80 self.initial_run = initial_run
81 # write functions faster, depending on type of self.element_index
82 if isinstance(self.element_index, int):
83 # use .at if element_index is integer for speedup
84 self.write = "single_index"
85 # commenting this out for now, see issue 609
86 # elif self.net[self.element].index.equals(Index(self.element_index)):
87 # # use : indexer if all elements are in index
88 # self.write = "all_index"
89 else:
90 # use common .loc
91 self.write = "loc"
92 self.set_recycle()
93
94 def set_recycle(self):
95 allowed_elements = ["load", "sgen", "storage", "gen", "ext_grid", "trafo", "trafo3w", "line"]
96 if self.recycle is False or self.element not in allowed_elements:
97 # if recycle is set to False by the user when creating the controller it is deactivated or when
98 # const control controls an element which is not able to be recycled
99 self.recycle = False
100 return
101 # these variables determine what is re-calculated during a time series run
102 recycle = dict(trafo=False, gen=False, bus_pq=False)
103 if self.element in ["sgen", "load", "storage"] and self.variable in ["p_mw", "q_mvar"]:
104 recycle["bus_pq"] = True
105 if self.element in ["gen"] and self.variable in ["p_mw", "vm_pu"] \
106 or self.element in ["ext_grid"] and self.variable in ["vm_pu", "va_degree"]:
107 recycle["gen"] = True
108 if self.element in ["trafo", "trafo3w", "line"]:
109 recycle["trafo"] = True
110 self.recycle = recycle
111
112 def write_to_net(self):
113 """
114 Writes to self.element at index self.element_index in the column self.variable the data
115 from self.values
116 """
117 # write functions faster, depending on type of self.element_index
118 if self.write == "single_index":
119 self._write_to_single_index()
120 elif self.write == "all_index":
121 self._write_to_all_index()
122 elif self.write == "loc":
123 self._write_with_loc()
124 else:
125 raise NotImplementedError("ConstControl: self.write must be one of "
126 "['single_index', 'all_index', 'loc']")
127
128 def time_step(self, time):
129 """
130 Get the values of the element from data source
131 """
132 self.values = self.data_source.get_time_step_value(time_step=time,
133 profile_name=self.profile_name,
134 scale_factor=self.scale_factor)
135 # self.write_to_net()
136
137 def initialize_control(self):
138 """
139 At the beginning of each run_control call reset applied-flag
140 """
141 #
142 if self.data_source is None:
143 self.values = self.net[self.element][self.variable].loc[self.element_index]
144 self.applied = False
145
146 def is_converged(self):
147 """
148 Actual implementation of the convergence criteria: If controller is applied, it can stop
149 """
150 return self.applied
151
152 def control_step(self):
153 """
154 Write to pandapower net by calling write_to_net()
155 """
156 if self.values is not None:
157 self.write_to_net()
158 self.applied = True
159
160 def _write_to_single_index(self):
161 self.net[self.element].at[self.element_index, self.variable] = self.values
162
163 def _write_to_all_index(self):
164 self.net[self.element].loc[:, self.variable] = self.values
165
166 def _write_with_loc(self):
167 self.net[self.element].loc[self.element_index, self.variable] = self.values
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandapower/control/controller/const_control.py b/pandapower/control/controller/const_control.py
--- a/pandapower/control/controller/const_control.py
+++ b/pandapower/control/controller/const_control.py
@@ -100,14 +100,16 @@
return
# these variables determine what is re-calculated during a time series run
recycle = dict(trafo=False, gen=False, bus_pq=False)
- if self.element in ["sgen", "load", "storage"] and self.variable in ["p_mw", "q_mvar"]:
+ if self.element in ["sgen", "load", "storage"] and self.variable in ["p_mw", "q_mvar", "scaling"]:
recycle["bus_pq"] = True
- if self.element in ["gen"] and self.variable in ["p_mw", "vm_pu"] \
+ if self.element in ["gen"] and self.variable in ["p_mw", "vm_pu", "scaling"] \
or self.element in ["ext_grid"] and self.variable in ["vm_pu", "va_degree"]:
recycle["gen"] = True
if self.element in ["trafo", "trafo3w", "line"]:
recycle["trafo"] = True
- self.recycle = recycle
+ # recycle is either the dict what should be recycled
+ # or False if the element + variable combination is not supported
+ self.recycle = recycle if not any(list(recycle.values())) else False
def write_to_net(self):
"""
|
{"golden_diff": "diff --git a/pandapower/control/controller/const_control.py b/pandapower/control/controller/const_control.py\n--- a/pandapower/control/controller/const_control.py\n+++ b/pandapower/control/controller/const_control.py\n@@ -100,14 +100,16 @@\n return\n # these variables determine what is re-calculated during a time series run\n recycle = dict(trafo=False, gen=False, bus_pq=False)\n- if self.element in [\"sgen\", \"load\", \"storage\"] and self.variable in [\"p_mw\", \"q_mvar\"]:\n+ if self.element in [\"sgen\", \"load\", \"storage\"] and self.variable in [\"p_mw\", \"q_mvar\", \"scaling\"]:\n recycle[\"bus_pq\"] = True\n- if self.element in [\"gen\"] and self.variable in [\"p_mw\", \"vm_pu\"] \\\n+ if self.element in [\"gen\"] and self.variable in [\"p_mw\", \"vm_pu\", \"scaling\"] \\\n or self.element in [\"ext_grid\"] and self.variable in [\"vm_pu\", \"va_degree\"]:\n recycle[\"gen\"] = True\n if self.element in [\"trafo\", \"trafo3w\", \"line\"]:\n recycle[\"trafo\"] = True\n- self.recycle = recycle\n+ # recycle is either the dict what should be recycled\n+ # or False if the element + variable combination is not supported\n+ self.recycle = recycle if not any(list(recycle.values())) else False\n \n def write_to_net(self):\n \"\"\"\n", "issue": "ConstControl: Modifying scaling of all loads based on input dataframe\nHello,\r\nI am trying to give an input dataframe to control the scaling of all loads for each time_step in a timeseries simulation\r\nAt the end of the simulation, the res_bus[p_mw] is correctly varying according to the given dataframe.\r\nNevertheless, the results of bus voltages and lines loading appear constant; it sounds like the power flow results don't take into account the load scaling.\r\n\r\nThis is how I create the controller:\r\n```python\r\ndef create_controllers(net, ds):\r\n ConstControl(net, element='load', variable='scaling', element_index=net.load.index,\r\n data_source=ds, profile_name=[\"Load_p\"])\r\n```\r\nwhere `Load_p` is the dataframe column with the scaling factors.\r\nI have the feeling that this approach should work, so I may be doing something wrong.\r\nDo you have any suggestions about how to handle it?\r\n\r\nBest regards,\r\nMichele\nConstControl: Modifying scaling of all loads based on input dataframe\nHello,\r\nI am trying to give an input dataframe to control the scaling of all loads for each time_step in a timeseries simulation\r\nAt the end of the simulation, the res_bus[p_mw] is correctly varying according to the given dataframe.\r\nNevertheless, the results of bus voltages and lines loading appear constant; it sounds like the power flow results don't take into account the load scaling.\r\n\r\nThis is how I create the controller:\r\n```python\r\ndef create_controllers(net, ds):\r\n ConstControl(net, element='load', variable='scaling', element_index=net.load.index,\r\n data_source=ds, profile_name=[\"Load_p\"])\r\n```\r\nwhere `Load_p` is the dataframe column with the scaling factors.\r\nI have the feeling that this approach should work, so I may be doing something wrong.\r\nDo you have any suggestions about how to handle it?\r\n\r\nBest regards,\r\nMichele\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. 
All rights reserved.\n\nimport numpy as np\nfrom pandas import Index\nfrom pandapower.control.basic_controller import Controller\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConstControl(Controller):\n \"\"\"\n Class representing a generic time series controller for a specified element and variable\n Control strategy: \"No Control\" -> just updates timeseries\n\n INPUT:\n\n **net** (attrdict) - The net in which the controller resides\n\n **element** - element table ('sgen', 'load' etc.)\n\n **variable** - variable ('p_mw', 'q_mvar', 'vm_pu', 'tap_pos' etc.)\n\n **element_index** (int[]) - IDs of the controlled elements\n\n **data_source** (obj) - The data source that provides profile data\n\n **profile_name** (str[]) - The profile names of the elements in the data source\n\n\n OPTIONAL:\n\n **scale_factor** (real, 1.0) - Scaling factor for time series input values\n\n **in_service** (bool, True) - Indicates if the controller is currently in_service\n\n **recycle** (bool, True) - Re-use of internal-data in a time series loop.\n\n **drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped\n\n .. note:: If multiple elements are represented with one controller, the data source must have integer columns. At the moment, only the DFData format is tested for the multiple const control.\n \"\"\"\n\n def __init__(self, net, element, variable, element_index, profile_name=None, data_source=None,\n scale_factor=1.0, in_service=True, recycle=True, order=0, level=0,\n drop_same_existing_ctrl=False, set_q_from_cosphi=False, matching_params=None, initial_run=False,\n **kwargs):\n # just calling init of the parent\n if matching_params is None:\n matching_params = {\"element\": element, \"variable\": variable,\n \"element_index\": element_index}\n super().__init__(net, in_service=in_service, recycle=recycle, order=order, level=level,\n drop_same_existing_ctrl=drop_same_existing_ctrl,\n matching_params=matching_params, initial_run = initial_run,\n **kwargs)\n self.matching_params = {\"element\": element, \"variable\": variable,\n \"element_index\": element_index}\n\n # data source for time series values\n self.data_source = data_source\n # ids of sgens or loads\n self.element_index = element_index\n # element type\n self.element = element\n self.variable = variable\n self.values = None\n self.profile_name = profile_name\n self.scale_factor = scale_factor\n if set_q_from_cosphi:\n logger.error(\"Parameter set_q_from_cosphi deprecated!\")\n raise ValueError\n self.applied = False\n self.initial_run = initial_run\n # write functions faster, depending on type of self.element_index\n if isinstance(self.element_index, int):\n # use .at if element_index is integer for speedup\n self.write = \"single_index\"\n # commenting this out for now, see issue 609\n # elif self.net[self.element].index.equals(Index(self.element_index)):\n # # use : indexer if all elements are in index\n # self.write = \"all_index\"\n else:\n # use common .loc\n self.write = \"loc\"\n self.set_recycle()\n\n def set_recycle(self):\n allowed_elements = [\"load\", \"sgen\", \"storage\", \"gen\", \"ext_grid\", \"trafo\", \"trafo3w\", \"line\"]\n if self.recycle is False or self.element not in allowed_elements:\n # if recycle is set to False by the user when creating the controller it is deactivated or when\n # const control controls an element which is 
not able to be recycled\n self.recycle = False\n return\n # these variables determine what is re-calculated during a time series run\n recycle = dict(trafo=False, gen=False, bus_pq=False)\n if self.element in [\"sgen\", \"load\", \"storage\"] and self.variable in [\"p_mw\", \"q_mvar\"]:\n recycle[\"bus_pq\"] = True\n if self.element in [\"gen\"] and self.variable in [\"p_mw\", \"vm_pu\"] \\\n or self.element in [\"ext_grid\"] and self.variable in [\"vm_pu\", \"va_degree\"]:\n recycle[\"gen\"] = True\n if self.element in [\"trafo\", \"trafo3w\", \"line\"]:\n recycle[\"trafo\"] = True\n self.recycle = recycle\n\n def write_to_net(self):\n \"\"\"\n Writes to self.element at index self.element_index in the column self.variable the data\n from self.values\n \"\"\"\n # write functions faster, depending on type of self.element_index\n if self.write == \"single_index\":\n self._write_to_single_index()\n elif self.write == \"all_index\":\n self._write_to_all_index()\n elif self.write == \"loc\":\n self._write_with_loc()\n else:\n raise NotImplementedError(\"ConstControl: self.write must be one of \"\n \"['single_index', 'all_index', 'loc']\")\n \n def time_step(self, time):\n \"\"\"\n Get the values of the element from data source\n \"\"\"\n self.values = self.data_source.get_time_step_value(time_step=time,\n profile_name=self.profile_name,\n scale_factor=self.scale_factor)\n # self.write_to_net()\n\n def initialize_control(self):\n \"\"\"\n At the beginning of each run_control call reset applied-flag\n \"\"\"\n #\n if self.data_source is None:\n self.values = self.net[self.element][self.variable].loc[self.element_index]\n self.applied = False\n\n def is_converged(self):\n \"\"\"\n Actual implementation of the convergence criteria: If controller is applied, it can stop\n \"\"\"\n return self.applied\n\n def control_step(self):\n \"\"\"\n Write to pandapower net by calling write_to_net()\n \"\"\"\n if self.values is not None:\n self.write_to_net()\n self.applied = True\n\n def _write_to_single_index(self):\n self.net[self.element].at[self.element_index, self.variable] = self.values\n\n def _write_to_all_index(self):\n self.net[self.element].loc[:, self.variable] = self.values\n\n def _write_with_loc(self):\n self.net[self.element].loc[self.element_index, self.variable] = self.values\n", "path": "pandapower/control/controller/const_control.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. 
All rights reserved.\n\nimport numpy as np\nfrom pandas import Index\nfrom pandapower.control.basic_controller import Controller\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConstControl(Controller):\n \"\"\"\n Class representing a generic time series controller for a specified element and variable\n Control strategy: \"No Control\" -> just updates timeseries\n\n INPUT:\n\n **net** (attrdict) - The net in which the controller resides\n\n **element** - element table ('sgen', 'load' etc.)\n\n **variable** - variable ('p_mw', 'q_mvar', 'vm_pu', 'tap_pos' etc.)\n\n **element_index** (int[]) - IDs of the controlled elements\n\n **data_source** (obj) - The data source that provides profile data\n\n **profile_name** (str[]) - The profile names of the elements in the data source\n\n\n OPTIONAL:\n\n **scale_factor** (real, 1.0) - Scaling factor for time series input values\n\n **in_service** (bool, True) - Indicates if the controller is currently in_service\n\n **recycle** (bool, True) - Re-use of internal-data in a time series loop.\n\n **drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped\n\n .. note:: If multiple elements are represented with one controller, the data source must have integer columns. At the moment, only the DFData format is tested for the multiple const control.\n \"\"\"\n\n def __init__(self, net, element, variable, element_index, profile_name=None, data_source=None,\n scale_factor=1.0, in_service=True, recycle=True, order=0, level=0,\n drop_same_existing_ctrl=False, set_q_from_cosphi=False, matching_params=None, initial_run=False,\n **kwargs):\n # just calling init of the parent\n if matching_params is None:\n matching_params = {\"element\": element, \"variable\": variable,\n \"element_index\": element_index}\n super().__init__(net, in_service=in_service, recycle=recycle, order=order, level=level,\n drop_same_existing_ctrl=drop_same_existing_ctrl,\n matching_params=matching_params, initial_run = initial_run,\n **kwargs)\n self.matching_params = {\"element\": element, \"variable\": variable,\n \"element_index\": element_index}\n\n # data source for time series values\n self.data_source = data_source\n # ids of sgens or loads\n self.element_index = element_index\n # element type\n self.element = element\n self.variable = variable\n self.values = None\n self.profile_name = profile_name\n self.scale_factor = scale_factor\n if set_q_from_cosphi:\n logger.error(\"Parameter set_q_from_cosphi deprecated!\")\n raise ValueError\n self.applied = False\n self.initial_run = initial_run\n # write functions faster, depending on type of self.element_index\n if isinstance(self.element_index, int):\n # use .at if element_index is integer for speedup\n self.write = \"single_index\"\n # commenting this out for now, see issue 609\n # elif self.net[self.element].index.equals(Index(self.element_index)):\n # # use : indexer if all elements are in index\n # self.write = \"all_index\"\n else:\n # use common .loc\n self.write = \"loc\"\n self.set_recycle()\n\n def set_recycle(self):\n allowed_elements = [\"load\", \"sgen\", \"storage\", \"gen\", \"ext_grid\", \"trafo\", \"trafo3w\", \"line\"]\n if self.recycle is False or self.element not in allowed_elements:\n # if recycle is set to False by the user when creating the controller it is deactivated or when\n # const control controls an element which is 
not able to be recycled\n self.recycle = False\n return\n # these variables determine what is re-calculated during a time series run\n recycle = dict(trafo=False, gen=False, bus_pq=False)\n if self.element in [\"sgen\", \"load\", \"storage\"] and self.variable in [\"p_mw\", \"q_mvar\", \"scaling\"]:\n recycle[\"bus_pq\"] = True\n if self.element in [\"gen\"] and self.variable in [\"p_mw\", \"vm_pu\", \"scaling\"] \\\n or self.element in [\"ext_grid\"] and self.variable in [\"vm_pu\", \"va_degree\"]:\n recycle[\"gen\"] = True\n if self.element in [\"trafo\", \"trafo3w\", \"line\"]:\n recycle[\"trafo\"] = True\n # recycle is either the dict what should be recycled\n # or False if the element + variable combination is not supported\n self.recycle = recycle if not any(list(recycle.values())) else False\n\n def write_to_net(self):\n \"\"\"\n Writes to self.element at index self.element_index in the column self.variable the data\n from self.values\n \"\"\"\n # write functions faster, depending on type of self.element_index\n if self.write == \"single_index\":\n self._write_to_single_index()\n elif self.write == \"all_index\":\n self._write_to_all_index()\n elif self.write == \"loc\":\n self._write_with_loc()\n else:\n raise NotImplementedError(\"ConstControl: self.write must be one of \"\n \"['single_index', 'all_index', 'loc']\")\n \n def time_step(self, time):\n \"\"\"\n Get the values of the element from data source\n \"\"\"\n self.values = self.data_source.get_time_step_value(time_step=time,\n profile_name=self.profile_name,\n scale_factor=self.scale_factor)\n # self.write_to_net()\n\n def initialize_control(self):\n \"\"\"\n At the beginning of each run_control call reset applied-flag\n \"\"\"\n #\n if self.data_source is None:\n self.values = self.net[self.element][self.variable].loc[self.element_index]\n self.applied = False\n\n def is_converged(self):\n \"\"\"\n Actual implementation of the convergence criteria: If controller is applied, it can stop\n \"\"\"\n return self.applied\n\n def control_step(self):\n \"\"\"\n Write to pandapower net by calling write_to_net()\n \"\"\"\n if self.values is not None:\n self.write_to_net()\n self.applied = True\n\n def _write_to_single_index(self):\n self.net[self.element].at[self.element_index, self.variable] = self.values\n\n def _write_to_all_index(self):\n self.net[self.element].loc[:, self.variable] = self.values\n\n def _write_with_loc(self):\n self.net[self.element].loc[self.element_index, self.variable] = self.values\n", "path": "pandapower/control/controller/const_control.py"}]}
num_tokens: 2,563 | num_tokens_diff: 346
gh_patches_debug_1532 | rasdani/github-patches | git_diff | mne-tools__mne-bids-259
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update text before release
The setup.py still states that this is experimental. I think it's grown up a fair bit :) This is the text that ends up on PyPI, so we should update it to reflect the package's aim
https://github.com/mne-tools/mne-bids/blob/f8f267b45ac36e1600ea9ceb5540299e1bf3ab21/setup.py#L17
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 """Setup MNE-BIDS."""
3 import os
4 from setuptools import setup, find_packages
5
6 # get the version
7 version = None
8 with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
9 for line in (line.strip() for line in fid):
10 if line.startswith('__version__'):
11 version = line.split('=')[1].strip().strip('\'')
12 break
13 if version is None:
14 raise RuntimeError('Could not determine version')
15
16
17 descr = """Experimental code for BIDS using MNE."""
18
19 DISTNAME = 'mne-bids'
20 DESCRIPTION = descr
21 MAINTAINER = 'Mainak Jas'
22 MAINTAINER_EMAIL = '[email protected]'
23 URL = 'https://mne-tools.github.io/mne-bids/'
24 LICENSE = 'BSD (3-clause)'
25 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'
26 VERSION = version
27
28 if __name__ == "__main__":
29 setup(name=DISTNAME,
30 maintainer=MAINTAINER,
31 maintainer_email=MAINTAINER_EMAIL,
32 description=DESCRIPTION,
33 license=LICENSE,
34 url=URL,
35 version=VERSION,
36 download_url=DOWNLOAD_URL,
37 long_description=open('README.rst').read(),
38 long_description_content_type='text/x-rst',
39 classifiers=[
40 'Intended Audience :: Science/Research',
41 'Intended Audience :: Developers',
42 'License :: OSI Approved',
43 'Programming Language :: Python',
44 'Topic :: Software Development',
45 'Topic :: Scientific/Engineering',
46 'Operating System :: Microsoft :: Windows',
47 'Operating System :: POSIX',
48 'Operating System :: Unix',
49 'Operating System :: MacOS',
50 ],
51 platforms='any',
52 packages=find_packages(),
53 scripts=['bin/mne_bids']
54 )
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,8 @@
raise RuntimeError('Could not determine version')
-descr = """Experimental code for BIDS using MNE."""
+descr = ('An MNE project for organizing and formatting MEG and EEG data '
+ 'according to the BIDS specification.')
DISTNAME = 'mne-bids'
DESCRIPTION = descr
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,8 @@\n raise RuntimeError('Could not determine version')\n \n \n-descr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n+descr = ('An MNE project for organizing and formatting MEG and EEG data '\n+ 'according to the BIDS specification.')\n \n DISTNAME = 'mne-bids'\n DESCRIPTION = descr\n", "issue": "Update text before release\nThe setup.py still states that this is experimental. I think it's grown up a fair bit :) this is the text that ends up on pypi. we should update it to reflect the package's aim\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/f8f267b45ac36e1600ea9ceb5540299e1bf3ab21/setup.py#L17\n", "before_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne-tools.github.io/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids']\n )\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('An MNE project for organizing and formatting MEG and EEG data '\n 'according to the BIDS specification.')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne-tools.github.io/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids']\n )\n", "path": "setup.py"}]}
num_tokens: 847 | num_tokens_diff: 97
gh_patches_debug_22071 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1919
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failure to get the container id
With #1888 in place, there's a regression when running inside a container. There's an assumption in https://github.com/pre-commit/pre-commit/blob/master/pre_commit/languages/docker.py#L32 that the hostname is the container ID, which is not always the case (it's easy enough to set a different hostname with `docker run --hostname foo`). This causes the `docker inspect` command that follows to fail. A more reliable way to get the container ID is from `/proc/1/cpuset` or from the first line in `/proc/1/cgroup`, which is already checked in `_is_in_docker`.
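To make that suggestion concrete, here is a minimal sketch (not pre-commit's actual code; the function name is illustrative) of pulling the container ID out of `/proc/1/cgroup`, assuming the cgroup v1 layout where each line reads `<hierarchy>:<controllers>:<path>`, e.g. `12:cpuset:/docker/<id>`:

```python
# Illustrative sketch only: take the last path component of the cpuset entry
# in /proc/1/cgroup as the container ID.
import os


def container_id_from_cgroup(path: str = '/proc/1/cgroup') -> str:
    with open(path, 'rb') as f:
        for line in f:
            _, controllers, cgroup_path = line.rstrip(b'\n').split(b':', 2)
            if controllers == b'cpuset':
                return os.path.basename(cgroup_path).decode()
    raise RuntimeError('could not determine the container ID')
```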
Thanks @asottile and @okainov for your work on #1387 and pre-commit in general.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/docker.py`
Content:
```
1 import hashlib
2 import json
3 import os
4 import socket
5 from typing import Sequence
6 from typing import Tuple
7
8 import pre_commit.constants as C
9 from pre_commit.hook import Hook
10 from pre_commit.languages import helpers
11 from pre_commit.prefix import Prefix
12 from pre_commit.util import clean_path_on_failure
13 from pre_commit.util import cmd_output_b
14
15 ENVIRONMENT_DIR = 'docker'
16 PRE_COMMIT_LABEL = 'PRE_COMMIT'
17 get_default_version = helpers.basic_get_default_version
18 healthy = helpers.basic_healthy
19
20
21 def _is_in_docker() -> bool:
22 try:
23 with open('/proc/1/cgroup', 'rb') as f:
24 return b'docker' in f.read()
25 except FileNotFoundError:
26 return False
27
28
29 def _get_docker_path(path: str) -> str:
30 if not _is_in_docker():
31 return path
32 hostname = socket.gethostname()
33
34 _, out, _ = cmd_output_b('docker', 'inspect', hostname)
35
36 container, = json.loads(out)
37 for mount in container['Mounts']:
38 src_path = mount['Source']
39 to_path = mount['Destination']
40 if os.path.commonpath((path, to_path)) == to_path:
41 # So there is something in common,
42 # and we can proceed remapping it
43 return path.replace(to_path, src_path)
44 # we're in Docker, but the path is not mounted, cannot really do anything,
45 # so fall back to original path
46 return path
47
48
49 def md5(s: str) -> str: # pragma: win32 no cover
50 return hashlib.md5(s.encode()).hexdigest()
51
52
53 def docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover
54 md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()
55 return f'pre-commit-{md5sum}'
56
57
58 def build_docker_image(
59 prefix: Prefix,
60 *,
61 pull: bool,
62 ) -> None: # pragma: win32 no cover
63 cmd: Tuple[str, ...] = (
64 'docker', 'build',
65 '--tag', docker_tag(prefix),
66 '--label', PRE_COMMIT_LABEL,
67 )
68 if pull:
69 cmd += ('--pull',)
70 # This must come last for old versions of docker. See #477
71 cmd += ('.',)
72 helpers.run_setup_cmd(prefix, cmd)
73
74
75 def install_environment(
76 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
77 ) -> None: # pragma: win32 no cover
78 helpers.assert_version_default('docker', version)
79 helpers.assert_no_additional_deps('docker', additional_dependencies)
80
81 directory = prefix.path(
82 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
83 )
84
85 # Docker doesn't really have relevant disk environment, but pre-commit
86 # still needs to cleanup its state files on failure
87 with clean_path_on_failure(directory):
88 build_docker_image(prefix, pull=True)
89 os.mkdir(directory)
90
91
92 def get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover
93 try:
94 return ('-u', f'{os.getuid()}:{os.getgid()}')
95 except AttributeError:
96 return ()
97
98
99 def docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover
100 return (
101 'docker', 'run',
102 '--rm',
103 *get_docker_user(),
104 # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from
105 # The `Z` option tells Docker to label the content with a private
106 # unshared label. Only the current container can use a private volume.
107 '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z',
108 '--workdir', '/src',
109 )
110
111
112 def run_hook(
113 hook: Hook,
114 file_args: Sequence[str],
115 color: bool,
116 ) -> Tuple[int, bytes]: # pragma: win32 no cover
117 # Rebuild the docker image in case it has gone missing, as many people do
118 # automated cleanup of docker images.
119 build_docker_image(hook.prefix, pull=False)
120
121 entry_exe, *cmd_rest = hook.cmd
122
123 entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))
124 cmd = (*docker_cmd(), *entry_tag, *cmd_rest)
125 return helpers.run_xargs(hook, cmd, file_args, color=color)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py
--- a/pre_commit/languages/docker.py
+++ b/pre_commit/languages/docker.py
@@ -1,7 +1,6 @@
import hashlib
import json
import os
-import socket
from typing import Sequence
from typing import Tuple
@@ -26,12 +25,24 @@
return False
+def _get_container_id() -> str:
+ # It's assumed that we already check /proc/1/cgroup in _is_in_docker. The
+ # cpuset cgroup controller existed since cgroups were introduced so this
+ # way of getting the container ID is pretty reliable.
+ with open('/proc/1/cgroup', 'rb') as f:
+ for line in f.readlines():
+ if line.split(b':')[1] == b'cpuset':
+ return os.path.basename(line.split(b':')[2]).strip().decode()
+ raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')
+
+
def _get_docker_path(path: str) -> str:
if not _is_in_docker():
return path
- hostname = socket.gethostname()
- _, out, _ = cmd_output_b('docker', 'inspect', hostname)
+ container_id = _get_container_id()
+
+ _, out, _ = cmd_output_b('docker', 'inspect', container_id)
container, = json.loads(out)
for mount in container['Mounts']:
|
{"golden_diff": "diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py\n--- a/pre_commit/languages/docker.py\n+++ b/pre_commit/languages/docker.py\n@@ -1,7 +1,6 @@\n import hashlib\n import json\n import os\n-import socket\n from typing import Sequence\n from typing import Tuple\n \n@@ -26,12 +25,24 @@\n return False\n \n \n+def _get_container_id() -> str:\n+ # It's assumed that we already check /proc/1/cgroup in _is_in_docker. The\n+ # cpuset cgroup controller existed since cgroups were introduced so this\n+ # way of getting the container ID is pretty reliable.\n+ with open('/proc/1/cgroup', 'rb') as f:\n+ for line in f.readlines():\n+ if line.split(b':')[1] == b'cpuset':\n+ return os.path.basename(line.split(b':')[2]).strip().decode()\n+ raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')\n+\n+\n def _get_docker_path(path: str) -> str:\n if not _is_in_docker():\n return path\n- hostname = socket.gethostname()\n \n- _, out, _ = cmd_output_b('docker', 'inspect', hostname)\n+ container_id = _get_container_id()\n+\n+ _, out, _ = cmd_output_b('docker', 'inspect', container_id)\n \n container, = json.loads(out)\n for mount in container['Mounts']:\n", "issue": "Failure to get the container id\nWith #1888 in place, there's a regression when running inside a container. There's an assumption in https://github.com/pre-commit/pre-commit/blob/master/pre_commit/languages/docker.py#L32 that the hostname is the container ID which is not always the case (it's easy enough to set a different hostname with `docker run --hostname foo`). It causes the `docker inspect` command that follows to fail. A more reliable way to get the container id is from `/proc/1/cpuset` or from the first line in `/proc/1/cgroup` which is already checked in `_is_in_docker`.\r\n\r\nThanks @asottile and @okainov for your work on #1387 and pre-commit in general.\n", "before_files": [{"content": "import hashlib\nimport json\nimport os\nimport socket\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef _is_in_docker() -> bool:\n try:\n with open('/proc/1/cgroup', 'rb') as f:\n return b'docker' in f.read()\n except FileNotFoundError:\n return False\n\n\ndef _get_docker_path(path: str) -> str:\n if not _is_in_docker():\n return path\n hostname = socket.gethostname()\n\n _, out, _ = cmd_output_b('docker', 'inspect', hostname)\n\n container, = json.loads(out)\n for mount in container['Mounts']:\n src_path = mount['Source']\n to_path = mount['Destination']\n if os.path.commonpath((path, to_path)) == to_path:\n # So there is something in common,\n # and we can proceed remapping it\n return path.replace(to_path, src_path)\n # we're in Docker, but the path is not mounted, cannot really do anything,\n # so fall back to original path\n return path\n\n\ndef md5(s: str) -> str: # pragma: win32 no cover\n return hashlib.md5(s.encode()).hexdigest()\n\n\ndef docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover\n md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()\n return f'pre-commit-{md5sum}'\n\n\ndef build_docker_image(\n prefix: Prefix,\n *,\n pull: bool,\n) -> None: # pragma: win32 no 
cover\n cmd: Tuple[str, ...] = (\n 'docker', 'build',\n '--tag', docker_tag(prefix),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n # This must come last for old versions of docker. See #477\n cmd += ('.',)\n helpers.run_setup_cmd(prefix, cmd)\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup its state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(prefix, pull=True)\n os.mkdir(directory)\n\n\ndef get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover\n try:\n return ('-u', f'{os.getuid()}:{os.getgid()}')\n except AttributeError:\n return ()\n\n\ndef docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover\n return (\n 'docker', 'run',\n '--rm',\n *get_docker_user(),\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z',\n '--workdir', '/src',\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]: # pragma: win32 no cover\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(hook.prefix, pull=False)\n\n entry_exe, *cmd_rest = hook.cmd\n\n entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))\n cmd = (*docker_cmd(), *entry_tag, *cmd_rest)\n return helpers.run_xargs(hook, cmd, file_args, color=color)\n", "path": "pre_commit/languages/docker.py"}], "after_files": [{"content": "import hashlib\nimport json\nimport os\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef _is_in_docker() -> bool:\n try:\n with open('/proc/1/cgroup', 'rb') as f:\n return b'docker' in f.read()\n except FileNotFoundError:\n return False\n\n\ndef _get_container_id() -> str:\n # It's assumed that we already check /proc/1/cgroup in _is_in_docker. 
The\n # cpuset cgroup controller existed since cgroups were introduced so this\n # way of getting the container ID is pretty reliable.\n with open('/proc/1/cgroup', 'rb') as f:\n for line in f.readlines():\n if line.split(b':')[1] == b'cpuset':\n return os.path.basename(line.split(b':')[2]).strip().decode()\n raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')\n\n\ndef _get_docker_path(path: str) -> str:\n if not _is_in_docker():\n return path\n\n container_id = _get_container_id()\n\n _, out, _ = cmd_output_b('docker', 'inspect', container_id)\n\n container, = json.loads(out)\n for mount in container['Mounts']:\n src_path = mount['Source']\n to_path = mount['Destination']\n if os.path.commonpath((path, to_path)) == to_path:\n # So there is something in common,\n # and we can proceed remapping it\n return path.replace(to_path, src_path)\n # we're in Docker, but the path is not mounted, cannot really do anything,\n # so fall back to original path\n return path\n\n\ndef md5(s: str) -> str: # pragma: win32 no cover\n return hashlib.md5(s.encode()).hexdigest()\n\n\ndef docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover\n md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()\n return f'pre-commit-{md5sum}'\n\n\ndef build_docker_image(\n prefix: Prefix,\n *,\n pull: bool,\n) -> None: # pragma: win32 no cover\n cmd: Tuple[str, ...] = (\n 'docker', 'build',\n '--tag', docker_tag(prefix),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n # This must come last for old versions of docker. See #477\n cmd += ('.',)\n helpers.run_setup_cmd(prefix, cmd)\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup its state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(prefix, pull=True)\n os.mkdir(directory)\n\n\ndef get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover\n try:\n return ('-u', f'{os.getuid()}:{os.getgid()}')\n except AttributeError:\n return ()\n\n\ndef docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover\n return (\n 'docker', 'run',\n '--rm',\n *get_docker_user(),\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z',\n '--workdir', '/src',\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]: # pragma: win32 no cover\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(hook.prefix, pull=False)\n\n entry_exe, *cmd_rest = hook.cmd\n\n entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))\n cmd = (*docker_cmd(), *entry_tag, *cmd_rest)\n return helpers.run_xargs(hook, cmd, file_args, color=color)\n", "path": "pre_commit/languages/docker.py"}]}
num_tokens: 1,658 | num_tokens_diff: 332
gh_patches_debug_27339 | rasdani/github-patches | git_diff | ansible__awx-8487
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In Kubernetes container groups, user-supplied .metadata.labels is ignored
##### ISSUE TYPE
- Bug Report
##### SUMMARY
Due to `awx/main/scheduler/kubernetes.py` overriding the `.metadata.labels` of the pods it creates (instead of merging them with the user-supplied `pod_spec_override`), features such as pod anti-affinity between AWX runners cannot work.
##### ENVIRONMENT
* AWX version: 15.0.1 (also present in devel)
* AWX install method: openshift
* Ansible version: irrelevant
* Operating System: Linux (all versions)
* Web Browser: irrelevant
##### STEPS TO REPRODUCE
1. Create a Kubernetes container group with the below piece of YAML as the pod spec override
1. Run a job out of this instance group
```yaml
apiVersion: v1
kind: Pod
metadata:
labels:
deploymentconfig: ansible-runner
namespace: wwp-test
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: deploymentconfig
operator: In
values:
- ansible-runner
# ...
```
##### EXPECTED RESULTS
The pods run by AWX as part of the container group should contain both the custom labels and the affinity structure.
##### ACTUAL RESULTS
Only the affinity structure shows up in the `kind: Pod` Kubernetes objects, rendering the `podAntiAffinity` clause inoperative (for lack of a label to match on under `metadata`).
##### ADDITIONAL INFORMATION
The cause is the equals sign on [this line](https://github.com/ansible/awx/blob/devel/awx/main/scheduler/kubernetes.py#L132).
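As a quick illustration of that cause (with made-up label values, not AWX's actual objects): plain assignment discards the user-supplied labels, while a merge keeps both sets.

```python
# Hypothetical values for illustration only.
user_labels = {'deploymentconfig': 'ansible-runner'}
awx_labels = {'ansible-awx': 'some-install-uuid', 'ansible-awx-job-id': '42'}

pod_metadata = {'namespace': 'wwp-test', 'labels': dict(user_labels)}

# What the current code effectively does -- the user's label is lost:
pod_metadata['labels'] = awx_labels

# What the issue asks for -- merge instead of overwrite:
pod_metadata['labels'] = {**user_labels, **awx_labels}
# -> {'deploymentconfig': 'ansible-runner',
#     'ansible-awx': 'some-install-uuid', 'ansible-awx-job-id': '42'}
```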
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx/main/scheduler/kubernetes.py`
Content:
```
1 import collections
2 import time
3 import logging
4 from base64 import b64encode
5
6 from django.conf import settings
7 from kubernetes import client, config
8 from django.utils.functional import cached_property
9
10 from awx.main.utils.common import parse_yaml_or_json
11
12 logger = logging.getLogger('awx.main.scheduler')
13
14
15 class PodManager(object):
16
17 def __init__(self, task=None):
18 self.task = task
19
20 def deploy(self):
21 if not self.credential.kubernetes:
22 raise RuntimeError('Pod deployment cannot occur without a Kubernetes credential')
23
24 self.kube_api.create_namespaced_pod(body=self.pod_definition,
25 namespace=self.namespace,
26 _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
27
28 num_retries = settings.AWX_CONTAINER_GROUP_POD_LAUNCH_RETRIES
29 for retry_attempt in range(num_retries - 1):
30 logger.debug(f"Checking for pod {self.pod_name}. Attempt {retry_attempt + 1} of {num_retries}")
31 pod = self.kube_api.read_namespaced_pod(name=self.pod_name,
32 namespace=self.namespace,
33 _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
34 if pod.status.phase != 'Pending':
35 break
36 else:
37 logger.debug(f"Pod {self.pod_name} is Pending.")
38 time.sleep(settings.AWX_CONTAINER_GROUP_POD_LAUNCH_RETRY_DELAY)
39 continue
40
41 if pod.status.phase == 'Running':
42 logger.debug(f"Pod {self.pod_name} is online.")
43 return pod
44 else:
45 logger.warn(f"Pod {self.pod_name} did not start. Status is {pod.status.phase}.")
46
47 @classmethod
48 def list_active_jobs(self, instance_group):
49 task = collections.namedtuple('Task', 'id instance_group')(
50 id='',
51 instance_group=instance_group
52 )
53 pm = PodManager(task)
54 try:
55 for pod in pm.kube_api.list_namespaced_pod(
56 pm.namespace,
57 label_selector='ansible-awx={}'.format(settings.INSTALL_UUID)
58 ).to_dict().get('items', []):
59 job = pod['metadata'].get('labels', {}).get('ansible-awx-job-id')
60 if job:
61 try:
62 yield int(job)
63 except ValueError:
64 pass
65 except Exception:
66 logger.exception('Failed to list pods for container group {}'.format(instance_group))
67
68 def delete(self):
69 return self.kube_api.delete_namespaced_pod(name=self.pod_name,
70 namespace=self.namespace,
71 _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
72
73 @property
74 def namespace(self):
75 return self.pod_definition['metadata']['namespace']
76
77 @property
78 def credential(self):
79 return self.task.instance_group.credential
80
81 @cached_property
82 def kube_config(self):
83 return generate_tmp_kube_config(self.credential, self.namespace)
84
85 @cached_property
86 def kube_api(self):
87 # this feels a little janky, but it's what k8s' own code does
88 # internally when it reads kube config files from disk:
89 # https://github.com/kubernetes-client/python-base/blob/0b208334ef0247aad9afcaae8003954423b61a0d/config/kube_config.py#L643
90 loader = config.kube_config.KubeConfigLoader(
91 config_dict=self.kube_config
92 )
93 cfg = type.__call__(client.Configuration)
94 loader.load_and_set(cfg)
95 return client.CoreV1Api(api_client=client.ApiClient(
96 configuration=cfg
97 ))
98
99 @property
100 def pod_name(self):
101 return f"awx-job-{self.task.id}"
102
103 @property
104 def pod_definition(self):
105 default_pod_spec = {
106 "apiVersion": "v1",
107 "kind": "Pod",
108 "metadata": {
109 "namespace": settings.AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE
110 },
111 "spec": {
112 "containers": [{
113 "image": settings.AWX_CONTAINER_GROUP_DEFAULT_IMAGE,
114 "tty": True,
115 "stdin": True,
116 "imagePullPolicy": "Always",
117 "args": [
118 'sleep', 'infinity'
119 ]
120 }]
121 }
122 }
123
124 pod_spec_override = {}
125 if self.task and self.task.instance_group.pod_spec_override:
126 pod_spec_override = parse_yaml_or_json(
127 self.task.instance_group.pod_spec_override)
128 pod_spec = {**default_pod_spec, **pod_spec_override}
129
130 if self.task:
131 pod_spec['metadata']['name'] = self.pod_name
132 pod_spec['metadata']['labels'] = {
133 'ansible-awx': settings.INSTALL_UUID,
134 'ansible-awx-job-id': str(self.task.id)
135 }
136 pod_spec['spec']['containers'][0]['name'] = self.pod_name
137
138 return pod_spec
139
140
141 def generate_tmp_kube_config(credential, namespace):
142 host_input = credential.get_input('host')
143 config = {
144 "apiVersion": "v1",
145 "kind": "Config",
146 "preferences": {},
147 "clusters": [
148 {
149 "name": host_input,
150 "cluster": {
151 "server": host_input
152 }
153 }
154 ],
155 "users": [
156 {
157 "name": host_input,
158 "user": {
159 "token": credential.get_input('bearer_token')
160 }
161 }
162 ],
163 "contexts": [
164 {
165 "name": host_input,
166 "context": {
167 "cluster": host_input,
168 "user": host_input,
169 "namespace": namespace
170 }
171 }
172 ],
173 "current-context": host_input
174 }
175
176 if credential.get_input('verify_ssl') and 'ssl_ca_cert' in credential.inputs:
177 config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
178 credential.get_input('ssl_ca_cert').encode() # encode to bytes
179 ).decode() # decode the base64 data into a str
180 else:
181 config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
182 return config
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awx/main/scheduler/kubernetes.py b/awx/main/scheduler/kubernetes.py
--- a/awx/main/scheduler/kubernetes.py
+++ b/awx/main/scheduler/kubernetes.py
@@ -12,6 +12,24 @@
logger = logging.getLogger('awx.main.scheduler')
+def deepmerge(a, b):
+ """
+ Merge dict structures and return the result.
+
+ >>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
+ >>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
+ >>> import pprint; pprint.pprint(deepmerge(a, b))
+ {'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
+ """
+ if isinstance(a, dict) and isinstance(b, dict):
+ return dict([(k, deepmerge(a.get(k), b.get(k)))
+ for k in set(a.keys()).union(b.keys())])
+ elif b is None:
+ return a
+ else:
+ return b
+
+
class PodManager(object):
def __init__(self, task=None):
@@ -128,11 +146,13 @@
pod_spec = {**default_pod_spec, **pod_spec_override}
if self.task:
- pod_spec['metadata']['name'] = self.pod_name
- pod_spec['metadata']['labels'] = {
- 'ansible-awx': settings.INSTALL_UUID,
- 'ansible-awx-job-id': str(self.task.id)
- }
+ pod_spec['metadata'] = deepmerge(
+ pod_spec.get('metadata', {}),
+ dict(name=self.pod_name,
+ labels={
+ 'ansible-awx': settings.INSTALL_UUID,
+ 'ansible-awx-job-id': str(self.task.id)
+ }))
pod_spec['spec']['containers'][0]['name'] = self.pod_name
return pod_spec
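For reference, a small usage sketch of the `deepmerge` helper introduced in the patch above, applied to a metadata block like the one from the issue. The values are placeholders, and the helper is copied from the patch so the snippet runs on its own:

```python
def deepmerge(a, b):
    """Merge dict structures and return the result (copied from the patch above)."""
    if isinstance(a, dict) and isinstance(b, dict):
        return dict([(k, deepmerge(a.get(k), b.get(k)))
                     for k in set(a.keys()).union(b.keys())])
    elif b is None:
        return a
    else:
        return b


user_metadata = {'namespace': 'wwp-test',
                 'labels': {'deploymentconfig': 'ansible-runner'}}
awx_metadata = {'name': 'awx-job-42',
                'labels': {'ansible-awx': 'some-install-uuid',
                           'ansible-awx-job-id': '42'}}

merged = deepmerge(user_metadata, awx_metadata)
# merged keeps the user's label alongside the AWX-managed ones:
# {'namespace': 'wwp-test', 'name': 'awx-job-42',
#  'labels': {'deploymentconfig': 'ansible-runner',
#             'ansible-awx': 'some-install-uuid', 'ansible-awx-job-id': '42'}}
```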
|
{"golden_diff": "diff --git a/awx/main/scheduler/kubernetes.py b/awx/main/scheduler/kubernetes.py\n--- a/awx/main/scheduler/kubernetes.py\n+++ b/awx/main/scheduler/kubernetes.py\n@@ -12,6 +12,24 @@\n logger = logging.getLogger('awx.main.scheduler')\n \n \n+def deepmerge(a, b):\n+ \"\"\"\n+ Merge dict structures and return the result.\n+\n+ >>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}\n+ >>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}\n+ >>> import pprint; pprint.pprint(deepmerge(a, b))\n+ {'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}\n+ \"\"\"\n+ if isinstance(a, dict) and isinstance(b, dict):\n+ return dict([(k, deepmerge(a.get(k), b.get(k)))\n+ for k in set(a.keys()).union(b.keys())])\n+ elif b is None:\n+ return a\n+ else:\n+ return b\n+\n+\n class PodManager(object):\n \n def __init__(self, task=None):\n@@ -128,11 +146,13 @@\n pod_spec = {**default_pod_spec, **pod_spec_override}\n \n if self.task:\n- pod_spec['metadata']['name'] = self.pod_name\n- pod_spec['metadata']['labels'] = {\n- 'ansible-awx': settings.INSTALL_UUID,\n- 'ansible-awx-job-id': str(self.task.id)\n- }\n+ pod_spec['metadata'] = deepmerge(\n+ pod_spec.get('metadata', {}),\n+ dict(name=self.pod_name,\n+ labels={\n+ 'ansible-awx': settings.INSTALL_UUID,\n+ 'ansible-awx-job-id': str(self.task.id)\n+ }))\n pod_spec['spec']['containers'][0]['name'] = self.pod_name\n \n return pod_spec\n", "issue": "In Kubernetes container groups, user-supplied .metadata.labels is ignored\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### SUMMARY\r\n\r\nDue to `awx/main/scheduler/kubernetes.py` overriding the `.metadata.labels` of the pods it creates (instead of merging them with the user-supplied `pod_spec_override`), features such as pod anti-affinity between AWX runners cannot work.\r\n\r\n##### ENVIRONMENT\r\n* AWX version: 15.0.1 (also present in devel)\r\n* AWX install method: openshift\r\n* Ansible version: irrelevant\r\n* Operating System: Linux (all versions)\r\n* Web Browser: irrelevant\r\n\r\n##### STEPS TO REPRODUCE\r\n\r\n1. Create a Kubernetes container group with the below piece of YAML as the pod spec override\r\n1. 
Run a job out of this instance group\r\n\r\n```yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n labels:\r\n deploymentconfig: ansible-runner\r\n namespace: wwp-test\r\nspec:\r\n affinity:\r\n podAntiAffinity:\r\n requiredDuringSchedulingIgnoredDuringExecution:\r\n - topologyKey: kubernetes.io/hostname\r\n labelSelector:\r\n matchExpressions:\r\n - key: deploymentconfig\r\n operator: In\r\n values:\r\n - ansible-runner\r\n # ...\r\n```\r\n\r\n##### EXPECTED RESULTS\r\n\r\nThe pods run by AWX as part of the container group should contain both the custom labels and the affinity structure.\r\n\r\n##### ACTUAL RESULTS\r\n\r\nOnly the affinity structure shows up in the `Kind: pod` Kubernetes objects, rendering the `podAntiAffinity` clause inoperative (for lack of a label to match on under `metdata`)\r\n\r\n##### ADDITIONAL INFORMATION\r\n\r\nThe cause is the equals sign on [this line](https://github.com/ansible/awx/blob/devel/awx/main/scheduler/kubernetes.py#L132).\n", "before_files": [{"content": "import collections\nimport time\nimport logging\nfrom base64 import b64encode\n\nfrom django.conf import settings\nfrom kubernetes import client, config\nfrom django.utils.functional import cached_property\n\nfrom awx.main.utils.common import parse_yaml_or_json\n\nlogger = logging.getLogger('awx.main.scheduler')\n\n\nclass PodManager(object):\n\n def __init__(self, task=None):\n self.task = task\n\n def deploy(self):\n if not self.credential.kubernetes:\n raise RuntimeError('Pod deployment cannot occur without a Kubernetes credential')\n\n self.kube_api.create_namespaced_pod(body=self.pod_definition,\n namespace=self.namespace,\n _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)\n\n num_retries = settings.AWX_CONTAINER_GROUP_POD_LAUNCH_RETRIES\n for retry_attempt in range(num_retries - 1):\n logger.debug(f\"Checking for pod {self.pod_name}. Attempt {retry_attempt + 1} of {num_retries}\")\n pod = self.kube_api.read_namespaced_pod(name=self.pod_name,\n namespace=self.namespace,\n _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)\n if pod.status.phase != 'Pending':\n break\n else:\n logger.debug(f\"Pod {self.pod_name} is Pending.\")\n time.sleep(settings.AWX_CONTAINER_GROUP_POD_LAUNCH_RETRY_DELAY)\n continue\n\n if pod.status.phase == 'Running':\n logger.debug(f\"Pod {self.pod_name} is online.\")\n return pod\n else:\n logger.warn(f\"Pod {self.pod_name} did not start. 
Status is {pod.status.phase}.\")\n\n @classmethod\n def list_active_jobs(self, instance_group):\n task = collections.namedtuple('Task', 'id instance_group')(\n id='',\n instance_group=instance_group\n )\n pm = PodManager(task)\n try:\n for pod in pm.kube_api.list_namespaced_pod(\n pm.namespace,\n label_selector='ansible-awx={}'.format(settings.INSTALL_UUID)\n ).to_dict().get('items', []):\n job = pod['metadata'].get('labels', {}).get('ansible-awx-job-id')\n if job:\n try:\n yield int(job)\n except ValueError:\n pass\n except Exception:\n logger.exception('Failed to list pods for container group {}'.format(instance_group))\n\n def delete(self):\n return self.kube_api.delete_namespaced_pod(name=self.pod_name,\n namespace=self.namespace,\n _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)\n\n @property\n def namespace(self):\n return self.pod_definition['metadata']['namespace']\n\n @property\n def credential(self):\n return self.task.instance_group.credential\n\n @cached_property\n def kube_config(self):\n return generate_tmp_kube_config(self.credential, self.namespace)\n\n @cached_property\n def kube_api(self):\n # this feels a little janky, but it's what k8s' own code does\n # internally when it reads kube config files from disk:\n # https://github.com/kubernetes-client/python-base/blob/0b208334ef0247aad9afcaae8003954423b61a0d/config/kube_config.py#L643\n loader = config.kube_config.KubeConfigLoader(\n config_dict=self.kube_config\n )\n cfg = type.__call__(client.Configuration)\n loader.load_and_set(cfg)\n return client.CoreV1Api(api_client=client.ApiClient(\n configuration=cfg\n ))\n\n @property\n def pod_name(self):\n return f\"awx-job-{self.task.id}\"\n\n @property\n def pod_definition(self):\n default_pod_spec = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"namespace\": settings.AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE\n },\n \"spec\": {\n \"containers\": [{\n \"image\": settings.AWX_CONTAINER_GROUP_DEFAULT_IMAGE,\n \"tty\": True,\n \"stdin\": True,\n \"imagePullPolicy\": \"Always\",\n \"args\": [\n 'sleep', 'infinity'\n ]\n }]\n }\n }\n\n pod_spec_override = {}\n if self.task and self.task.instance_group.pod_spec_override:\n pod_spec_override = parse_yaml_or_json(\n self.task.instance_group.pod_spec_override)\n pod_spec = {**default_pod_spec, **pod_spec_override}\n\n if self.task:\n pod_spec['metadata']['name'] = self.pod_name\n pod_spec['metadata']['labels'] = {\n 'ansible-awx': settings.INSTALL_UUID,\n 'ansible-awx-job-id': str(self.task.id)\n }\n pod_spec['spec']['containers'][0]['name'] = self.pod_name\n\n return pod_spec\n\n\ndef generate_tmp_kube_config(credential, namespace):\n host_input = credential.get_input('host')\n config = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Config\",\n \"preferences\": {},\n \"clusters\": [\n {\n \"name\": host_input,\n \"cluster\": {\n \"server\": host_input\n }\n }\n ],\n \"users\": [\n {\n \"name\": host_input,\n \"user\": {\n \"token\": credential.get_input('bearer_token')\n }\n }\n ],\n \"contexts\": [\n {\n \"name\": host_input,\n \"context\": {\n \"cluster\": host_input,\n \"user\": host_input,\n \"namespace\": namespace\n }\n }\n ],\n \"current-context\": host_input\n }\n\n if credential.get_input('verify_ssl') and 'ssl_ca_cert' in credential.inputs:\n config[\"clusters\"][0][\"cluster\"][\"certificate-authority-data\"] = b64encode(\n credential.get_input('ssl_ca_cert').encode() # encode to bytes\n ).decode() # decode the base64 data into a str\n else:\n 
config[\"clusters\"][0][\"cluster\"][\"insecure-skip-tls-verify\"] = True\n return config\n", "path": "awx/main/scheduler/kubernetes.py"}], "after_files": [{"content": "import collections\nimport time\nimport logging\nfrom base64 import b64encode\n\nfrom django.conf import settings\nfrom kubernetes import client, config\nfrom django.utils.functional import cached_property\n\nfrom awx.main.utils.common import parse_yaml_or_json\n\nlogger = logging.getLogger('awx.main.scheduler')\n\n\ndef deepmerge(a, b):\n \"\"\"\n Merge dict structures and return the result.\n\n >>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}\n >>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}\n >>> import pprint; pprint.pprint(deepmerge(a, b))\n {'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}\n \"\"\"\n if isinstance(a, dict) and isinstance(b, dict):\n return dict([(k, deepmerge(a.get(k), b.get(k)))\n for k in set(a.keys()).union(b.keys())])\n elif b is None:\n return a\n else:\n return b\n\n\nclass PodManager(object):\n\n def __init__(self, task=None):\n self.task = task\n\n def deploy(self):\n if not self.credential.kubernetes:\n raise RuntimeError('Pod deployment cannot occur without a Kubernetes credential')\n\n self.kube_api.create_namespaced_pod(body=self.pod_definition,\n namespace=self.namespace,\n _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)\n\n num_retries = settings.AWX_CONTAINER_GROUP_POD_LAUNCH_RETRIES\n for retry_attempt in range(num_retries - 1):\n logger.debug(f\"Checking for pod {self.pod_name}. Attempt {retry_attempt + 1} of {num_retries}\")\n pod = self.kube_api.read_namespaced_pod(name=self.pod_name,\n namespace=self.namespace,\n _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)\n if pod.status.phase != 'Pending':\n break\n else:\n logger.debug(f\"Pod {self.pod_name} is Pending.\")\n time.sleep(settings.AWX_CONTAINER_GROUP_POD_LAUNCH_RETRY_DELAY)\n continue\n\n if pod.status.phase == 'Running':\n logger.debug(f\"Pod {self.pod_name} is online.\")\n return pod\n else:\n logger.warn(f\"Pod {self.pod_name} did not start. 
Status is {pod.status.phase}.\")\n\n @classmethod\n def list_active_jobs(self, instance_group):\n task = collections.namedtuple('Task', 'id instance_group')(\n id='',\n instance_group=instance_group\n )\n pm = PodManager(task)\n try:\n for pod in pm.kube_api.list_namespaced_pod(\n pm.namespace,\n label_selector='ansible-awx={}'.format(settings.INSTALL_UUID)\n ).to_dict().get('items', []):\n job = pod['metadata'].get('labels', {}).get('ansible-awx-job-id')\n if job:\n try:\n yield int(job)\n except ValueError:\n pass\n except Exception:\n logger.exception('Failed to list pods for container group {}'.format(instance_group))\n\n def delete(self):\n return self.kube_api.delete_namespaced_pod(name=self.pod_name,\n namespace=self.namespace,\n _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)\n\n @property\n def namespace(self):\n return self.pod_definition['metadata']['namespace']\n\n @property\n def credential(self):\n return self.task.instance_group.credential\n\n @cached_property\n def kube_config(self):\n return generate_tmp_kube_config(self.credential, self.namespace)\n\n @cached_property\n def kube_api(self):\n # this feels a little janky, but it's what k8s' own code does\n # internally when it reads kube config files from disk:\n # https://github.com/kubernetes-client/python-base/blob/0b208334ef0247aad9afcaae8003954423b61a0d/config/kube_config.py#L643\n loader = config.kube_config.KubeConfigLoader(\n config_dict=self.kube_config\n )\n cfg = type.__call__(client.Configuration)\n loader.load_and_set(cfg)\n return client.CoreV1Api(api_client=client.ApiClient(\n configuration=cfg\n ))\n\n @property\n def pod_name(self):\n return f\"awx-job-{self.task.id}\"\n\n @property\n def pod_definition(self):\n default_pod_spec = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"namespace\": settings.AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE\n },\n \"spec\": {\n \"containers\": [{\n \"image\": settings.AWX_CONTAINER_GROUP_DEFAULT_IMAGE,\n \"tty\": True,\n \"stdin\": True,\n \"imagePullPolicy\": \"Always\",\n \"args\": [\n 'sleep', 'infinity'\n ]\n }]\n }\n }\n\n pod_spec_override = {}\n if self.task and self.task.instance_group.pod_spec_override:\n pod_spec_override = parse_yaml_or_json(\n self.task.instance_group.pod_spec_override)\n pod_spec = {**default_pod_spec, **pod_spec_override}\n\n if self.task:\n pod_spec['metadata'] = deepmerge(\n pod_spec.get('metadata', {}),\n dict(name=self.pod_name,\n labels={\n 'ansible-awx': settings.INSTALL_UUID,\n 'ansible-awx-job-id': str(self.task.id)\n }))\n pod_spec['spec']['containers'][0]['name'] = self.pod_name\n\n return pod_spec\n\n\ndef generate_tmp_kube_config(credential, namespace):\n host_input = credential.get_input('host')\n config = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Config\",\n \"preferences\": {},\n \"clusters\": [\n {\n \"name\": host_input,\n \"cluster\": {\n \"server\": host_input\n }\n }\n ],\n \"users\": [\n {\n \"name\": host_input,\n \"user\": {\n \"token\": credential.get_input('bearer_token')\n }\n }\n ],\n \"contexts\": [\n {\n \"name\": host_input,\n \"context\": {\n \"cluster\": host_input,\n \"user\": host_input,\n \"namespace\": namespace\n }\n }\n ],\n \"current-context\": host_input\n }\n\n if credential.get_input('verify_ssl') and 'ssl_ca_cert' in credential.inputs:\n config[\"clusters\"][0][\"cluster\"][\"certificate-authority-data\"] = b64encode(\n credential.get_input('ssl_ca_cert').encode() # encode to bytes\n ).decode() # decode the base64 data into a str\n else:\n 
config[\"clusters\"][0][\"cluster\"][\"insecure-skip-tls-verify\"] = True\n return config\n", "path": "awx/main/scheduler/kubernetes.py"}]}
| 2,412 | 452 |
gh_patches_debug_15976
|
rasdani/github-patches
|
git_diff
|
WordPress__openverse-api-938
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use smaller image for generating thumbnails for SMK
## Description
<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->
<!-- For example: "I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page." -->
Images linked in the `image_url`s for the SMK provider are all ~2MB. These are large enough to overload our thumbnail service. Some of these requests time out, so the frontend falls back to downloading the entire full-size image. The result is unacceptably long load times.
We should update the API to detect the SMK provider and modify the URL used for thumbnail generation.
The `image_url` that we receive has the form: https://iip.smk.dk/iiif/jp2/KKSgb5100_34.TIF.jp2/full/!2048,/0/default.jpg Note the `2048`.
The modified URL should be identical except for a reduced file size: https://iip.smk.dk/iiif/jp2/KKSgb5100_34.TIF.jp2/full/!400,/0/default.jpg
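For illustration, a minimal sketch of that rewrite (the API-side patch further down in this entry uses the same `re.sub` call; the hard-coded width of 400 stands in for the API's configured `THUMBNAIL_WIDTH_PX` setting):
```python
import re

# Stand-in for the API's configured thumbnail width (settings.THUMBNAIL_WIDTH_PX).
THUMBNAIL_WIDTH_PX = 400

image_url = "https://iip.smk.dk/iiif/jp2/KKSgb5100_34.TIF.jp2/full/!2048,/0/default.jpg"
if "iip.smk.dk" in image_url:
    # Replace the IIIF size segment (!2048,) with the smaller thumbnail width (!400,).
    image_url = re.sub(r"!\d+,", f"!{THUMBNAIL_WIDTH_PX},", image_url)

print(image_url)  # https://iip.smk.dk/iiif/jp2/KKSgb5100_34.TIF.jp2/full/!400,/0/default.jpg
```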
## Additional context
<!-- Add any other context about the problem here; or delete the section entirely. -->
This change should be a temporary quick-fix that will allow us to re-enable the SMK provider, so we can make these images available again as soon as possible.
In the longer term, https://github.com/WordPress/openverse-catalog/issues/698 tracks updating the provider to provide links to smaller files for the purpose of thumbnail generation.
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/api/views/image_views.py`
Content:
```
1 import io
2 import struct
3
4 from django.conf import settings
5 from django.http.response import FileResponse, HttpResponse
6 from django.utils.decorators import method_decorator
7 from rest_framework.decorators import action
8 from rest_framework.exceptions import NotFound
9 from rest_framework.response import Response
10
11 import piexif
12 import requests
13 from drf_yasg.utils import swagger_auto_schema
14 from PIL import Image as PILImage
15
16 from catalog.api.constants.media_types import IMAGE_TYPE
17 from catalog.api.docs.image_docs import (
18 ImageComplain,
19 ImageDetail,
20 ImageOembed,
21 ImageRelated,
22 ImageSearch,
23 ImageStats,
24 ImageThumbnail,
25 )
26 from catalog.api.models import Image
27 from catalog.api.serializers.image_serializers import (
28 ImageReportRequestSerializer,
29 ImageSearchRequestSerializer,
30 ImageSerializer,
31 OembedRequestSerializer,
32 OembedSerializer,
33 WatermarkRequestSerializer,
34 )
35 from catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer
36 from catalog.api.utils.exceptions import get_api_exception
37 from catalog.api.utils.throttle import (
38 AnonThumbnailRateThrottle,
39 OAuth2IdThumbnailRateThrottle,
40 )
41 from catalog.api.utils.watermark import watermark
42 from catalog.api.views.media_views import MediaViewSet
43
44
45 @method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), "list")
46 @method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), "stats")
47 @method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), "retrieve")
48 @method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), "related")
49 @method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), "report")
50 @method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), "oembed")
51 @method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), "thumbnail")
52 @method_decorator(swagger_auto_schema(auto_schema=None), "watermark")
53 class ImageViewSet(MediaViewSet):
54 """
55 Viewset for all endpoints pertaining to images.
56 """
57
58 model_class = Image
59 query_serializer_class = ImageSearchRequestSerializer
60 default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]
61 qa_index = "search-qa-image"
62
63 serializer_class = ImageSerializer
64
65 OEMBED_HEADERS = {
66 "User-Agent": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose="OEmbed"),
67 }
68
69 # Extra actions
70
71 @action(
72 detail=False,
73 url_path="oembed",
74 url_name="oembed",
75 serializer_class=OembedSerializer,
76 )
77 def oembed(self, request, *_, **__):
78 params = OembedRequestSerializer(data=request.query_params)
79 params.is_valid(raise_exception=True)
80
81 context = self.get_serializer_context()
82
83 url = params.validated_data["url"]
84 identifier = url.rsplit("/", 1)[1]
85 try:
86 image = self.get_queryset().get(identifier=identifier)
87 except Image.DoesNotExist:
88 return get_api_exception("Could not find image.", 404)
89 if not (image.height and image.width):
90 image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)
91 width, height = PILImage.open(io.BytesIO(image_file.content)).size
92 context |= {
93 "width": width,
94 "height": height,
95 }
96
97 serializer = self.get_serializer(image, context=context)
98 return Response(data=serializer.data)
99
100 @action(
101 detail=True,
102 url_path="thumb",
103 url_name="thumb",
104 serializer_class=MediaThumbnailRequestSerializer,
105 throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],
106 )
107 def thumbnail(self, request, *_, **__):
108 image = self.get_object()
109
110 image_url = image.url
111 if not image_url:
112 raise get_api_exception("Could not find image.", 404)
113
114 return super().thumbnail(image_url, request)
115
116 @action(detail=True, url_path="watermark", url_name="watermark")
117 def watermark(self, request, *_, **__):
118 if not settings.WATERMARK_ENABLED:
119 raise NotFound("The watermark feature is currently disabled.")
120
121 params = WatermarkRequestSerializer(data=request.query_params)
122 params.is_valid(raise_exception=True)
123
124 image = self.get_object()
125 image_url = image.url
126 image_info = {
127 attr: getattr(image, attr)
128 for attr in ["title", "creator", "license", "license_version"]
129 }
130
131 # Create the actual watermarked image.
132 watermarked, exif = watermark(image_url, image_info, params.data["watermark"])
133 # Re-insert EXIF metadata.
134 if exif:
135 # piexif dump raises InvalidImageDataError which is a child class
136 # of ValueError, and a struct error when the value is not
137 # between -2147483648 and 2147483647
138 # https://github.com/WordPress/openverse-api/issues/849
139 try:
140 exif_bytes = piexif.dump(exif)
141 except (struct.error, ValueError):
142 exif_bytes = None
143 else:
144 exif_bytes = None
145 img_bytes = io.BytesIO()
146 self._save_wrapper(watermarked, exif_bytes, img_bytes)
147
148 if params.data["embed_metadata"]:
149 # Embed ccREL metadata with XMP.
150 work_properties = {
151 "creator": image.creator,
152 "license_url": image.license_url,
153 "attribution": image.attribution,
154 "work_landing_page": image.foreign_landing_url,
155 "identifier": str(image.identifier),
156 }
157
158 # Import inside a function to allow server run without Exempi library
159 import libxmp
160
161 from catalog.api.utils import ccrel
162
163 try:
164 with_xmp = ccrel.embed_xmp_bytes(img_bytes, work_properties)
165 return FileResponse(with_xmp, content_type="image/jpeg")
166 except (libxmp.XMPError, AttributeError):
167 # Just send the EXIF-ified file if libxmp fails to add metadata
168 response = HttpResponse(content_type="image/jpeg")
169 self._save_wrapper(watermarked, exif_bytes, response)
170 return response
171 else:
172 response = HttpResponse(img_bytes, content_type="image/jpeg")
173 self._save_wrapper(watermarked, exif_bytes, response)
174 return response
175
176 @action(
177 detail=True,
178 methods=["post"],
179 serializer_class=ImageReportRequestSerializer,
180 )
181 def report(self, *args, **kwargs):
182 return super().report(*args, **kwargs)
183
184 # Helper functions
185
186 @staticmethod
187 def _save_wrapper(pil_img, exif_bytes, destination):
188 """
189 PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering
190 the code with branches.
191 """
192 if exif_bytes:
193 pil_img.save(destination, "jpeg", exif=exif_bytes)
194 else:
195 pil_img.save(destination, "jpeg")
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/catalog/api/views/image_views.py b/api/catalog/api/views/image_views.py
--- a/api/catalog/api/views/image_views.py
+++ b/api/catalog/api/views/image_views.py
@@ -1,4 +1,5 @@
import io
+import re
import struct
from django.conf import settings
@@ -111,6 +112,13 @@
if not image_url:
raise get_api_exception("Could not find image.", 404)
+ # Hotfix to use scaled down version of the image from SMK
+ # TODO Remove when this issue is addressed:
+ # TODO https://github.com/WordPress/openverse-catalog/issues/698
+ if "iip.smk.dk" in image_url:
+ width = settings.THUMBNAIL_WIDTH_PX
+ image_url = re.sub(r"!\d+,", f"!{width},", image_url)
+
return super().thumbnail(image_url, request)
@action(detail=True, url_path="watermark", url_name="watermark")
|
{"golden_diff": "diff --git a/api/catalog/api/views/image_views.py b/api/catalog/api/views/image_views.py\n--- a/api/catalog/api/views/image_views.py\n+++ b/api/catalog/api/views/image_views.py\n@@ -1,4 +1,5 @@\n import io\n+import re\n import struct\n \n from django.conf import settings\n@@ -111,6 +112,13 @@\n if not image_url:\n raise get_api_exception(\"Could not find image.\", 404)\n \n+ # Hotfix to use scaled down version of the image from SMK\n+ # TODO Remove when this issue is addressed:\n+ # TODO https://github.com/WordPress/openverse-catalog/issues/698\n+ if \"iip.smk.dk\" in image_url:\n+ width = settings.THUMBNAIL_WIDTH_PX\n+ image_url = re.sub(r\"!\\d+,\", f\"!{width},\", image_url)\n+\n return super().thumbnail(image_url, request)\n \n @action(detail=True, url_path=\"watermark\", url_name=\"watermark\")\n", "issue": "Use smaller image for generating thumbnails for SMK\n## Description\r\n<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->\r\n<!-- For example: \"I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page.\" -->\r\nImages linked in the `image_url`s for the SMK provider are all ~2MB. These are large enough to overload our thumbnail service. Some of these requests timeout, such that the frontend falls back to downloading the entire full image. The result is unacceptable load times.\r\n\r\nWe should update the API to detect the SMK provider and modify the URL used for thumbnail generation.\r\n\r\n`image_url` that we receive has the form: https://iip.smk.dk/iiif/jp2/KKSgb5100_34.TIF.jp2/full/!2048,/0/default.jpg Note the`2048`.\r\n\r\nThe modified URL should be identical, except for a reduced filesize: https://iip.smk.dk/iiif/jp2/KKSgb5100_34.TIF.jp2/full/!400,/0/default.jpg\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem here; or delete the section entirely. -->\r\nThis change should be a temporary quick-fix that will allow us to re-enable the SMK provider, so we can make these images available again as soon as possible.\r\n\r\nIn the longer term, https://github.com/WordPress/openverse-catalog/issues/698 tracks updating the provider to provide links to smaller files for the purpose of thumbnail generation.\r\n\r\n## Resolution\r\n<!-- Replace the [ ] with [x] to check the box. 
-->\r\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\r\n\n", "before_files": [{"content": "import io\nimport struct\n\nfrom django.conf import settings\nfrom django.http.response import FileResponse, HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\n\nimport piexif\nimport requests\nfrom drf_yasg.utils import swagger_auto_schema\nfrom PIL import Image as PILImage\n\nfrom catalog.api.constants.media_types import IMAGE_TYPE\nfrom catalog.api.docs.image_docs import (\n ImageComplain,\n ImageDetail,\n ImageOembed,\n ImageRelated,\n ImageSearch,\n ImageStats,\n ImageThumbnail,\n)\nfrom catalog.api.models import Image\nfrom catalog.api.serializers.image_serializers import (\n ImageReportRequestSerializer,\n ImageSearchRequestSerializer,\n ImageSerializer,\n OembedRequestSerializer,\n OembedSerializer,\n WatermarkRequestSerializer,\n)\nfrom catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer\nfrom catalog.api.utils.exceptions import get_api_exception\nfrom catalog.api.utils.throttle import (\n AnonThumbnailRateThrottle,\n OAuth2IdThumbnailRateThrottle,\n)\nfrom catalog.api.utils.watermark import watermark\nfrom catalog.api.views.media_views import MediaViewSet\n\n\n@method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), \"list\")\n@method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), \"stats\")\n@method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), \"retrieve\")\n@method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), \"related\")\n@method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), \"report\")\n@method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), \"oembed\")\n@method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), \"thumbnail\")\n@method_decorator(swagger_auto_schema(auto_schema=None), \"watermark\")\nclass ImageViewSet(MediaViewSet):\n \"\"\"\n Viewset for all endpoints pertaining to images.\n \"\"\"\n\n model_class = Image\n query_serializer_class = ImageSearchRequestSerializer\n default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]\n qa_index = \"search-qa-image\"\n\n serializer_class = ImageSerializer\n\n OEMBED_HEADERS = {\n \"User-Agent\": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose=\"OEmbed\"),\n }\n\n # Extra actions\n\n @action(\n detail=False,\n url_path=\"oembed\",\n url_name=\"oembed\",\n serializer_class=OembedSerializer,\n )\n def oembed(self, request, *_, **__):\n params = OembedRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n context = self.get_serializer_context()\n\n url = params.validated_data[\"url\"]\n identifier = url.rsplit(\"/\", 1)[1]\n try:\n image = self.get_queryset().get(identifier=identifier)\n except Image.DoesNotExist:\n return get_api_exception(\"Could not find image.\", 404)\n if not (image.height and image.width):\n image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)\n width, height = PILImage.open(io.BytesIO(image_file.content)).size\n context |= {\n \"width\": width,\n \"height\": height,\n }\n\n serializer = self.get_serializer(image, context=context)\n return Response(data=serializer.data)\n\n @action(\n detail=True,\n url_path=\"thumb\",\n url_name=\"thumb\",\n serializer_class=MediaThumbnailRequestSerializer,\n throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],\n 
)\n def thumbnail(self, request, *_, **__):\n image = self.get_object()\n\n image_url = image.url\n if not image_url:\n raise get_api_exception(\"Could not find image.\", 404)\n\n return super().thumbnail(image_url, request)\n\n @action(detail=True, url_path=\"watermark\", url_name=\"watermark\")\n def watermark(self, request, *_, **__):\n if not settings.WATERMARK_ENABLED:\n raise NotFound(\"The watermark feature is currently disabled.\")\n\n params = WatermarkRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n image = self.get_object()\n image_url = image.url\n image_info = {\n attr: getattr(image, attr)\n for attr in [\"title\", \"creator\", \"license\", \"license_version\"]\n }\n\n # Create the actual watermarked image.\n watermarked, exif = watermark(image_url, image_info, params.data[\"watermark\"])\n # Re-insert EXIF metadata.\n if exif:\n # piexif dump raises InvalidImageDataError which is a child class\n # of ValueError, and a struct error when the value is not\n # between -2147483648 and 2147483647\n # https://github.com/WordPress/openverse-api/issues/849\n try:\n exif_bytes = piexif.dump(exif)\n except (struct.error, ValueError):\n exif_bytes = None\n else:\n exif_bytes = None\n img_bytes = io.BytesIO()\n self._save_wrapper(watermarked, exif_bytes, img_bytes)\n\n if params.data[\"embed_metadata\"]:\n # Embed ccREL metadata with XMP.\n work_properties = {\n \"creator\": image.creator,\n \"license_url\": image.license_url,\n \"attribution\": image.attribution,\n \"work_landing_page\": image.foreign_landing_url,\n \"identifier\": str(image.identifier),\n }\n\n # Import inside a function to allow server run without Exempi library\n import libxmp\n\n from catalog.api.utils import ccrel\n\n try:\n with_xmp = ccrel.embed_xmp_bytes(img_bytes, work_properties)\n return FileResponse(with_xmp, content_type=\"image/jpeg\")\n except (libxmp.XMPError, AttributeError):\n # Just send the EXIF-ified file if libxmp fails to add metadata\n response = HttpResponse(content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n else:\n response = HttpResponse(img_bytes, content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n\n @action(\n detail=True,\n methods=[\"post\"],\n serializer_class=ImageReportRequestSerializer,\n )\n def report(self, *args, **kwargs):\n return super().report(*args, **kwargs)\n\n # Helper functions\n\n @staticmethod\n def _save_wrapper(pil_img, exif_bytes, destination):\n \"\"\"\n PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering\n the code with branches.\n \"\"\"\n if exif_bytes:\n pil_img.save(destination, \"jpeg\", exif=exif_bytes)\n else:\n pil_img.save(destination, \"jpeg\")\n", "path": "api/catalog/api/views/image_views.py"}], "after_files": [{"content": "import io\nimport re\nimport struct\n\nfrom django.conf import settings\nfrom django.http.response import FileResponse, HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\n\nimport piexif\nimport requests\nfrom drf_yasg.utils import swagger_auto_schema\nfrom PIL import Image as PILImage\n\nfrom catalog.api.constants.media_types import IMAGE_TYPE\nfrom catalog.api.docs.image_docs import (\n ImageComplain,\n ImageDetail,\n ImageOembed,\n ImageRelated,\n ImageSearch,\n ImageStats,\n ImageThumbnail,\n)\nfrom 
catalog.api.models import Image\nfrom catalog.api.serializers.image_serializers import (\n ImageReportRequestSerializer,\n ImageSearchRequestSerializer,\n ImageSerializer,\n OembedRequestSerializer,\n OembedSerializer,\n WatermarkRequestSerializer,\n)\nfrom catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer\nfrom catalog.api.utils.exceptions import get_api_exception\nfrom catalog.api.utils.throttle import (\n AnonThumbnailRateThrottle,\n OAuth2IdThumbnailRateThrottle,\n)\nfrom catalog.api.utils.watermark import watermark\nfrom catalog.api.views.media_views import MediaViewSet\n\n\n@method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), \"list\")\n@method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), \"stats\")\n@method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), \"retrieve\")\n@method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), \"related\")\n@method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), \"report\")\n@method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), \"oembed\")\n@method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), \"thumbnail\")\n@method_decorator(swagger_auto_schema(auto_schema=None), \"watermark\")\nclass ImageViewSet(MediaViewSet):\n \"\"\"\n Viewset for all endpoints pertaining to images.\n \"\"\"\n\n model_class = Image\n query_serializer_class = ImageSearchRequestSerializer\n default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]\n qa_index = \"search-qa-image\"\n\n serializer_class = ImageSerializer\n\n OEMBED_HEADERS = {\n \"User-Agent\": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose=\"OEmbed\"),\n }\n\n # Extra actions\n\n @action(\n detail=False,\n url_path=\"oembed\",\n url_name=\"oembed\",\n serializer_class=OembedSerializer,\n )\n def oembed(self, request, *_, **__):\n params = OembedRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n context = self.get_serializer_context()\n\n url = params.validated_data[\"url\"]\n identifier = url.rsplit(\"/\", 1)[1]\n try:\n image = self.get_queryset().get(identifier=identifier)\n except Image.DoesNotExist:\n return get_api_exception(\"Could not find image.\", 404)\n if not (image.height and image.width):\n image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)\n width, height = PILImage.open(io.BytesIO(image_file.content)).size\n context |= {\n \"width\": width,\n \"height\": height,\n }\n\n serializer = self.get_serializer(image, context=context)\n return Response(data=serializer.data)\n\n @action(\n detail=True,\n url_path=\"thumb\",\n url_name=\"thumb\",\n serializer_class=MediaThumbnailRequestSerializer,\n throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],\n )\n def thumbnail(self, request, *_, **__):\n image = self.get_object()\n\n image_url = image.url\n if not image_url:\n raise get_api_exception(\"Could not find image.\", 404)\n\n # Hotfix to use scaled down version of the image from SMK\n # TODO Remove when this issue is addressed:\n # TODO https://github.com/WordPress/openverse-catalog/issues/698\n if \"iip.smk.dk\" in image_url:\n width = settings.THUMBNAIL_WIDTH_PX\n image_url = re.sub(r\"!\\d+,\", f\"!{width},\", image_url)\n\n return super().thumbnail(image_url, request)\n\n @action(detail=True, url_path=\"watermark\", url_name=\"watermark\")\n def watermark(self, request, *_, **__):\n if not settings.WATERMARK_ENABLED:\n raise NotFound(\"The watermark feature is currently disabled.\")\n\n params = 
WatermarkRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n image = self.get_object()\n image_url = image.url\n image_info = {\n attr: getattr(image, attr)\n for attr in [\"title\", \"creator\", \"license\", \"license_version\"]\n }\n\n # Create the actual watermarked image.\n watermarked, exif = watermark(image_url, image_info, params.data[\"watermark\"])\n # Re-insert EXIF metadata.\n if exif:\n # piexif dump raises InvalidImageDataError which is a child class\n # of ValueError, and a struct error when the value is not\n # between -2147483648 and 2147483647\n # https://github.com/WordPress/openverse-api/issues/849\n try:\n exif_bytes = piexif.dump(exif)\n except (struct.error, ValueError):\n exif_bytes = None\n else:\n exif_bytes = None\n img_bytes = io.BytesIO()\n self._save_wrapper(watermarked, exif_bytes, img_bytes)\n\n if params.data[\"embed_metadata\"]:\n # Embed ccREL metadata with XMP.\n work_properties = {\n \"creator\": image.creator,\n \"license_url\": image.license_url,\n \"attribution\": image.attribution,\n \"work_landing_page\": image.foreign_landing_url,\n \"identifier\": str(image.identifier),\n }\n\n # Import inside a function to allow server run without Exempi library\n import libxmp\n\n from catalog.api.utils import ccrel\n\n try:\n with_xmp = ccrel.embed_xmp_bytes(img_bytes, work_properties)\n return FileResponse(with_xmp, content_type=\"image/jpeg\")\n except (libxmp.XMPError, AttributeError):\n # Just send the EXIF-ified file if libxmp fails to add metadata\n response = HttpResponse(content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n else:\n response = HttpResponse(img_bytes, content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n\n @action(\n detail=True,\n methods=[\"post\"],\n serializer_class=ImageReportRequestSerializer,\n )\n def report(self, *args, **kwargs):\n return super().report(*args, **kwargs)\n\n # Helper functions\n\n @staticmethod\n def _save_wrapper(pil_img, exif_bytes, destination):\n \"\"\"\n PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering\n the code with branches.\n \"\"\"\n if exif_bytes:\n pil_img.save(destination, \"jpeg\", exif=exif_bytes)\n else:\n pil_img.save(destination, \"jpeg\")\n", "path": "api/catalog/api/views/image_views.py"}]}
| 2,599 | 232 |
gh_patches_debug_6310
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-1421
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPI tarball missing required files
### Describe the bug
The tarball uploaded to PyPI does not contain the `requirements/*` files, which are required to run the `setup.py` file.
### Reproduction steps
```bash
$ wget https://files.pythonhosted.org/packages/source/k/kornia/kornia-0.6.0.tar.gz
$ tar zxf kornia-0.6.0.tar.gz
$ cd kornia-0.6.0
$ python setup.py install
...
Traceback (most recent call last):
File "setup.py", line 43, in <module>
"x": load_requirements("requirements/x.txt"),
File "setup.py", line 38, in load_requirements
with open(filename) as f:
FileNotFoundError: [Errno 2] No such file or directory: 'requirements/x.txt'
```
### Expected behavior
I would expect the `setup.py` to function correctly. I believe there's a setuptools option to control which files get included in the upload tarball.
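As a rough sketch of one such option (the accepted patch later in this entry adds exactly this kind of `data_files` entry to `setup()`; listing the files in a `MANIFEST.in` would be another route), with placeholder metadata rather than kornia's real values:
```python
from setuptools import find_packages, setup

setup(
    name="example-package",  # placeholder, not kornia's actual metadata
    version="0.0.0",         # placeholder
    packages=find_packages(),
    # Ship the requirements files with the distribution so that setup.py can
    # read them from an unpacked source tarball.
    data_files=[("", ["requirements/x.txt", "requirements/dev.txt"])],
)
```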
### Environment
```shell
- PyTorch Version (e.g., 1.0): 1.10
- OS (e.g., Linux): macOS
- How you installed PyTorch (`conda`, `pip`, source): `spack`
- Build command you used (if compiling from source): `python setup.py install`
- Python version: 3.8.11
- CUDA/cuDNN version: N/A
- GPU models and configuration: N/A
- Any other relevant information: N/A
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Welcome to the Kornia setup.py.
2 #
3 import re
4 import sys
5
6 # Make sure that kornia is running on Python 3.6.0 or later
7 # (to avoid running into this bug: https://bugs.python.org/issue29246)
8
9 if sys.version_info < (3, 6, 0):
10 raise RuntimeError("Kornia requires Python 3.6.0 or later.")
11
12
13 from setuptools import find_packages, setup
14
15
16 def find_version(file_path: str) -> str:
17 version_file = open(file_path).read()
18 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
19 if not version_match:
20 raise RuntimeError(f"Unable to find version string in {file_path}")
21 return version_match.group(1)
22
23
24 VERSION = find_version("kornia/_version.py")
25
26
27 # NOTE: kornia MUST only require PyTorch
28 requirements = [
29 'torch>=1.8.1', 'packaging',
30 ]
31
32 # open readme file and set long description
33 with open("README.md", encoding="utf-8") as fh:
34 long_description = fh.read()
35
36
37 def load_requirements(filename: str):
38 with open(filename) as f:
39 return [x.strip() for x in f.readlines() if "-r" != x[0:2]]
40
41
42 requirements_extras = {
43 "x": load_requirements("requirements/x.txt"),
44 "dev": load_requirements("requirements/dev.txt")
45 }
46 requirements_extras["all"] = requirements_extras["x"] + requirements_extras["dev"]
47
48
49 if __name__ == '__main__':
50 setup(
51 name='kornia',
52 version=VERSION,
53 author='Edgar Riba',
54 author_email='[email protected]',
55 url='https://www.kornia.org',
56 download_url='https://github.com/kornia/kornia',
57 license='Apache License 2.0',
58 description='Open Source Differentiable Computer Vision Library for PyTorch',
59 long_description=long_description,
60 long_description_content_type='text/markdown',
61 python_requires='>=3.6',
62 setup_requires=['pytest-runner'],
63 tests_require=['pytest'],
64 packages=find_packages(exclude=('docs', 'test', 'examples')),
65 package_data={"kornia": ["py.typed"]},
66 zip_safe=True,
67 install_requires=requirements,
68 extras_require=requirements_extras,
69 keywords=['computer vision', 'deep learning', 'pytorch'],
70 project_urls={
71 "Bug Tracker": "https://github.com/kornia/kornia/issues",
72 "Documentation": "https://kornia.readthedocs.io/en/latest",
73 "Source Code": "https://github.com/kornia/kornia",
74 },
75 classifiers=[
76 'Environment :: GPU',
77 'Environment :: Console',
78 'Natural Language :: English',
79 # How mature is this project? Common values are
80 # 3 - Alpha, 4 - Beta, 5 - Production/Stable
81 'Development Status :: 4 - Beta',
82 # Indicate who your project is intended for
83 'Intended Audience :: Developers',
84 'Intended Audience :: Education',
85 'Intended Audience :: Science/Research',
86 'Intended Audience :: Information Technology',
87 'Topic :: Software Development :: Libraries',
88 'Topic :: Scientific/Engineering :: Artificial Intelligence',
89 'Topic :: Scientific/Engineering :: Image Processing',
90 # Pick your license as you wish
91 'License :: OSI Approved :: Apache Software License',
92 'Operating System :: OS Independent',
93 # Specify the Python versions you support here. In particular, ensure
94 # that you indicate whether you support Python 2, Python 3 or both.
95 'Programming Language :: Python :: 3',
96 'Programming Language :: Python :: 3.6',
97 'Programming Language :: Python :: 3.7',
98 'Programming Language :: Python :: 3.8',
99 ],
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,6 +63,7 @@
tests_require=['pytest'],
packages=find_packages(exclude=('docs', 'test', 'examples')),
package_data={"kornia": ["py.typed"]},
+ data_files=[('', ['requirements/x.txt', 'requirements/dev.txt'])],
zip_safe=True,
install_requires=requirements,
extras_require=requirements_extras,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,6 +63,7 @@\n tests_require=['pytest'],\n packages=find_packages(exclude=('docs', 'test', 'examples')),\n package_data={\"kornia\": [\"py.typed\"]},\n+ data_files=[('', ['requirements/x.txt', 'requirements/dev.txt'])],\n zip_safe=True,\n install_requires=requirements,\n extras_require=requirements_extras,\n", "issue": "PyPI tarball missing required files\n### Describe the bug\r\n\r\nThe tarball uploaded to PyPI does not contain `requirements/*` files which are required to run the `setup.py` file.\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\n$ wget https://files.pythonhosted.org/packages/source/k/kornia/kornia-0.6.0.tar.gz\r\n$ tar zxf kornia-0.6.0.tar.gz\r\n$ cd kornia-0.6.0\r\n$ python setup.py install\r\n...\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 43, in <module>\r\n \"x\": load_requirements(\"requirements/x.txt\"),\r\n File \"setup.py\", line 38, in load_requirements\r\n with open(filename) as f:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'requirements/x.txt'\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nI would expect the `setup.py` to function correctly. I believe there's a setuptools option to control which files get included in the upload tarball.\r\n\r\n### Environment\r\n\r\n```shell\r\n- PyTorch Version (e.g., 1.0): 1.10\r\n- OS (e.g., Linux): macOS\r\n- How you installed PyTorch (`conda`, `pip`, source): `spack`\r\n- Build command you used (if compiling from source): `python setup.py install`\r\n- Python version: 3.8.11\r\n- CUDA/cuDNN version: N/A\r\n- GPU models and configuration: N/A\r\n- Any other relevant information: N/A\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# Welcome to the Kornia setup.py.\n#\nimport re\nimport sys\n\n# Make sure that kornia is running on Python 3.6.0 or later\n# (to avoid running into this bug: https://bugs.python.org/issue29246)\n\nif sys.version_info < (3, 6, 0):\n raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n\n\nfrom setuptools import find_packages, setup\n\n\ndef find_version(file_path: str) -> str:\n version_file = open(file_path).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if not version_match:\n raise RuntimeError(f\"Unable to find version string in {file_path}\")\n return version_match.group(1)\n\n\nVERSION = find_version(\"kornia/_version.py\")\n\n\n# NOTE: kornia MUST only require PyTorch\nrequirements = [\n 'torch>=1.8.1', 'packaging',\n]\n\n# open readme file and set long description\nwith open(\"README.md\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\n\ndef load_requirements(filename: str):\n with open(filename) as f:\n return [x.strip() for x in f.readlines() if \"-r\" != x[0:2]]\n\n\nrequirements_extras = {\n \"x\": load_requirements(\"requirements/x.txt\"),\n \"dev\": load_requirements(\"requirements/dev.txt\")\n}\nrequirements_extras[\"all\"] = requirements_extras[\"x\"] + requirements_extras[\"dev\"]\n\n\nif __name__ == '__main__':\n setup(\n name='kornia',\n version=VERSION,\n author='Edgar Riba',\n author_email='[email protected]',\n url='https://www.kornia.org',\n download_url='https://github.com/kornia/kornia',\n license='Apache License 2.0',\n description='Open Source Differentiable Computer Vision Library for PyTorch',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires='>=3.6',\n 
setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n packages=find_packages(exclude=('docs', 'test', 'examples')),\n package_data={\"kornia\": [\"py.typed\"]},\n zip_safe=True,\n install_requires=requirements,\n extras_require=requirements_extras,\n keywords=['computer vision', 'deep learning', 'pytorch'],\n project_urls={\n \"Bug Tracker\": \"https://github.com/kornia/kornia/issues\",\n \"Documentation\": \"https://kornia.readthedocs.io/en/latest\",\n \"Source Code\": \"https://github.com/kornia/kornia\",\n },\n classifiers=[\n 'Environment :: GPU',\n 'Environment :: Console',\n 'Natural Language :: English',\n # How mature is this project? Common values are\n # 3 - Alpha, 4 - Beta, 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Image Processing',\n # Pick your license as you wish\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n )\n", "path": "setup.py"}], "after_files": [{"content": "# Welcome to the Kornia setup.py.\n#\nimport re\nimport sys\n\n# Make sure that kornia is running on Python 3.6.0 or later\n# (to avoid running into this bug: https://bugs.python.org/issue29246)\n\nif sys.version_info < (3, 6, 0):\n raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n\n\nfrom setuptools import find_packages, setup\n\n\ndef find_version(file_path: str) -> str:\n version_file = open(file_path).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if not version_match:\n raise RuntimeError(f\"Unable to find version string in {file_path}\")\n return version_match.group(1)\n\n\nVERSION = find_version(\"kornia/_version.py\")\n\n\n# NOTE: kornia MUST only require PyTorch\nrequirements = [\n 'torch>=1.8.1', 'packaging',\n]\n\n# open readme file and set long description\nwith open(\"README.md\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\n\ndef load_requirements(filename: str):\n with open(filename) as f:\n return [x.strip() for x in f.readlines() if \"-r\" != x[0:2]]\n\n\nrequirements_extras = {\n \"x\": load_requirements(\"requirements/x.txt\"),\n \"dev\": load_requirements(\"requirements/dev.txt\")\n}\nrequirements_extras[\"all\"] = requirements_extras[\"x\"] + requirements_extras[\"dev\"]\n\n\nif __name__ == '__main__':\n setup(\n name='kornia',\n version=VERSION,\n author='Edgar Riba',\n author_email='[email protected]',\n url='https://www.kornia.org',\n download_url='https://github.com/kornia/kornia',\n license='Apache License 2.0',\n description='Open Source Differentiable Computer Vision Library for PyTorch',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires='>=3.6',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n packages=find_packages(exclude=('docs', 'test', 'examples')),\n package_data={\"kornia\": 
[\"py.typed\"]},\n data_files=[('', ['requirements/x.txt', 'requirements/dev.txt'])],\n zip_safe=True,\n install_requires=requirements,\n extras_require=requirements_extras,\n keywords=['computer vision', 'deep learning', 'pytorch'],\n project_urls={\n \"Bug Tracker\": \"https://github.com/kornia/kornia/issues\",\n \"Documentation\": \"https://kornia.readthedocs.io/en/latest\",\n \"Source Code\": \"https://github.com/kornia/kornia\",\n },\n classifiers=[\n 'Environment :: GPU',\n 'Environment :: Console',\n 'Natural Language :: English',\n # How mature is this project? Common values are\n # 3 - Alpha, 4 - Beta, 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Image Processing',\n # Pick your license as you wish\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n )\n", "path": "setup.py"}]}
| 1,654 | 103 |
gh_patches_debug_16698
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-2462
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Outdoor - Orientations
- [x] Add the field names to the filters ("Ajouter les noms des champs dans les filtres")
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mapentity/filters.py`
Content:
```
1 from django.db.models.fields.related import ManyToOneRel
2 from django.conf import settings
3
4 from django_filters import FilterSet, Filter
5 from django_filters.filterset import get_model_field
6 from django.contrib.gis import forms
7
8 from .settings import app_settings, API_SRID
9 from .widgets import HiddenGeometryWidget
10
11
12 class PolygonFilter(Filter):
13
14 field_class = forms.PolygonField
15
16 def __init__(self, *args, **kwargs):
17 kwargs.setdefault('field_name', app_settings['GEOM_FIELD_NAME'])
18 kwargs.setdefault('widget', HiddenGeometryWidget)
19 kwargs.setdefault('lookup_expr', 'intersects')
20 super(PolygonFilter, self).__init__(*args, **kwargs)
21
22
23 class PythonPolygonFilter(PolygonFilter):
24
25 def filter(self, qs, value):
26 if not value:
27 return qs
28 if not value.srid:
29 value.srid = API_SRID
30 value.transform(settings.SRID)
31 filtered = []
32 for o in qs.all():
33 geom = getattr(o, self.field_name)
34 if geom and geom.valid and not geom.empty:
35 if getattr(geom, self.lookup_expr)(value):
36 filtered.append(o.pk)
37 else:
38 filtered.append(o.pk)
39 return qs.filter(pk__in=filtered)
40
41
42 class BaseMapEntityFilterSet(FilterSet):
43 def __init__(self, *args, **kwargs):
44 super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)
45 self.__bypass_labels()
46
47 def __bypass_labels(self):
48 """
49 These hacks allow to bypass field labels. Using either placeholders,
50 empty choices label, etc. This allows to greatly save space in form layout,
51 which is required for concise filter forms.
52 """
53 for fieldname in self.base_filters.keys():
54 field = self.form.fields[fieldname]
55 if isinstance(field, forms.MultiValueField):
56 for i, widget in enumerate(field.widget.widgets):
57 self.__set_placeholder(field.fields[i], widget)
58 elif isinstance(field, forms.ChoiceField):
59 field.empty_label = field.label
60 self.__set_placeholder(field, field.widget)
61 elif isinstance(field, forms.NullBooleanField):
62 choices = [(u'1', field.label)] + field.widget.choices[1:]
63 field.widget.choices = choices
64 self.__set_placeholder(field, field.widget)
65 else:
66 self.__set_placeholder(field, field.widget)
67
68 def __set_placeholder(self, field, widget):
69 field.help_text = '' # Hide help text
70 widget.attrs['placeholder'] = field.label
71 widget.attrs['data-placeholder'] = field.label
72 widget.attrs['title'] = field.label
73 widget.attrs['data-label'] = field.label
74
75 @classmethod
76 def add_filter(cls, name, filter_=None):
77 field = get_model_field(cls._meta.model, name)
78 if filter_ is None:
79 if isinstance(field, ManyToOneRel):
80 filter_ = cls.filter_for_reverse_field(field, name)
81 else:
82 filter_ = cls.filter_for_field(field, name)
83 cls.base_filters[name] = filter_
84
85 @classmethod
86 def add_filters(cls, filters):
87 for name, filter_ in filters.items():
88 filter_.field_name = name
89 cls.add_filter(name, filter_)
90
91
92 class MapEntityFilterSet(BaseMapEntityFilterSet):
93 bbox = PolygonFilter()
94
95 class Meta:
96 fields = ['bbox']
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mapentity/filters.py b/mapentity/filters.py
--- a/mapentity/filters.py
+++ b/mapentity/filters.py
@@ -2,6 +2,7 @@
from django.conf import settings
from django_filters import FilterSet, Filter
+from django_filters.fields import ChoiceField
from django_filters.filterset import get_model_field
from django.contrib.gis import forms
@@ -42,6 +43,9 @@
class BaseMapEntityFilterSet(FilterSet):
def __init__(self, *args, **kwargs):
super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)
+ for filter_ in self.filters.values():
+ if filter_.field_class == ChoiceField:
+ filter_.extra.setdefault('empty_label', filter_.label)
self.__bypass_labels()
def __bypass_labels(self):
|
{"golden_diff": "diff --git a/mapentity/filters.py b/mapentity/filters.py\n--- a/mapentity/filters.py\n+++ b/mapentity/filters.py\n@@ -2,6 +2,7 @@\n from django.conf import settings\n \n from django_filters import FilterSet, Filter\n+from django_filters.fields import ChoiceField\n from django_filters.filterset import get_model_field\n from django.contrib.gis import forms\n \n@@ -42,6 +43,9 @@\n class BaseMapEntityFilterSet(FilterSet):\n def __init__(self, *args, **kwargs):\n super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)\n+ for filter_ in self.filters.values():\n+ if filter_.field_class == ChoiceField:\n+ filter_.extra.setdefault('empty_label', filter_.label)\n self.__bypass_labels()\n \n def __bypass_labels(self):\n", "issue": "Outdoor - Orientations\n- [x] Ajouter les noms des champs dans les filtres\n", "before_files": [{"content": "from django.db.models.fields.related import ManyToOneRel\nfrom django.conf import settings\n\nfrom django_filters import FilterSet, Filter\nfrom django_filters.filterset import get_model_field\nfrom django.contrib.gis import forms\n\nfrom .settings import app_settings, API_SRID\nfrom .widgets import HiddenGeometryWidget\n\n\nclass PolygonFilter(Filter):\n\n field_class = forms.PolygonField\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('field_name', app_settings['GEOM_FIELD_NAME'])\n kwargs.setdefault('widget', HiddenGeometryWidget)\n kwargs.setdefault('lookup_expr', 'intersects')\n super(PolygonFilter, self).__init__(*args, **kwargs)\n\n\nclass PythonPolygonFilter(PolygonFilter):\n\n def filter(self, qs, value):\n if not value:\n return qs\n if not value.srid:\n value.srid = API_SRID\n value.transform(settings.SRID)\n filtered = []\n for o in qs.all():\n geom = getattr(o, self.field_name)\n if geom and geom.valid and not geom.empty:\n if getattr(geom, self.lookup_expr)(value):\n filtered.append(o.pk)\n else:\n filtered.append(o.pk)\n return qs.filter(pk__in=filtered)\n\n\nclass BaseMapEntityFilterSet(FilterSet):\n def __init__(self, *args, **kwargs):\n super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)\n self.__bypass_labels()\n\n def __bypass_labels(self):\n \"\"\"\n These hacks allow to bypass field labels. Using either placeholders,\n empty choices label, etc. 
This allows to greatly save space in form layout,\n which is required for concise filter forms.\n \"\"\"\n for fieldname in self.base_filters.keys():\n field = self.form.fields[fieldname]\n if isinstance(field, forms.MultiValueField):\n for i, widget in enumerate(field.widget.widgets):\n self.__set_placeholder(field.fields[i], widget)\n elif isinstance(field, forms.ChoiceField):\n field.empty_label = field.label\n self.__set_placeholder(field, field.widget)\n elif isinstance(field, forms.NullBooleanField):\n choices = [(u'1', field.label)] + field.widget.choices[1:]\n field.widget.choices = choices\n self.__set_placeholder(field, field.widget)\n else:\n self.__set_placeholder(field, field.widget)\n\n def __set_placeholder(self, field, widget):\n field.help_text = '' # Hide help text\n widget.attrs['placeholder'] = field.label\n widget.attrs['data-placeholder'] = field.label\n widget.attrs['title'] = field.label\n widget.attrs['data-label'] = field.label\n\n @classmethod\n def add_filter(cls, name, filter_=None):\n field = get_model_field(cls._meta.model, name)\n if filter_ is None:\n if isinstance(field, ManyToOneRel):\n filter_ = cls.filter_for_reverse_field(field, name)\n else:\n filter_ = cls.filter_for_field(field, name)\n cls.base_filters[name] = filter_\n\n @classmethod\n def add_filters(cls, filters):\n for name, filter_ in filters.items():\n filter_.field_name = name\n cls.add_filter(name, filter_)\n\n\nclass MapEntityFilterSet(BaseMapEntityFilterSet):\n bbox = PolygonFilter()\n\n class Meta:\n fields = ['bbox']\n", "path": "mapentity/filters.py"}], "after_files": [{"content": "from django.db.models.fields.related import ManyToOneRel\nfrom django.conf import settings\n\nfrom django_filters import FilterSet, Filter\nfrom django_filters.fields import ChoiceField\nfrom django_filters.filterset import get_model_field\nfrom django.contrib.gis import forms\n\nfrom .settings import app_settings, API_SRID\nfrom .widgets import HiddenGeometryWidget\n\n\nclass PolygonFilter(Filter):\n\n field_class = forms.PolygonField\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('field_name', app_settings['GEOM_FIELD_NAME'])\n kwargs.setdefault('widget', HiddenGeometryWidget)\n kwargs.setdefault('lookup_expr', 'intersects')\n super(PolygonFilter, self).__init__(*args, **kwargs)\n\n\nclass PythonPolygonFilter(PolygonFilter):\n\n def filter(self, qs, value):\n if not value:\n return qs\n if not value.srid:\n value.srid = API_SRID\n value.transform(settings.SRID)\n filtered = []\n for o in qs.all():\n geom = getattr(o, self.field_name)\n if geom and geom.valid and not geom.empty:\n if getattr(geom, self.lookup_expr)(value):\n filtered.append(o.pk)\n else:\n filtered.append(o.pk)\n return qs.filter(pk__in=filtered)\n\n\nclass BaseMapEntityFilterSet(FilterSet):\n def __init__(self, *args, **kwargs):\n super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)\n for filter_ in self.filters.values():\n if filter_.field_class == ChoiceField:\n filter_.extra.setdefault('empty_label', filter_.label)\n self.__bypass_labels()\n\n def __bypass_labels(self):\n \"\"\"\n These hacks allow to bypass field labels. Using either placeholders,\n empty choices label, etc. 
This allows to greatly save space in form layout,\n which is required for concise filter forms.\n \"\"\"\n for fieldname in self.base_filters.keys():\n field = self.form.fields[fieldname]\n if isinstance(field, forms.MultiValueField):\n for i, widget in enumerate(field.widget.widgets):\n self.__set_placeholder(field.fields[i], widget)\n elif isinstance(field, forms.ChoiceField):\n field.empty_label = field.label\n self.__set_placeholder(field, field.widget)\n elif isinstance(field, forms.NullBooleanField):\n choices = [(u'1', field.label)] + field.widget.choices[1:]\n field.widget.choices = choices\n self.__set_placeholder(field, field.widget)\n else:\n self.__set_placeholder(field, field.widget)\n\n def __set_placeholder(self, field, widget):\n field.help_text = '' # Hide help text\n widget.attrs['placeholder'] = field.label\n widget.attrs['data-placeholder'] = field.label\n widget.attrs['title'] = field.label\n widget.attrs['data-label'] = field.label\n\n @classmethod\n def add_filter(cls, name, filter_=None):\n field = get_model_field(cls._meta.model, name)\n if filter_ is None:\n if isinstance(field, ManyToOneRel):\n filter_ = cls.filter_for_reverse_field(field, name)\n else:\n filter_ = cls.filter_for_field(field, name)\n cls.base_filters[name] = filter_\n\n @classmethod\n def add_filters(cls, filters):\n for name, filter_ in filters.items():\n filter_.field_name = name\n cls.add_filter(name, filter_)\n\n\nclass MapEntityFilterSet(BaseMapEntityFilterSet):\n bbox = PolygonFilter()\n\n class Meta:\n fields = ['bbox']\n", "path": "mapentity/filters.py"}]}
| 1,188 | 186 |
gh_patches_debug_27824
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-976
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve Frequency
## 🚀 Feature
If we would like to log datapoints/second every 100 iterations, we would most probably do it like this:
```python
wps_metric = Frequency(output_transformer=lambda x: x['ntokens'])
wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=100))
```
However, it seems like this won't take the other iterations into account when computing the total number of tokens: with the filtered event, `update()` only sees every 100th batch, so the accumulated count covers roughly 1/100 of the processed tokens while the timer still spans the full elapsed time, and the reported rate ends up far too low.
```python
class Frequency(Metric):
....
def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
engine.add_event_handler(event_name, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
```
IMO, should be
```python
class Frequency(Metric):
....
def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
```
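For a concrete sense of the difference, a minimal check (assuming a recent ignite version where filtered events are available) of how often each handler fires over a 300-iteration epoch:
```python
from ignite.engine import Engine, Events

counts = {"every_iteration": 0, "every_100": 0}

def count_all(engine):
    counts["every_iteration"] += 1

def count_filtered(engine):
    counts["every_100"] += 1

trainer = Engine(lambda engine, batch: batch)  # dummy process function
trainer.add_event_handler(Events.ITERATION_COMPLETED, count_all)
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=100), count_filtered)
trainer.run(range(300), max_epochs=1)

print(counts)  # {'every_iteration': 300, 'every_100': 3}
```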
cc @erip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/frequency.py`
Content:
```
1 import torch
2 import torch.distributed as dist
3
4 from ignite.engine import Events
5 from ignite.metrics import Metric
6 from ignite.handlers.timing import Timer
7 from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced
8
9
10 class Frequency(Metric):
11 """Provides metrics for the number of examples processed per second.
12
13 Examples:
14
15 .. code-block:: python
16
17 # Compute number of tokens processed
18 wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
19 wps_metric.attach(trainer, name='wps')
20 # Logging with TQDM
21 ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
22 # Progress bar will looks like
23 # Epoch [2/10]: [12/24] 50%|█████ , wps=400 [00:17<1:23]
24 """
25
26 def __init__(self, output_transform=lambda x: x, device=None):
27 self._timer = None
28 self._acc = None
29 self._n = None
30 self._elapsed = None
31 super(Frequency, self).__init__(output_transform=output_transform, device=device)
32
33 @reinit__is_reduced
34 def reset(self):
35 self._timer = Timer()
36 self._acc = 0
37 self._n = 0
38 self._elapsed = 0.0
39 super(Frequency, self).reset()
40
41 @reinit__is_reduced
42 def update(self, output):
43 self._acc += output
44 self._n = self._acc
45 self._elapsed = torch.tensor(self._timer.value(), device=self._device)
46
47 @sync_all_reduce("_n", "_elapsed")
48 def compute(self):
49 time_divisor = 1.0
50
51 if dist.is_available() and dist.is_initialized():
52 time_divisor *= dist.get_world_size()
53
54 # Returns the average processed objects per second across all workers
55 return self._n / self._elapsed.item() * time_divisor
56
57 def completed(self, engine, name):
58 engine.state.metrics[name] = int(self.compute())
59
60 def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
61 engine.add_event_handler(Events.EPOCH_STARTED, self.started)
62 engine.add_event_handler(event_name, self.iteration_completed)
63 engine.add_event_handler(event_name, self.completed, name)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ignite/metrics/frequency.py b/ignite/metrics/frequency.py
--- a/ignite/metrics/frequency.py
+++ b/ignite/metrics/frequency.py
@@ -19,8 +19,21 @@
wps_metric.attach(trainer, name='wps')
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
- # Progress bar will looks like
+ # Progress bar will look like
# Epoch [2/10]: [12/24] 50%|█████ , wps=400 [00:17<1:23]
+
+
+ To compute examples processed per second every 50th iteration:
+
+ .. code-block:: python
+
+ # Compute number of tokens processed
+ wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
+ wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))
+ # Logging with TQDM
+ ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
+ # Progress bar will look like
+ # Epoch [2/10]: [50/100] 50%|█████ , wps=400 [00:17<00:35]
"""
def __init__(self, output_transform=lambda x: x, device=None):
@@ -59,5 +72,5 @@
def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
- engine.add_event_handler(event_name, self.iteration_completed)
+ engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
|
{"golden_diff": "diff --git a/ignite/metrics/frequency.py b/ignite/metrics/frequency.py\n--- a/ignite/metrics/frequency.py\n+++ b/ignite/metrics/frequency.py\n@@ -19,8 +19,21 @@\n wps_metric.attach(trainer, name='wps')\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n- # Progress bar will looks like\n+ # Progress bar will look like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<1:23]\n+\n+\n+ To compute examples processed per second every 50th iteration:\n+\n+ .. code-block:: python\n+\n+ # Compute number of tokens processed\n+ wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n+ wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))\n+ # Logging with TQDM\n+ ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n+ # Progress bar will look like\n+ # Epoch [2/10]: [50/100] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<00:35]\n \"\"\"\n \n def __init__(self, output_transform=lambda x: x, device=None):\n@@ -59,5 +72,5 @@\n \n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n- engine.add_event_handler(event_name, self.iteration_completed)\n+ engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n engine.add_event_handler(event_name, self.completed, name)\n", "issue": "Improve Frequency\n## \ud83d\ude80 Feature\r\n\r\nIf we would like to log datapoints/second every 100 iterations, we most probably do like this \r\n```python\r\nwps_metric = Frequency(output_transformer=lambda x: x['ntokens'])\r\nwps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=100))\r\n```\r\nhowever, seems like this wont take into account all other iterations while computing the total number of tokens.\r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\r\n engine.add_event_handler(event_name, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\nIMO, should be \r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\r\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\n\r\ncc @erip \r\n\nImprove Frequency\n## \ud83d\ude80 Feature\r\n\r\nIf we would like to log datapoints/second every 100 iterations, we most probably do like this \r\n```python\r\nwps_metric = Frequency(output_transformer=lambda x: x['ntokens'])\r\nwps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=100))\r\n```\r\nhowever, seems like this wont take into account all other iterations while computing the total number of tokens.\r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\r\n engine.add_event_handler(event_name, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\nIMO, should be \r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, 
self.started)\r\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\n\r\ncc @erip \r\n\n", "before_files": [{"content": "import torch\nimport torch.distributed as dist\n\nfrom ignite.engine import Events\nfrom ignite.metrics import Metric\nfrom ignite.handlers.timing import Timer\nfrom ignite.metrics.metric import sync_all_reduce, reinit__is_reduced\n\n\nclass Frequency(Metric):\n \"\"\"Provides metrics for the number of examples processed per second.\n\n Examples:\n\n .. code-block:: python\n\n # Compute number of tokens processed\n wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n wps_metric.attach(trainer, name='wps')\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n # Progress bar will looks like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<1:23]\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x, device=None):\n self._timer = None\n self._acc = None\n self._n = None\n self._elapsed = None\n super(Frequency, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self):\n self._timer = Timer()\n self._acc = 0\n self._n = 0\n self._elapsed = 0.0\n super(Frequency, self).reset()\n\n @reinit__is_reduced\n def update(self, output):\n self._acc += output\n self._n = self._acc\n self._elapsed = torch.tensor(self._timer.value(), device=self._device)\n\n @sync_all_reduce(\"_n\", \"_elapsed\")\n def compute(self):\n time_divisor = 1.0\n\n if dist.is_available() and dist.is_initialized():\n time_divisor *= dist.get_world_size()\n\n # Returns the average processed objects per second across all workers\n return self._n / self._elapsed.item() * time_divisor\n\n def completed(self, engine, name):\n engine.state.metrics[name] = int(self.compute())\n\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n engine.add_event_handler(event_name, self.iteration_completed)\n engine.add_event_handler(event_name, self.completed, name)\n", "path": "ignite/metrics/frequency.py"}], "after_files": [{"content": "import torch\nimport torch.distributed as dist\n\nfrom ignite.engine import Events\nfrom ignite.metrics import Metric\nfrom ignite.handlers.timing import Timer\nfrom ignite.metrics.metric import sync_all_reduce, reinit__is_reduced\n\n\nclass Frequency(Metric):\n \"\"\"Provides metrics for the number of examples processed per second.\n\n Examples:\n\n .. code-block:: python\n\n # Compute number of tokens processed\n wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n wps_metric.attach(trainer, name='wps')\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n # Progress bar will look like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<1:23]\n\n\n To compute examples processed per second every 50th iteration:\n\n .. 
code-block:: python\n\n # Compute number of tokens processed\n wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n # Progress bar will look like\n # Epoch [2/10]: [50/100] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<00:35]\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x, device=None):\n self._timer = None\n self._acc = None\n self._n = None\n self._elapsed = None\n super(Frequency, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self):\n self._timer = Timer()\n self._acc = 0\n self._n = 0\n self._elapsed = 0.0\n super(Frequency, self).reset()\n\n @reinit__is_reduced\n def update(self, output):\n self._acc += output\n self._n = self._acc\n self._elapsed = torch.tensor(self._timer.value(), device=self._device)\n\n @sync_all_reduce(\"_n\", \"_elapsed\")\n def compute(self):\n time_divisor = 1.0\n\n if dist.is_available() and dist.is_initialized():\n time_divisor *= dist.get_world_size()\n\n # Returns the average processed objects per second across all workers\n return self._n / self._elapsed.item() * time_divisor\n\n def completed(self, engine, name):\n engine.state.metrics[name] = int(self.compute())\n\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n engine.add_event_handler(event_name, self.completed, name)\n", "path": "ignite/metrics/frequency.py"}]}
| 1,421 | 425 |
gh_patches_debug_29420
|
rasdani/github-patches
|
git_diff
|
scipy__scipy-14478
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: n-d interpolation parameter provided to geometric_slerp
This was first reported by @BvB93 during addition of type hints to the related code.
In short, [`geometric_slerp()`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.geometric_slerp.html) was intended to be a bit like `np.linspace` on the surface of an n-dimensional circle/sphere, interpolating between start and end points on that surface. However, it somehow slipped through that we allow arbitrary dimensions for the interpolation points/array, and this leads to weird/inconsistent/incorrect results.
Examples are below, where the "intended" 1-dimensional case produces the correct output--two points with 1 in the middle of the unit circle between the start/end. Notice the confusing discrepancy between the degenerate path output shapes vs. non-degenerate. While the non-degenerate outputs respect the input shape for `ndim > 1`, their numerical results are incorrect--the `x` coordinate isn't reflecting the movement along the unit circle to the halfway point between `[0, 1]` and `[1, 0]`.
What I'd like to do is raise an exception if `np.asarray(t).ndim > 1` and just move on, but as Bas points out we do have *de facto* support (no error raised) for arbitrary dimensions whether I like it or not, and I don't want my own view on it to exempt it from backwards compatibility concerns. That said, I wonder if we can basically say that this is a clear bug so it trumps backward compatibility?
```python
import numpy as np
import scipy
from scipy.spatial import geometric_slerp
print("scipy.__version__:", scipy.__version__)
arr1 = np.array([0, 1])
arr2 = np.array([1, 0])
for t in [[0, 0.5],
[[0, 0.5]],
[[[[[[[[[0, 0.5]]]]]]]]]]:
print("t dims:", np.asarray(t).ndim)
path = geometric_slerp(start=arr1,
end=arr2,
t=t)
print("path:\n", path)
degenerate_path = geometric_slerp(start=arr1,
end=arr1,
t=t)
print("degenerate_path:\n", degenerate_path)
```
```
scipy.__version__: 1.8.0.dev0+1472.130a1e6
t dims: 1
path:
[[0. 1. ]
[0.70710678 0.70710678]]
degenerate_path:
[[0. 1.]
[0. 1.]]
t dims: 2
path:
[[[0. 0.70710678]]]
degenerate_path:
[[0. 1.]
[0. 1.]]
t dims: 9
path:
[[[[[[[[[[0. 0.70710678]]]]]]]]]]
degenerate_path:
[[0. 1.]
[0. 1.]]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/spatial/_geometric_slerp.py`
Content:
```
1 from __future__ import annotations
2
3 __all__ = ['geometric_slerp']
4
5 import warnings
6 from typing import TYPE_CHECKING
7
8 import numpy as np
9 from scipy.spatial.distance import euclidean
10
11 if TYPE_CHECKING:
12 import numpy.typing as npt
13
14
15 def _geometric_slerp(start, end, t):
16 # create an orthogonal basis using QR decomposition
17 basis = np.vstack([start, end])
18 Q, R = np.linalg.qr(basis.T)
19 signs = 2 * (np.diag(R) >= 0) - 1
20 Q = Q.T * signs.T[:, np.newaxis]
21 R = R.T * signs.T[:, np.newaxis]
22
23 # calculate the angle between `start` and `end`
24 c = np.dot(start, end)
25 s = np.linalg.det(R)
26 omega = np.arctan2(s, c)
27
28 # interpolate
29 start, end = Q
30 s = np.sin(t * omega)
31 c = np.cos(t * omega)
32 return start * c[:, np.newaxis] + end * s[:, np.newaxis]
33
34
35 def geometric_slerp(
36 start: npt.ArrayLike,
37 end: npt.ArrayLike,
38 t: npt.ArrayLike,
39 tol: float = 1e-7,
40 ) -> np.ndarray:
41 """
42 Geometric spherical linear interpolation.
43
44 The interpolation occurs along a unit-radius
45 great circle arc in arbitrary dimensional space.
46
47 Parameters
48 ----------
49 start : (n_dimensions, ) array-like
50 Single n-dimensional input coordinate in a 1-D array-like
51 object. `n` must be greater than 1.
52 end : (n_dimensions, ) array-like
53 Single n-dimensional input coordinate in a 1-D array-like
54 object. `n` must be greater than 1.
55 t: float or (n_points,) array-like
56 A float or array-like of doubles representing interpolation
57 parameters, with values required in the inclusive interval
58 between 0 and 1. A common approach is to generate the array
59 with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.
60 Ascending, descending, and scrambled orders are permitted.
61 tol: float
62 The absolute tolerance for determining if the start and end
63 coordinates are antipodes.
64
65 Returns
66 -------
67 result : (t.size, D)
68 An array of doubles containing the interpolated
69 spherical path and including start and
70 end when 0 and 1 t are used. The
71 interpolated values should correspond to the
72 same sort order provided in the t array. The result
73 may be 1-dimensional if ``t`` is a float.
74
75 Raises
76 ------
77 ValueError
78 If ``start`` and ``end`` are antipodes, not on the
79 unit n-sphere, or for a variety of degenerate conditions.
80
81 Notes
82 -----
83 The implementation is based on the mathematical formula provided in [1]_,
84 and the first known presentation of this algorithm, derived from study of
85 4-D geometry, is credited to Glenn Davis in a footnote of the original
86 quaternion Slerp publication by Ken Shoemake [2]_.
87
88 .. versionadded:: 1.5.0
89
90 References
91 ----------
92 .. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp
93 .. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.
94 ACM SIGGRAPH Computer Graphics, 19(3): 245-254.
95
96 See Also
97 --------
98 scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions
99
100 Examples
101 --------
102 Interpolate four linearly-spaced values on the circumference of
103 a circle spanning 90 degrees:
104
105 >>> from scipy.spatial import geometric_slerp
106 >>> import matplotlib.pyplot as plt
107 >>> fig = plt.figure()
108 >>> ax = fig.add_subplot(111)
109 >>> start = np.array([1, 0])
110 >>> end = np.array([0, 1])
111 >>> t_vals = np.linspace(0, 1, 4)
112 >>> result = geometric_slerp(start,
113 ... end,
114 ... t_vals)
115
116 The interpolated results should be at 30 degree intervals
117 recognizable on the unit circle:
118
119 >>> ax.scatter(result[...,0], result[...,1], c='k')
120 >>> circle = plt.Circle((0, 0), 1, color='grey')
121 >>> ax.add_artist(circle)
122 >>> ax.set_aspect('equal')
123 >>> plt.show()
124
125 Attempting to interpolate between antipodes on a circle is
126 ambiguous because there are two possible paths, and on a
127 sphere there are infinite possible paths on the geodesic surface.
128 Nonetheless, one of the ambiguous paths is returned along
129 with a warning:
130
131 >>> opposite_pole = np.array([-1, 0])
132 >>> with np.testing.suppress_warnings() as sup:
133 ... sup.filter(UserWarning)
134 ... geometric_slerp(start,
135 ... opposite_pole,
136 ... t_vals)
137 array([[ 1.00000000e+00, 0.00000000e+00],
138 [ 5.00000000e-01, 8.66025404e-01],
139 [-5.00000000e-01, 8.66025404e-01],
140 [-1.00000000e+00, 1.22464680e-16]])
141
142 Extend the original example to a sphere and plot interpolation
143 points in 3D:
144
145 >>> from mpl_toolkits.mplot3d import proj3d
146 >>> fig = plt.figure()
147 >>> ax = fig.add_subplot(111, projection='3d')
148
149 Plot the unit sphere for reference (optional):
150
151 >>> u = np.linspace(0, 2 * np.pi, 100)
152 >>> v = np.linspace(0, np.pi, 100)
153 >>> x = np.outer(np.cos(u), np.sin(v))
154 >>> y = np.outer(np.sin(u), np.sin(v))
155 >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
156 >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
157
158 Interpolating over a larger number of points
159 may provide the appearance of a smooth curve on
160 the surface of the sphere, which is also useful
161 for discretized integration calculations on a
162 sphere surface:
163
164 >>> start = np.array([1, 0, 0])
165 >>> end = np.array([0, 0, 1])
166 >>> t_vals = np.linspace(0, 1, 200)
167 >>> result = geometric_slerp(start,
168 ... end,
169 ... t_vals)
170 >>> ax.plot(result[...,0],
171 ... result[...,1],
172 ... result[...,2],
173 ... c='k')
174 >>> plt.show()
175 """
176
177 start = np.asarray(start, dtype=np.float64)
178 end = np.asarray(end, dtype=np.float64)
179
180 if start.ndim != 1 or end.ndim != 1:
181 raise ValueError("Start and end coordinates "
182 "must be one-dimensional")
183
184 if start.size != end.size:
185 raise ValueError("The dimensions of start and "
186 "end must match (have same size)")
187
188 if start.size < 2 or end.size < 2:
189 raise ValueError("The start and end coordinates must "
190 "both be in at least two-dimensional "
191 "space")
192
193 if np.array_equal(start, end):
194 return np.linspace(start, start, np.asarray(t).size)
195
196 # for points that violate equation for n-sphere
197 for coord in [start, end]:
198 if not np.allclose(np.linalg.norm(coord), 1.0,
199 rtol=1e-9,
200 atol=0):
201 raise ValueError("start and end are not"
202 " on a unit n-sphere")
203
204 if not isinstance(tol, float):
205 raise ValueError("tol must be a float")
206 else:
207 tol = np.fabs(tol)
208
209 coord_dist = euclidean(start, end)
210
211 # diameter of 2 within tolerance means antipodes, which is a problem
212 # for all unit n-spheres (even the 0-sphere would have an ambiguous path)
213 if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):
214 warnings.warn("start and end are antipodes"
215 " using the specified tolerance;"
216 " this may cause ambiguous slerp paths")
217
218 t = np.asarray(t, dtype=np.float64)
219
220 if t.size == 0:
221 return np.empty((0, start.size))
222
223 if t.min() < 0 or t.max() > 1:
224 raise ValueError("interpolation parameter must be in [0, 1]")
225
226 if t.ndim == 0:
227 return _geometric_slerp(start,
228 end,
229 np.atleast_1d(t)).ravel()
230 else:
231 return _geometric_slerp(start,
232 end,
233 t)
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scipy/spatial/_geometric_slerp.py b/scipy/spatial/_geometric_slerp.py
--- a/scipy/spatial/_geometric_slerp.py
+++ b/scipy/spatial/_geometric_slerp.py
@@ -52,8 +52,8 @@
end : (n_dimensions, ) array-like
Single n-dimensional input coordinate in a 1-D array-like
object. `n` must be greater than 1.
- t: float or (n_points,) array-like
- A float or array-like of doubles representing interpolation
+ t: float or (n_points,) 1D array-like
+ A float or 1D array-like of doubles representing interpolation
parameters, with values required in the inclusive interval
between 0 and 1. A common approach is to generate the array
with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.
@@ -176,6 +176,11 @@
start = np.asarray(start, dtype=np.float64)
end = np.asarray(end, dtype=np.float64)
+ t = np.asarray(t)
+
+ if t.ndim > 1:
+ raise ValueError("The interpolation parameter "
+ "value must be one dimensional.")
if start.ndim != 1 or end.ndim != 1:
raise ValueError("Start and end coordinates "
@@ -191,7 +196,7 @@
"space")
if np.array_equal(start, end):
- return np.linspace(start, start, np.asarray(t).size)
+ return np.linspace(start, start, t.size)
# for points that violate equation for n-sphere
for coord in [start, end]:
|
{"golden_diff": "diff --git a/scipy/spatial/_geometric_slerp.py b/scipy/spatial/_geometric_slerp.py\n--- a/scipy/spatial/_geometric_slerp.py\n+++ b/scipy/spatial/_geometric_slerp.py\n@@ -52,8 +52,8 @@\n end : (n_dimensions, ) array-like\n Single n-dimensional input coordinate in a 1-D array-like\n object. `n` must be greater than 1.\n- t: float or (n_points,) array-like\n- A float or array-like of doubles representing interpolation\n+ t: float or (n_points,) 1D array-like\n+ A float or 1D array-like of doubles representing interpolation\n parameters, with values required in the inclusive interval\n between 0 and 1. A common approach is to generate the array\n with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.\n@@ -176,6 +176,11 @@\n \n start = np.asarray(start, dtype=np.float64)\n end = np.asarray(end, dtype=np.float64)\n+ t = np.asarray(t)\n+\n+ if t.ndim > 1:\n+ raise ValueError(\"The interpolation parameter \"\n+ \"value must be one dimensional.\")\n \n if start.ndim != 1 or end.ndim != 1:\n raise ValueError(\"Start and end coordinates \"\n@@ -191,7 +196,7 @@\n \"space\")\n \n if np.array_equal(start, end):\n- return np.linspace(start, start, np.asarray(t).size)\n+ return np.linspace(start, start, t.size)\n \n # for points that violate equation for n-sphere\n for coord in [start, end]:\n", "issue": "BUG: n-d interpolation parameter provided to geometric_slerp\nThis was first reported by @BvB93 during addition of type hints to the related code.\r\n\r\nIn short, [`geometric_slerp()`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.geometric_slerp.html) was intended to be a bit like `np.linspace` on the surface of an n-dimensional circle/sphere, interpolating between start and end points on that surface. However, it somehow slipped through that we allow arbitrary dimensions for the interpolation points/array, and this leads to weird/inconsistent/incorrect results.\r\n\r\nExamples are below, where the \"intended\" 1-dimensional case produces the correct output--two points with 1 in the middle of the unit circle between the start/end. Notice the confusing discrepancy between the degenerate path output shapes vs. non-degenerate. While the non-degenerate outputs respect the input shape for `ndim > 1`, their numerical results are incorrect--the `x` coordinate isn't reflecting the movement along the unit circle to the halfway point between `[0, 1]` and `[1, 0]`.\r\n\r\nWhat I'd like to do is raise an exception if `np.asarray(t).ndim > 1` and just move on, but as Bas points out we do have *de facto* support (no error raised) for arbitrary dimensions whether I like it or not, and I don't want my own view on it to exempt it from backwards compatibility concerns. That said, I wonder if we can basically say that this is a clear bug so it trumps backward compatibility?\r\n\r\n```python\r\nimport numpy as np\r\nimport scipy\r\nfrom scipy.spatial import geometric_slerp\r\n\r\nprint(\"scipy.__version__:\", scipy.__version__)\r\n\r\narr1 = np.array([0, 1])\r\narr2 = np.array([1, 0])\r\n\r\nfor t in [[0, 0.5],\r\n [[0, 0.5]],\r\n [[[[[[[[[0, 0.5]]]]]]]]]]:\r\n print(\"t dims:\", np.asarray(t).ndim)\r\n path = geometric_slerp(start=arr1,\r\n end=arr2,\r\n t=t)\r\n print(\"path:\\n\", path)\r\n degenerate_path = geometric_slerp(start=arr1,\r\n end=arr1,\r\n t=t)\r\n print(\"degenerate_path:\\n\", degenerate_path)\r\n```\r\n\r\n```\r\nscipy.__version__: 1.8.0.dev0+1472.130a1e6\r\nt dims: 1\r\npath:\r\n [[0. 1. ]\r\n [0.70710678 0.70710678]]\r\ndegenerate_path:\r\n [[0. 
1.]\r\n [0. 1.]]\r\nt dims: 2\r\npath:\r\n [[[0. 0.70710678]]]\r\ndegenerate_path:\r\n [[0. 1.]\r\n [0. 1.]]\r\nt dims: 9\r\npath:\r\n [[[[[[[[[[0. 0.70710678]]]]]]]]]]\r\ndegenerate_path:\r\n [[0. 1.]\r\n [0. 1.]]\r\n\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\n__all__ = ['geometric_slerp']\n\nimport warnings\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\n\nif TYPE_CHECKING:\n import numpy.typing as npt\n\n\ndef _geometric_slerp(start, end, t):\n # create an orthogonal basis using QR decomposition\n basis = np.vstack([start, end])\n Q, R = np.linalg.qr(basis.T)\n signs = 2 * (np.diag(R) >= 0) - 1\n Q = Q.T * signs.T[:, np.newaxis]\n R = R.T * signs.T[:, np.newaxis]\n\n # calculate the angle between `start` and `end`\n c = np.dot(start, end)\n s = np.linalg.det(R)\n omega = np.arctan2(s, c)\n\n # interpolate\n start, end = Q\n s = np.sin(t * omega)\n c = np.cos(t * omega)\n return start * c[:, np.newaxis] + end * s[:, np.newaxis]\n\n\ndef geometric_slerp(\n start: npt.ArrayLike,\n end: npt.ArrayLike,\n t: npt.ArrayLike,\n tol: float = 1e-7,\n) -> np.ndarray:\n \"\"\"\n Geometric spherical linear interpolation.\n\n The interpolation occurs along a unit-radius\n great circle arc in arbitrary dimensional space.\n\n Parameters\n ----------\n start : (n_dimensions, ) array-like\n Single n-dimensional input coordinate in a 1-D array-like\n object. `n` must be greater than 1.\n end : (n_dimensions, ) array-like\n Single n-dimensional input coordinate in a 1-D array-like\n object. `n` must be greater than 1.\n t: float or (n_points,) array-like\n A float or array-like of doubles representing interpolation\n parameters, with values required in the inclusive interval\n between 0 and 1. A common approach is to generate the array\n with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.\n Ascending, descending, and scrambled orders are permitted.\n tol: float\n The absolute tolerance for determining if the start and end\n coordinates are antipodes.\n\n Returns\n -------\n result : (t.size, D)\n An array of doubles containing the interpolated\n spherical path and including start and\n end when 0 and 1 t are used. The\n interpolated values should correspond to the\n same sort order provided in the t array. The result\n may be 1-dimensional if ``t`` is a float.\n\n Raises\n ------\n ValueError\n If ``start`` and ``end`` are antipodes, not on the\n unit n-sphere, or for a variety of degenerate conditions.\n\n Notes\n -----\n The implementation is based on the mathematical formula provided in [1]_,\n and the first known presentation of this algorithm, derived from study of\n 4-D geometry, is credited to Glenn Davis in a footnote of the original\n quaternion Slerp publication by Ken Shoemake [2]_.\n\n .. versionadded:: 1.5.0\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp\n .. 
[2] Ken Shoemake (1985) Animating rotation with quaternion curves.\n ACM SIGGRAPH Computer Graphics, 19(3): 245-254.\n\n See Also\n --------\n scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions\n\n Examples\n --------\n Interpolate four linearly-spaced values on the circumference of\n a circle spanning 90 degrees:\n\n >>> from scipy.spatial import geometric_slerp\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> start = np.array([1, 0])\n >>> end = np.array([0, 1])\n >>> t_vals = np.linspace(0, 1, 4)\n >>> result = geometric_slerp(start,\n ... end,\n ... t_vals)\n\n The interpolated results should be at 30 degree intervals\n recognizable on the unit circle:\n\n >>> ax.scatter(result[...,0], result[...,1], c='k')\n >>> circle = plt.Circle((0, 0), 1, color='grey')\n >>> ax.add_artist(circle)\n >>> ax.set_aspect('equal')\n >>> plt.show()\n\n Attempting to interpolate between antipodes on a circle is\n ambiguous because there are two possible paths, and on a\n sphere there are infinite possible paths on the geodesic surface.\n Nonetheless, one of the ambiguous paths is returned along\n with a warning:\n\n >>> opposite_pole = np.array([-1, 0])\n >>> with np.testing.suppress_warnings() as sup:\n ... sup.filter(UserWarning)\n ... geometric_slerp(start,\n ... opposite_pole,\n ... t_vals)\n array([[ 1.00000000e+00, 0.00000000e+00],\n [ 5.00000000e-01, 8.66025404e-01],\n [-5.00000000e-01, 8.66025404e-01],\n [-1.00000000e+00, 1.22464680e-16]])\n\n Extend the original example to a sphere and plot interpolation\n points in 3D:\n\n >>> from mpl_toolkits.mplot3d import proj3d\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111, projection='3d')\n\n Plot the unit sphere for reference (optional):\n\n >>> u = np.linspace(0, 2 * np.pi, 100)\n >>> v = np.linspace(0, np.pi, 100)\n >>> x = np.outer(np.cos(u), np.sin(v))\n >>> y = np.outer(np.sin(u), np.sin(v))\n >>> z = np.outer(np.ones(np.size(u)), np.cos(v))\n >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)\n\n Interpolating over a larger number of points\n may provide the appearance of a smooth curve on\n the surface of the sphere, which is also useful\n for discretized integration calculations on a\n sphere surface:\n\n >>> start = np.array([1, 0, 0])\n >>> end = np.array([0, 0, 1])\n >>> t_vals = np.linspace(0, 1, 200)\n >>> result = geometric_slerp(start,\n ... end,\n ... t_vals)\n >>> ax.plot(result[...,0],\n ... result[...,1],\n ... result[...,2],\n ... 
c='k')\n >>> plt.show()\n \"\"\"\n\n start = np.asarray(start, dtype=np.float64)\n end = np.asarray(end, dtype=np.float64)\n\n if start.ndim != 1 or end.ndim != 1:\n raise ValueError(\"Start and end coordinates \"\n \"must be one-dimensional\")\n\n if start.size != end.size:\n raise ValueError(\"The dimensions of start and \"\n \"end must match (have same size)\")\n\n if start.size < 2 or end.size < 2:\n raise ValueError(\"The start and end coordinates must \"\n \"both be in at least two-dimensional \"\n \"space\")\n\n if np.array_equal(start, end):\n return np.linspace(start, start, np.asarray(t).size)\n\n # for points that violate equation for n-sphere\n for coord in [start, end]:\n if not np.allclose(np.linalg.norm(coord), 1.0,\n rtol=1e-9,\n atol=0):\n raise ValueError(\"start and end are not\"\n \" on a unit n-sphere\")\n\n if not isinstance(tol, float):\n raise ValueError(\"tol must be a float\")\n else:\n tol = np.fabs(tol)\n\n coord_dist = euclidean(start, end)\n\n # diameter of 2 within tolerance means antipodes, which is a problem\n # for all unit n-spheres (even the 0-sphere would have an ambiguous path)\n if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):\n warnings.warn(\"start and end are antipodes\"\n \" using the specified tolerance;\"\n \" this may cause ambiguous slerp paths\")\n\n t = np.asarray(t, dtype=np.float64)\n\n if t.size == 0:\n return np.empty((0, start.size))\n\n if t.min() < 0 or t.max() > 1:\n raise ValueError(\"interpolation parameter must be in [0, 1]\")\n\n if t.ndim == 0:\n return _geometric_slerp(start,\n end,\n np.atleast_1d(t)).ravel()\n else:\n return _geometric_slerp(start,\n end,\n t)\n", "path": "scipy/spatial/_geometric_slerp.py"}], "after_files": [{"content": "from __future__ import annotations\n\n__all__ = ['geometric_slerp']\n\nimport warnings\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\n\nif TYPE_CHECKING:\n import numpy.typing as npt\n\n\ndef _geometric_slerp(start, end, t):\n # create an orthogonal basis using QR decomposition\n basis = np.vstack([start, end])\n Q, R = np.linalg.qr(basis.T)\n signs = 2 * (np.diag(R) >= 0) - 1\n Q = Q.T * signs.T[:, np.newaxis]\n R = R.T * signs.T[:, np.newaxis]\n\n # calculate the angle between `start` and `end`\n c = np.dot(start, end)\n s = np.linalg.det(R)\n omega = np.arctan2(s, c)\n\n # interpolate\n start, end = Q\n s = np.sin(t * omega)\n c = np.cos(t * omega)\n return start * c[:, np.newaxis] + end * s[:, np.newaxis]\n\n\ndef geometric_slerp(\n start: npt.ArrayLike,\n end: npt.ArrayLike,\n t: npt.ArrayLike,\n tol: float = 1e-7,\n) -> np.ndarray:\n \"\"\"\n Geometric spherical linear interpolation.\n\n The interpolation occurs along a unit-radius\n great circle arc in arbitrary dimensional space.\n\n Parameters\n ----------\n start : (n_dimensions, ) array-like\n Single n-dimensional input coordinate in a 1-D array-like\n object. `n` must be greater than 1.\n end : (n_dimensions, ) array-like\n Single n-dimensional input coordinate in a 1-D array-like\n object. `n` must be greater than 1.\n t: float or (n_points,) 1D array-like\n A float or 1D array-like of doubles representing interpolation\n parameters, with values required in the inclusive interval\n between 0 and 1. 
A common approach is to generate the array\n with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.\n Ascending, descending, and scrambled orders are permitted.\n tol: float\n The absolute tolerance for determining if the start and end\n coordinates are antipodes.\n\n Returns\n -------\n result : (t.size, D)\n An array of doubles containing the interpolated\n spherical path and including start and\n end when 0 and 1 t are used. The\n interpolated values should correspond to the\n same sort order provided in the t array. The result\n may be 1-dimensional if ``t`` is a float.\n\n Raises\n ------\n ValueError\n If ``start`` and ``end`` are antipodes, not on the\n unit n-sphere, or for a variety of degenerate conditions.\n\n Notes\n -----\n The implementation is based on the mathematical formula provided in [1]_,\n and the first known presentation of this algorithm, derived from study of\n 4-D geometry, is credited to Glenn Davis in a footnote of the original\n quaternion Slerp publication by Ken Shoemake [2]_.\n\n .. versionadded:: 1.5.0\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp\n .. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.\n ACM SIGGRAPH Computer Graphics, 19(3): 245-254.\n\n See Also\n --------\n scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions\n\n Examples\n --------\n Interpolate four linearly-spaced values on the circumference of\n a circle spanning 90 degrees:\n\n >>> from scipy.spatial import geometric_slerp\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> start = np.array([1, 0])\n >>> end = np.array([0, 1])\n >>> t_vals = np.linspace(0, 1, 4)\n >>> result = geometric_slerp(start,\n ... end,\n ... t_vals)\n\n The interpolated results should be at 30 degree intervals\n recognizable on the unit circle:\n\n >>> ax.scatter(result[...,0], result[...,1], c='k')\n >>> circle = plt.Circle((0, 0), 1, color='grey')\n >>> ax.add_artist(circle)\n >>> ax.set_aspect('equal')\n >>> plt.show()\n\n Attempting to interpolate between antipodes on a circle is\n ambiguous because there are two possible paths, and on a\n sphere there are infinite possible paths on the geodesic surface.\n Nonetheless, one of the ambiguous paths is returned along\n with a warning:\n\n >>> opposite_pole = np.array([-1, 0])\n >>> with np.testing.suppress_warnings() as sup:\n ... sup.filter(UserWarning)\n ... geometric_slerp(start,\n ... opposite_pole,\n ... 
t_vals)\n array([[ 1.00000000e+00, 0.00000000e+00],\n [ 5.00000000e-01, 8.66025404e-01],\n [-5.00000000e-01, 8.66025404e-01],\n [-1.00000000e+00, 1.22464680e-16]])\n\n Extend the original example to a sphere and plot interpolation\n points in 3D:\n\n >>> from mpl_toolkits.mplot3d import proj3d\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111, projection='3d')\n\n Plot the unit sphere for reference (optional):\n\n >>> u = np.linspace(0, 2 * np.pi, 100)\n >>> v = np.linspace(0, np.pi, 100)\n >>> x = np.outer(np.cos(u), np.sin(v))\n >>> y = np.outer(np.sin(u), np.sin(v))\n >>> z = np.outer(np.ones(np.size(u)), np.cos(v))\n >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)\n\n Interpolating over a larger number of points\n may provide the appearance of a smooth curve on\n the surface of the sphere, which is also useful\n for discretized integration calculations on a\n sphere surface:\n\n >>> start = np.array([1, 0, 0])\n >>> end = np.array([0, 0, 1])\n >>> t_vals = np.linspace(0, 1, 200)\n >>> result = geometric_slerp(start,\n ... end,\n ... t_vals)\n >>> ax.plot(result[...,0],\n ... result[...,1],\n ... result[...,2],\n ... c='k')\n >>> plt.show()\n \"\"\"\n\n start = np.asarray(start, dtype=np.float64)\n end = np.asarray(end, dtype=np.float64)\n t = np.asarray(t)\n\n if t.ndim > 1:\n raise ValueError(\"The interpolation parameter \"\n \"value must be one dimensional.\")\n\n if start.ndim != 1 or end.ndim != 1:\n raise ValueError(\"Start and end coordinates \"\n \"must be one-dimensional\")\n\n if start.size != end.size:\n raise ValueError(\"The dimensions of start and \"\n \"end must match (have same size)\")\n\n if start.size < 2 or end.size < 2:\n raise ValueError(\"The start and end coordinates must \"\n \"both be in at least two-dimensional \"\n \"space\")\n\n if np.array_equal(start, end):\n return np.linspace(start, start, t.size)\n\n # for points that violate equation for n-sphere\n for coord in [start, end]:\n if not np.allclose(np.linalg.norm(coord), 1.0,\n rtol=1e-9,\n atol=0):\n raise ValueError(\"start and end are not\"\n \" on a unit n-sphere\")\n\n if not isinstance(tol, float):\n raise ValueError(\"tol must be a float\")\n else:\n tol = np.fabs(tol)\n\n coord_dist = euclidean(start, end)\n\n # diameter of 2 within tolerance means antipodes, which is a problem\n # for all unit n-spheres (even the 0-sphere would have an ambiguous path)\n if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):\n warnings.warn(\"start and end are antipodes\"\n \" using the specified tolerance;\"\n \" this may cause ambiguous slerp paths\")\n\n t = np.asarray(t, dtype=np.float64)\n\n if t.size == 0:\n return np.empty((0, start.size))\n\n if t.min() < 0 or t.max() > 1:\n raise ValueError(\"interpolation parameter must be in [0, 1]\")\n\n if t.ndim == 0:\n return _geometric_slerp(start,\n end,\n np.atleast_1d(t)).ravel()\n else:\n return _geometric_slerp(start,\n end,\n t)\n", "path": "scipy/spatial/_geometric_slerp.py"}]}
| 3,630 | 390 |
gh_patches_debug_24779
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-485
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip-compile fails if output file already exists
Given a requirements.in file which consists only of the line:
```
-e ..
```
(that is, pull in requirements from the setup.py file one folder up from my requirements.in file)
The first pip-compile run works fine. The second one throws an error. If I delete the file that was generated by the first run, it will work again. There are no permissions issues (tried it with full perms on the file to check).
##### Environment Versions
1. os: OSX 10.11.6
1. python version: 2.7
1. pip version: 9.0.1
1. pip-tools version: 1.8.2
##### Steps to replicate
1. pip-compile -v -o requirements.txt
**SUCCESS**
2. pip-compile -v -o requirements.txt
**FAILURE**
##### Expected result
An updated (or identical) requirements.txt file
##### Actual result
```python
Traceback (most recent call last):
File "/Users/<username>/.virtualenvs/<virtualenv>/bin/pip-compile", line 11, in <module>
sys.exit(cli())
File "/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/piptools/scripts/compile.py", line 131, in cli
key = key_from_req(ireq.req)
File "/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/piptools/utils.py", line 41, in key_from_req
key = req.name
AttributeError: 'NoneType' object has no attribute 'name'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/scripts/compile.py`
Content:
```
1 # coding: utf-8
2 from __future__ import (absolute_import, division, print_function,
3 unicode_literals)
4
5 import optparse
6 import os
7 import sys
8 import tempfile
9
10 import pip
11 from pip.req import InstallRequirement, parse_requirements
12
13 from .. import click
14 from ..exceptions import PipToolsError
15 from ..logging import log
16 from ..repositories import LocalRequirementsRepository, PyPIRepository
17 from ..resolver import Resolver
18 from ..utils import (assert_compatible_pip_version, is_pinned_requirement,
19 key_from_req, dedup)
20 from ..writer import OutputWriter
21
22 # Make sure we're using a compatible version of pip
23 assert_compatible_pip_version()
24
25 DEFAULT_REQUIREMENTS_FILE = 'requirements.in'
26
27
28 class PipCommand(pip.basecommand.Command):
29 name = 'PipCommand'
30
31
32 @click.command()
33 @click.version_option()
34 @click.option('-v', '--verbose', is_flag=True, help="Show more output")
35 @click.option('-n', '--dry-run', is_flag=True, help="Only show what would happen, don't change anything")
36 @click.option('-p', '--pre', is_flag=True, default=None, help="Allow resolving to prereleases (default is not)")
37 @click.option('-r', '--rebuild', is_flag=True, help="Clear any caches upfront, rebuild from scratch")
38 @click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page", envvar='PIP_FIND_LINKS') # noqa
39 @click.option('-i', '--index-url', help="Change index URL (defaults to PyPI)", envvar='PIP_INDEX_URL')
40 @click.option('--extra-index-url', multiple=True, help="Add additional index URL to search", envvar='PIP_EXTRA_INDEX_URL') # noqa
41 @click.option('--client-cert', help="Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.") # noqa
42 @click.option('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',
43 help="Mark this host as trusted, even though it does not have "
44 "valid or any HTTPS.")
45 @click.option('--header/--no-header', is_flag=True, default=True,
46 help="Add header to generated file")
47 @click.option('--index/--no-index', is_flag=True, default=True,
48 help="Add index URL to generated file")
49 @click.option('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,
50 default=True, help="Add trusted host option to generated file")
51 @click.option('--annotate/--no-annotate', is_flag=True, default=True,
52 help="Annotate results, indicating where dependencies come from")
53 @click.option('-U', '--upgrade', is_flag=True, default=False,
54 help='Try to upgrade all dependencies to their latest versions')
55 @click.option('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,
56 help="Specify particular packages to upgrade.")
57 @click.option('-o', '--output-file', nargs=1, type=str, default=None,
58 help=('Output file name. Required if more than one input file is given. '
59 'Will be derived from input file otherwise.'))
60 @click.option('--allow-unsafe', is_flag=True, default=False,
61 help="Pin packages considered unsafe: pip, setuptools & distribute")
62 @click.option('--generate-hashes', is_flag=True, default=False,
63 help="Generate pip 8 style hashes in the resulting requirements file.")
64 @click.option('--max-rounds', default=10,
65 help="Maximum number of rounds before resolving the requirements aborts.")
66 @click.argument('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))
67 def cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,
68 client_cert, trusted_host, header, index, emit_trusted_host, annotate,
69 upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,
70 src_files, max_rounds):
71 """Compiles requirements.txt from requirements.in specs."""
72 log.verbose = verbose
73
74 if len(src_files) == 0:
75 if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
76 src_files = (DEFAULT_REQUIREMENTS_FILE,)
77 elif os.path.exists('setup.py'):
78 src_files = ('setup.py',)
79 if not output_file:
80 output_file = 'requirements.txt'
81 else:
82 raise click.BadParameter(("If you do not specify an input file, "
83 "the default is {} or setup.py").format(DEFAULT_REQUIREMENTS_FILE))
84
85 if len(src_files) == 1 and src_files[0] == '-':
86 if not output_file:
87 raise click.BadParameter('--output-file is required if input is from stdin')
88
89 if len(src_files) > 1 and not output_file:
90 raise click.BadParameter('--output-file is required if two or more input files are given.')
91
92 if output_file:
93 dst_file = output_file
94 else:
95 base_name, _, _ = src_files[0].rpartition('.')
96 dst_file = base_name + '.txt'
97
98 if upgrade and upgrade_packages:
99 raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')
100
101 ###
102 # Setup
103 ###
104
105 pip_command = get_pip_command()
106
107 pip_args = []
108 if find_links:
109 for link in find_links:
110 pip_args.extend(['-f', link])
111 if index_url:
112 pip_args.extend(['-i', index_url])
113 if extra_index_url:
114 for extra_index in extra_index_url:
115 pip_args.extend(['--extra-index-url', extra_index])
116 if client_cert:
117 pip_args.extend(['--client-cert', client_cert])
118 if pre:
119 pip_args.extend(['--pre'])
120 if trusted_host:
121 for host in trusted_host:
122 pip_args.extend(['--trusted-host', host])
123
124 pip_options, _ = pip_command.parse_args(pip_args)
125
126 session = pip_command._build_session(pip_options)
127 repository = PyPIRepository(pip_options, session)
128
129 # Pre-parse the inline package upgrade specs: they should take precedence
130 # over the stuff in the requirements files
131 upgrade_packages = [InstallRequirement.from_line(pkg)
132 for pkg in upgrade_packages]
133
134 # Proxy with a LocalRequirementsRepository if --upgrade is not specified
135 # (= default invocation)
136 if not (upgrade or upgrade_packages) and os.path.exists(dst_file):
137 ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)
138 existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}
139 repository = LocalRequirementsRepository(existing_pins, repository)
140
141 log.debug('Using indexes:')
142 # remove duplicate index urls before processing
143 repository.finder.index_urls = list(dedup(repository.finder.index_urls))
144 for index_url in repository.finder.index_urls:
145 log.debug(' {}'.format(index_url))
146
147 if repository.finder.find_links:
148 log.debug('')
149 log.debug('Configuration:')
150 for find_link in repository.finder.find_links:
151 log.debug(' -f {}'.format(find_link))
152
153 ###
154 # Parsing/collecting initial requirements
155 ###
156
157 constraints = []
158 for src_file in src_files:
159 is_setup_file = os.path.basename(src_file) == 'setup.py'
160 if is_setup_file or src_file == '-':
161 # pip requires filenames and not files. Since we want to support
162 # piping from stdin, we need to briefly save the input from stdin
163 # to a temporary file and have pip read that. also used for
164 # reading requirements from install_requires in setup.py.
165 tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)
166 if is_setup_file:
167 from distutils.core import run_setup
168 dist = run_setup(src_file)
169 tmpfile.write('\n'.join(dist.install_requires))
170 else:
171 tmpfile.write(sys.stdin.read())
172 tmpfile.flush()
173 constraints.extend(parse_requirements(
174 tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))
175 else:
176 constraints.extend(parse_requirements(
177 src_file, finder=repository.finder, session=repository.session, options=pip_options))
178
179 # Check the given base set of constraints first
180 Resolver.check_constraints(constraints)
181
182 # The requirement objects are modified in-place so we need to save off the list of primary packages first
183 primary_packages = {key_from_req(ireq.req) for ireq in constraints if not ireq.constraint}
184
185 try:
186 resolver = Resolver(constraints, repository, prereleases=pre,
187 clear_caches=rebuild, allow_unsafe=allow_unsafe)
188 results = resolver.resolve(max_rounds=max_rounds)
189 if generate_hashes:
190 hashes = resolver.resolve_hashes(results)
191 else:
192 hashes = None
193 except PipToolsError as e:
194 log.error(str(e))
195 sys.exit(2)
196
197 log.debug('')
198
199 ##
200 # Output
201 ##
202
203 # Compute reverse dependency annotations statically, from the
204 # dependency cache that the resolver has populated by now.
205 #
206 # TODO (1a): reverse deps for any editable package are lost
207 # what SHOULD happen is that they are cached in memory, just
208 # not persisted to disk!
209 #
210 # TODO (1b): perhaps it's easiest if the dependency cache has an API
211 # that could take InstallRequirements directly, like:
212 #
213 # cache.set(ireq, ...)
214 #
215 # then, when ireq is editable, it would store in
216 #
217 # editables[egg_name][link_without_fragment] = deps
218 # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}
219 #
220 # otherwise:
221 #
222 # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}
223 #
224 reverse_dependencies = None
225 if annotate:
226 reverse_dependencies = resolver.reverse_dependencies(results)
227
228 writer = OutputWriter(src_files, dst_file, dry_run=dry_run,
229 emit_header=header, emit_index=index,
230 emit_trusted_host=emit_trusted_host,
231 annotate=annotate,
232 generate_hashes=generate_hashes,
233 default_index_url=repository.DEFAULT_INDEX_URL,
234 index_urls=repository.finder.index_urls,
235 trusted_hosts=pip_options.trusted_hosts,
236 format_control=repository.finder.format_control)
237 writer.write(results=results,
238 reverse_dependencies=reverse_dependencies,
239 primary_packages=primary_packages,
240 markers={key_from_req(ireq.req): ireq.markers
241 for ireq in constraints if ireq.markers},
242 hashes=hashes)
243
244 if dry_run:
245 log.warning('Dry-run, so nothing updated.')
246
247
248 def get_pip_command():
249 # Use pip's parser for pip.conf management and defaults.
250 # General options (find_links, index_url, extra_index_url, trusted_host,
251 # and pre) are defered to pip.
252 pip_command = PipCommand()
253 index_opts = pip.cmdoptions.make_option_group(
254 pip.cmdoptions.index_group,
255 pip_command.parser,
256 )
257 pip_command.parser.insert_option_group(0, index_opts)
258 pip_command.parser.add_option(optparse.Option('--pre', action='store_true', default=False))
259
260 return pip_command
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py
--- a/piptools/scripts/compile.py
+++ b/piptools/scripts/compile.py
@@ -179,9 +179,6 @@
# Check the given base set of constraints first
Resolver.check_constraints(constraints)
- # The requirement objects are modified in-place so we need to save off the list of primary packages first
- primary_packages = {key_from_req(ireq.req) for ireq in constraints if not ireq.constraint}
-
try:
resolver = Resolver(constraints, repository, prereleases=pre,
clear_caches=rebuild, allow_unsafe=allow_unsafe)
@@ -236,7 +233,7 @@
format_control=repository.finder.format_control)
writer.write(results=results,
reverse_dependencies=reverse_dependencies,
- primary_packages=primary_packages,
+ primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},
markers={key_from_req(ireq.req): ireq.markers
for ireq in constraints if ireq.markers},
hashes=hashes)
|
{"golden_diff": "diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py\n--- a/piptools/scripts/compile.py\n+++ b/piptools/scripts/compile.py\n@@ -179,9 +179,6 @@\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n \n- # The requirement objects are modified in-place so we need to save off the list of primary packages first\n- primary_packages = {key_from_req(ireq.req) for ireq in constraints if not ireq.constraint}\n-\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n@@ -236,7 +233,7 @@\n format_control=repository.finder.format_control)\n writer.write(results=results,\n reverse_dependencies=reverse_dependencies,\n- primary_packages=primary_packages,\n+ primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n hashes=hashes)\n", "issue": "pip-compile fails if output file already exists\nGiven a requirements.in file which consists only of the line:\r\n```\r\n-e ..\r\n```\r\n(that is, pull in requirements from the setup.py file one folder up from my requirements.in file)\r\n\r\nThe first pip-compile run works fine. The second one throws an error. If I delete the file that was generated by the first run, it will work again. There are no permissions issues (tried it with full perms on the file to check).\r\n\r\n##### Environment Versions\r\n\r\n1. os: OSX 10.11.6\r\n1. python version: 2.7\r\n1. pip version: 9.0.1\r\n1. pip-tools version: 1.8.2\r\n\r\n##### Steps to replicate\r\n\r\n1. pip-compile -v -o requirements.txt\r\n**SUCCESS**\r\n2. pip-compile -v -o requirements.txt \r\n**FAILURE**\r\n\r\n##### Expected result\r\nAn updated (or identical) requirements.txt file\r\n\r\n\r\n##### Actual result\r\n```python\r\nTraceback (most recent call last):\r\n File \"/Users/<username>/.virtualenvs/<virtualenv>/bin/pip-compile\", line 11, in <module>\r\n sys.exit(cli())\r\n File \"/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/piptools/scripts/compile.py\", line 131, in cli\r\n key = key_from_req(ireq.req)\r\n File \"/Users/<username>/.virtualenvs/<virtualenv>/lib/python2.7/site-packages/piptools/utils.py\", line 41, in key_from_req\r\n key = req.name\r\nAttributeError: 'NoneType' object has no attribute 'name'\r\n```\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. 
import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (assert_compatible_pip_version, is_pinned_requirement,\n key_from_req, dedup)\nfrom ..writer import OutputWriter\n\n# Make sure we're using a compatible version of pip\nassert_compatible_pip_version()\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n\n\nclass PipCommand(pip.basecommand.Command):\n name = 'PipCommand'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', is_flag=True, help=\"Show more output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to PyPI)\", envvar='PIP_INDEX_URL')\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: pip, setuptools & distribute\")\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbose = verbose\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n if not output_file:\n output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if len(src_files) == 1 and src_files[0] == '-':\n if not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name, _, _ = src_files[0].rpartition('.')\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n # Pre-parse the inline package upgrade specs: they should take precedence\n # over the stuff in the requirements files\n upgrade_packages = [InstallRequirement.from_line(pkg)\n for pkg in upgrade_packages]\n\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not (upgrade or upgrade_packages) and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in 
repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n # The requirement objects are modified in-place so we need to save off the list of primary packages first\n primary_packages = {key_from_req(ireq.req) for ireq in constraints if not ireq.constraint}\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control)\n writer.write(results=results,\n reverse_dependencies=reverse_dependencies,\n primary_packages=primary_packages,\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n\n\ndef get_pip_command():\n # Use pip's parser for pip.conf management and defaults.\n # General options (find_links, index_url, extra_index_url, trusted_host,\n # and pre) are defered to pip.\n pip_command = PipCommand()\n index_opts = pip.cmdoptions.make_option_group(\n pip.cmdoptions.index_group,\n 
pip_command.parser,\n )\n pip_command.parser.insert_option_group(0, index_opts)\n pip_command.parser.add_option(optparse.Option('--pre', action='store_true', default=False))\n\n return pip_command\n", "path": "piptools/scripts/compile.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (assert_compatible_pip_version, is_pinned_requirement,\n key_from_req, dedup)\nfrom ..writer import OutputWriter\n\n# Make sure we're using a compatible version of pip\nassert_compatible_pip_version()\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n\n\nclass PipCommand(pip.basecommand.Command):\n name = 'PipCommand'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', is_flag=True, help=\"Show more output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to PyPI)\", envvar='PIP_INDEX_URL')\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: pip, setuptools & distribute\")\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbose = verbose\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n if not output_file:\n output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if len(src_files) == 1 and src_files[0] == '-':\n if not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name, _, _ = src_files[0].rpartition('.')\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n # Pre-parse the inline package upgrade specs: they should take precedence\n # over the stuff in the requirements files\n upgrade_packages = [InstallRequirement.from_line(pkg)\n for pkg in upgrade_packages]\n\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not (upgrade or upgrade_packages) and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in 
repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control)\n writer.write(results=results,\n reverse_dependencies=reverse_dependencies,\n primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n\n\ndef get_pip_command():\n # Use pip's parser for pip.conf management and defaults.\n # General options (find_links, index_url, extra_index_url, trusted_host,\n # and pre) are defered to pip.\n pip_command = PipCommand()\n index_opts = pip.cmdoptions.make_option_group(\n pip.cmdoptions.index_group,\n pip_command.parser,\n )\n pip_command.parser.insert_option_group(0, index_opts)\n pip_command.parser.add_option(optparse.Option('--pre', action='store_true', 
default=False))\n\n return pip_command\n", "path": "piptools/scripts/compile.py"}]}
| 3,939 | 258 |
gh_patches_debug_24002
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-831
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_K8S_31 failure with RuntimeDefault configured for workloads
**Describe the bug**
Please see #710
**To Reproduce**
Please see #710
**Expected behavior**
Please see #710
**Additional context**
The bug reported in #710 needs to be fixed for workloads in https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/Seccomp.py#L44:L48 as well.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
....
spec:
...
...
template:
...
...
spec:
....
....
securityContext:
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
```
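For workloads the profile sits one level deeper than it does for `Pod`, so the check needs a lookup along these lines (a sketch only, mirroring the existing `Pod` branch of `scan_spec_conf`; the helper name is illustrative, not part of checkov):

```python
import dpath

from checkov.common.models.enums import CheckResult


def workload_seccomp_result(conf):
    # Same dpath-style lookup as the Pod branch, but under spec/template/spec,
    # which is where Deployments, StatefulSets, etc. carry their pod spec.
    path = 'spec/template/spec/securityContext/seccompProfile/type'
    if dpath.search(conf, path):
        profile = dpath.get(conf, path)
        return CheckResult.PASSED if profile == 'RuntimeDefault' else CheckResult.FAILED
    return None  # fall back to the annotation-based check
```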
**Related PRs**
#711
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/kubernetes/checks/Seccomp.py`
Content:
```
1 import dpath
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.kubernetes.base_spec_check import BaseK8Check
5
6
7 class Seccomp(BaseK8Check):
8
9 def __init__(self):
10 # CIS-1.5 5.7.2
11 name = "Ensure that the seccomp profile is set to docker/default or runtime/default"
12 id = "CKV_K8S_31"
13 # Location: Pod.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod
14 # Location: CronJob.spec.jobTemplate.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod
15 # Location: *.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod
16 # Location: *.spec.securityContext.seccompProfile.type
17 supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob']
18 categories = [CheckCategories.KUBERNETES]
19 super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)
20
21 def get_resource_id(self, conf):
22 if "namespace" in conf["metadata"]:
23 return "{}.{}.{}".format(conf["kind"], conf["metadata"]["name"], conf["metadata"]["namespace"])
24 else:
25 return "{}.{}.default".format(conf["kind"], conf["metadata"]["name"])
26
27 def scan_spec_conf(self, conf):
28 metadata = {}
29
30 if conf['kind'] == 'Pod':
31 security_profile = dpath.search(conf, 'spec/securityContext/seccompProfile/type')
32 if security_profile:
33 security_profile = dpath.get(conf, 'spec/securityContext/seccompProfile/type')
34 return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
35 if "metadata" in conf:
36 metadata = conf["metadata"]
37 elif conf['kind'] == 'CronJob':
38 if "spec" in conf:
39 if "jobTemplate" in conf["spec"]:
40 if "spec" in conf["spec"]["jobTemplate"]:
41 if "template" in conf["spec"]["jobTemplate"]["spec"]:
42 if "metadata" in conf["spec"]["jobTemplate"]["spec"]["template"]:
43 metadata = conf["spec"]["jobTemplate"]["spec"]["template"]["metadata"]
44 else:
45 if "spec" in conf:
46 if "template" in conf["spec"]:
47 if "metadata" in conf["spec"]["template"]:
48 metadata = conf["spec"]["template"]["metadata"]
49
50 if metadata:
51 if "annotations" in metadata and isinstance(metadata['annotations'], dict):
52 if "seccomp.security.alpha.kubernetes.io/pod" in metadata["annotations"]:
53 if ("docker/default" in metadata["annotations"]["seccomp.security.alpha.kubernetes.io/pod"] or
54 "runtime/default" in metadata["annotations"]["seccomp.security.alpha.kubernetes.io/pod"]):
55 return CheckResult.PASSED
56 return CheckResult.FAILED
57
58
59 check = Seccomp()
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/kubernetes/checks/Seccomp.py b/checkov/kubernetes/checks/Seccomp.py
--- a/checkov/kubernetes/checks/Seccomp.py
+++ b/checkov/kubernetes/checks/Seccomp.py
@@ -34,6 +34,20 @@
return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
if "metadata" in conf:
metadata = conf["metadata"]
+ if conf['kind'] == 'Deployment':
+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ if security_profile:
+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
+ if "metadata" in conf:
+ metadata = conf["metadata"]
+ if conf['kind'] == 'StatefulSet':
+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ if security_profile:
+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
+ if "metadata" in conf:
+ metadata = conf["metadata"]
elif conf['kind'] == 'CronJob':
if "spec" in conf:
if "jobTemplate" in conf["spec"]:
|
{"golden_diff": "diff --git a/checkov/kubernetes/checks/Seccomp.py b/checkov/kubernetes/checks/Seccomp.py\n--- a/checkov/kubernetes/checks/Seccomp.py\n+++ b/checkov/kubernetes/checks/Seccomp.py\n@@ -34,6 +34,20 @@\n return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n+ if conf['kind'] == 'Deployment':\n+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ if security_profile:\n+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n+ if \"metadata\" in conf:\n+ metadata = conf[\"metadata\"]\n+ if conf['kind'] == 'StatefulSet':\n+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ if security_profile:\n+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n+ if \"metadata\" in conf:\n+ metadata = conf[\"metadata\"] \n elif conf['kind'] == 'CronJob':\n if \"spec\" in conf:\n if \"jobTemplate\" in conf[\"spec\"]:\n", "issue": "CKV_K8S_31 failure with RuntimeDefault configured for workloads\n**Describe the bug**\r\nPlease see #710\r\n\r\n**To Reproduce**\r\nPlease see #710\r\n\r\n**Expected behavior**\r\nPlease see #710\r\n\r\n**Additional context**\r\nThe bug reported in #710 needs to be fixed for workloads in https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/Seccomp.py#L44:L48 as well.\r\n\r\n```yaml\r\napiVersion: apps/v1\r\nkind: Deployment\r\nmetadata:\r\n....\r\nspec:\r\n...\r\n...\r\n template:\r\n ...\r\n ...\r\n spec:\r\n ....\r\n ....\r\n securityContext:\r\n allowPrivilegeEscalation: false\r\n seccompProfile:\r\n type: RuntimeDefault\r\n```\r\n\r\n**Related PRs**\r\n#711 \r\n\n", "before_files": [{"content": "import dpath\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.kubernetes.base_spec_check import BaseK8Check\n\n\nclass Seccomp(BaseK8Check):\n\n def __init__(self):\n # CIS-1.5 5.7.2\n name = \"Ensure that the seccomp profile is set to docker/default or runtime/default\"\n id = \"CKV_K8S_31\"\n # Location: Pod.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: CronJob.spec.jobTemplate.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.securityContext.seccompProfile.type\n supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)\n\n def get_resource_id(self, conf):\n if \"namespace\" in conf[\"metadata\"]:\n return \"{}.{}.{}\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"], conf[\"metadata\"][\"namespace\"])\n else:\n return \"{}.{}.default\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"])\n\n def scan_spec_conf(self, conf):\n metadata = {}\n\n if conf['kind'] == 'Pod':\n security_profile = dpath.search(conf, 'spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if 
security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n elif conf['kind'] == 'CronJob':\n if \"spec\" in conf:\n if \"jobTemplate\" in conf[\"spec\"]:\n if \"spec\" in conf[\"spec\"][\"jobTemplate\"]:\n if \"template\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"][\"metadata\"]\n else:\n if \"spec\" in conf:\n if \"template\" in conf[\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"template\"][\"metadata\"]\n\n if metadata:\n if \"annotations\" in metadata and isinstance(metadata['annotations'], dict):\n if \"seccomp.security.alpha.kubernetes.io/pod\" in metadata[\"annotations\"]:\n if (\"docker/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"] or\n \"runtime/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"]):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = Seccomp()\n", "path": "checkov/kubernetes/checks/Seccomp.py"}], "after_files": [{"content": "import dpath\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.kubernetes.base_spec_check import BaseK8Check\n\n\nclass Seccomp(BaseK8Check):\n\n def __init__(self):\n # CIS-1.5 5.7.2\n name = \"Ensure that the seccomp profile is set to docker/default or runtime/default\"\n id = \"CKV_K8S_31\"\n # Location: Pod.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: CronJob.spec.jobTemplate.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.securityContext.seccompProfile.type\n supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)\n\n def get_resource_id(self, conf):\n if \"namespace\" in conf[\"metadata\"]:\n return \"{}.{}.{}\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"], conf[\"metadata\"][\"namespace\"])\n else:\n return \"{}.{}.default\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"])\n\n def scan_spec_conf(self, conf):\n metadata = {}\n\n if conf['kind'] == 'Pod':\n security_profile = dpath.search(conf, 'spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n if conf['kind'] == 'Deployment':\n security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n if conf['kind'] == 'StatefulSet':\n security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if security_profile == 
'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"] \n elif conf['kind'] == 'CronJob':\n if \"spec\" in conf:\n if \"jobTemplate\" in conf[\"spec\"]:\n if \"spec\" in conf[\"spec\"][\"jobTemplate\"]:\n if \"template\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"][\"metadata\"]\n else:\n if \"spec\" in conf:\n if \"template\" in conf[\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"template\"][\"metadata\"]\n\n if metadata:\n if \"annotations\" in metadata and isinstance(metadata['annotations'], dict):\n if \"seccomp.security.alpha.kubernetes.io/pod\" in metadata[\"annotations\"]:\n if (\"docker/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"] or\n \"runtime/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"]):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = Seccomp()\n", "path": "checkov/kubernetes/checks/Seccomp.py"}]}
| 1,215 | 338 |
gh_patches_debug_30738
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-506
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--associate-public-ip-address option with --security-group-ids
#501 #502
When I run the command with --subnet-id, it works fine, but when I add --security-group-ids, it does not work.
It seems that the same modifications are required.
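Concretely, the security group ids probably have to be moved down into the NetworkInterfaces structure the same way the subnet id already is. A sketch of that handling (illustrative only; the function name is hypothetical, the ids are placeholders, and the real logic would live in the existing before-parameter-build hook):

```python
def move_security_groups(params):
    # Mirrors how the subnet id is pushed down into NetworkInterfaces when
    # AssociatePublicIpAddress has been set by the customization.
    if 'network_interfaces' in params:
        ni = params['network_interfaces']
        if 'AssociatePublicIpAddress' in ni[0] and 'security_group_ids' in params:
            ni[0]['Groups'] = params['security_group_ids']
            del params['security_group_ids']


params = {'network_interfaces': [{'DeviceIndex': 0, 'AssociatePublicIpAddress': True}],
          'security_group_ids': ['sg-12345678']}
move_security_groups(params)
# params['network_interfaces'][0]['Groups'] is now ['sg-12345678']
```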
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/ec2runinstances.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization adds two new parameters to the ``ec2 run-instance``
15 command. The first, ``--secondary-private-ip-addresses`` allows a list
16 of IP addresses within the specified subnet to be associated with the
17 new instance. The second, ``--secondary-ip-address-count`` allows you
18 to specify how many additional IP addresses you want but the actual
19 address will be assigned for you.
20
21 This functionality (and much more) is also available using the
22 ``--network-interfaces`` complex argument. This just makes two of
23 the most commonly used features available more easily.
24 """
25 from awscli.arguments import CustomArgument
26
27 # --secondary-private-ip-address
28 SECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (
29 '[EC2-VPC] A secondary private IP address for the network interface '
30 'or instance. You can specify this multiple times to assign multiple '
31 'secondary IP addresses. If you want additional private IP addresses '
32 'but do not need a specific address, use the '
33 '--secondary-private-ip-address-count option.')
34
35 # --secondary-private-ip-address-count
36 SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (
37 '[EC2-VPC] The number of secondary IP addresses to assign to '
38 'the network interface or instance.')
39
40 # --associate-public-ip-address
41 ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (
42 '[EC2-VPC] If specified a public IP address will be assigned '
43 'to the new instance in a VPC.')
44
45 def _add_params(argument_table, operation, **kwargs):
46 arg = SecondaryPrivateIpAddressesArgument(
47 name='secondary-private-ip-addresses',
48 help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)
49 argument_table['secondary-private-ip-addresses'] = arg
50 arg = SecondaryPrivateIpAddressCountArgument(
51 name='secondary-private-ip-address-count',
52 help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)
53 argument_table['secondary-private-ip-address-count'] = arg
54 arg = AssociatePublicIpAddressArgument(
55 name='associate-public-ip-address',
56 help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,
57 action='store_true', group_name='associate_public_ip')
58 argument_table['associate-public-ip-address'] = arg
59 arg = NoAssociatePublicIpAddressArgument(
60 name='no-associate-public-ip-address',
61 help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,
62 action='store_false', group_name='associate_public_ip')
63 argument_table['no-associate-public-ip-address'] = arg
64
65
66 def _check_args(parsed_args, **kwargs):
67 # This function checks the parsed args. If the user specified
68 # the --network-interfaces option with any of the scalar options we
69 # raise an error.
70 arg_dict = vars(parsed_args)
71 if arg_dict['network_interfaces']:
72 for key in ('secondary_private_ip_addresses',
73 'secondary_private_ip_address_count',
74 'associate_public_ip_address'):
75 if arg_dict[key]:
76 msg = ('Mixing the --network-interfaces option '
77 'with the simple, scalar options is '
78 'not supported.')
79 raise ValueError(msg)
80
81
82 def _fix_subnet(operation, endpoint, params, **kwargs):
83 # If the user has supplied a --subnet-id option AND we also
84 # have inserted an AssociatePublicIpAddress into the network_interfaces
85 # structure, we need to move the subnetId value down into the
86 # network_interfaces structure or we will get a client error from EC2.
87 if 'network_interfaces' in params:
88 ni = params['network_interfaces']
89 if 'AssociatePublicIpAddress' in ni[0]:
90 if 'subnet_id' in params:
91 ni[0]['SubnetId'] = params['subnet_id']
92 del params['subnet_id']
93
94 EVENTS = [
95 ('building-argument-table.ec2.run-instances', _add_params),
96 ('operation-args-parsed.ec2.run-instances', _check_args),
97 ('before-parameter-build.ec2.RunInstances', _fix_subnet),
98 ]
99
100
101 def register_runinstances(event_handler):
102 # Register all of the events for customizing BundleInstance
103 for event, handler in EVENTS:
104 event_handler.register(event, handler)
105
106
107 def _build_network_interfaces(params, key, value):
108 # Build up the NetworkInterfaces data structure
109 if 'network_interfaces' not in params:
110 params['network_interfaces'] = [{'DeviceIndex': 0}]
111
112 if key == 'PrivateIpAddresses':
113 if 'PrivateIpAddresses' not in params['network_interfaces'][0]:
114 params['network_interfaces'][0]['PrivateIpAddresses'] = value
115 else:
116 params['network_interfaces'][0][key] = value
117
118
119 class SecondaryPrivateIpAddressesArgument(CustomArgument):
120
121 def add_to_parser(self, parser, cli_name=None):
122 parser.add_argument(self.cli_name, dest=self.py_name,
123 default=self._default, nargs='*')
124
125 def add_to_params(self, parameters, value):
126 if value:
127 value = [{'PrivateIpAddress': v, 'Primary': False} for
128 v in value]
129 _build_network_interfaces(parameters,
130 'PrivateIpAddresses',
131 value)
132
133
134 class SecondaryPrivateIpAddressCountArgument(CustomArgument):
135
136 def add_to_parser(self, parser, cli_name=None):
137 parser.add_argument(self.cli_name, dest=self.py_name,
138 default=self._default, type=int)
139
140 def add_to_params(self, parameters, value):
141 if value:
142 _build_network_interfaces(parameters,
143 'SecondaryPrivateIpAddressCount',
144 value)
145
146
147 class AssociatePublicIpAddressArgument(CustomArgument):
148
149 def add_to_params(self, parameters, value):
150 if value is True:
151 _build_network_interfaces(parameters,
152 'AssociatePublicIpAddress',
153 value)
154
155
156 class NoAssociatePublicIpAddressArgument(CustomArgument):
157
158 def add_to_params(self, parameters, value):
159 if value is False:
160 _build_network_interfaces(parameters,
161 'AssociatePublicIpAddress',
162 value)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awscli/customizations/ec2runinstances.py b/awscli/customizations/ec2runinstances.py
--- a/awscli/customizations/ec2runinstances.py
+++ b/awscli/customizations/ec2runinstances.py
@@ -79,22 +79,33 @@
raise ValueError(msg)
-def _fix_subnet(operation, endpoint, params, **kwargs):
- # If the user has supplied a --subnet-id option AND we also
- # have inserted an AssociatePublicIpAddress into the network_interfaces
- # structure, we need to move the subnetId value down into the
- # network_interfaces structure or we will get a client error from EC2.
+def _fix_args(operation, endpoint, params, **kwargs):
+ # The RunInstances request provides some parameters
+ # such as --subnet-id and --security-group-id that can be specified
+ # as separate options only if the request DOES NOT include a
+ # NetworkInterfaces structure. In those cases, the values for
+ # these parameters must be specified inside the NetworkInterfaces
+ # structure. This function checks for those parameters
+ # and fixes them if necessary.
+ # NOTE: If the user is a default VPC customer, RunInstances
+ # allows them to specify the security group by name or by id.
+ # However, in this scenario we can only support id because
+ # we can't place a group name in the NetworkInterfaces structure.
if 'network_interfaces' in params:
ni = params['network_interfaces']
if 'AssociatePublicIpAddress' in ni[0]:
if 'subnet_id' in params:
ni[0]['SubnetId'] = params['subnet_id']
del params['subnet_id']
+ if 'security_group_ids' in params:
+ ni[0]['Groups'] = params['security_group_ids']
+ del params['security_group_ids']
+
EVENTS = [
('building-argument-table.ec2.run-instances', _add_params),
('operation-args-parsed.ec2.run-instances', _check_args),
- ('before-parameter-build.ec2.RunInstances', _fix_subnet),
+ ('before-parameter-build.ec2.RunInstances', _fix_args),
]
|
{"golden_diff": "diff --git a/awscli/customizations/ec2runinstances.py b/awscli/customizations/ec2runinstances.py\n--- a/awscli/customizations/ec2runinstances.py\n+++ b/awscli/customizations/ec2runinstances.py\n@@ -79,22 +79,33 @@\n raise ValueError(msg)\n \n \n-def _fix_subnet(operation, endpoint, params, **kwargs):\n- # If the user has supplied a --subnet-id option AND we also\n- # have inserted an AssociatePublicIpAddress into the network_interfaces\n- # structure, we need to move the subnetId value down into the\n- # network_interfaces structure or we will get a client error from EC2.\n+def _fix_args(operation, endpoint, params, **kwargs):\n+ # The RunInstances request provides some parameters\n+ # such as --subnet-id and --security-group-id that can be specified\n+ # as separate options only if the request DOES NOT include a\n+ # NetworkInterfaces structure. In those cases, the values for\n+ # these parameters must be specified inside the NetworkInterfaces\n+ # structure. This function checks for those parameters\n+ # and fixes them if necessary.\n+ # NOTE: If the user is a default VPC customer, RunInstances\n+ # allows them to specify the security group by name or by id.\n+ # However, in this scenario we can only support id because\n+ # we can't place a group name in the NetworkInterfaces structure.\n if 'network_interfaces' in params:\n ni = params['network_interfaces']\n if 'AssociatePublicIpAddress' in ni[0]:\n if 'subnet_id' in params:\n ni[0]['SubnetId'] = params['subnet_id']\n del params['subnet_id']\n+ if 'security_group_ids' in params:\n+ ni[0]['Groups'] = params['security_group_ids']\n+ del params['security_group_ids']\n+\n \n EVENTS = [\n ('building-argument-table.ec2.run-instances', _add_params),\n ('operation-args-parsed.ec2.run-instances', _check_args),\n- ('before-parameter-build.ec2.RunInstances', _fix_subnet),\n+ ('before-parameter-build.ec2.RunInstances', _fix_args),\n ]\n", "issue": "--associate-public-ip-address option with --security-group-ids\n#501 #502\n\nwhen I ran command with --subnet-id, it works fine but when I add --security-group-ids, it does not work.\nIt seems that same modifications are required.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds two new parameters to the ``ec2 run-instance``\ncommand. The first, ``--secondary-private-ip-addresses`` allows a list\nof IP addresses within the specified subnet to be associated with the\nnew instance. The second, ``--secondary-ip-address-count`` allows you\nto specify how many additional IP addresses you want but the actual\naddress will be assigned for you.\n\nThis functionality (and much more) is also available using the\n``--network-interfaces`` complex argument. 
This just makes two of\nthe most commonly used features available more easily.\n\"\"\"\nfrom awscli.arguments import CustomArgument\n\n# --secondary-private-ip-address\nSECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (\n '[EC2-VPC] A secondary private IP address for the network interface '\n 'or instance. You can specify this multiple times to assign multiple '\n 'secondary IP addresses. If you want additional private IP addresses '\n 'but do not need a specific address, use the '\n '--secondary-private-ip-address-count option.')\n\n# --secondary-private-ip-address-count\nSECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (\n '[EC2-VPC] The number of secondary IP addresses to assign to '\n 'the network interface or instance.')\n\n# --associate-public-ip-address\nASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (\n '[EC2-VPC] If specified a public IP address will be assigned '\n 'to the new instance in a VPC.')\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = SecondaryPrivateIpAddressesArgument(\n name='secondary-private-ip-addresses',\n help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)\n argument_table['secondary-private-ip-addresses'] = arg\n arg = SecondaryPrivateIpAddressCountArgument(\n name='secondary-private-ip-address-count',\n help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)\n argument_table['secondary-private-ip-address-count'] = arg\n arg = AssociatePublicIpAddressArgument(\n name='associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_true', group_name='associate_public_ip')\n argument_table['associate-public-ip-address'] = arg\n arg = NoAssociatePublicIpAddressArgument(\n name='no-associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_false', group_name='associate_public_ip')\n argument_table['no-associate-public-ip-address'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. 
If the user specified\n # the --network-interfaces option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['network_interfaces']:\n for key in ('secondary_private_ip_addresses',\n 'secondary_private_ip_address_count',\n 'associate_public_ip_address'):\n if arg_dict[key]:\n msg = ('Mixing the --network-interfaces option '\n 'with the simple, scalar options is '\n 'not supported.')\n raise ValueError(msg)\n\n\ndef _fix_subnet(operation, endpoint, params, **kwargs):\n # If the user has supplied a --subnet-id option AND we also\n # have inserted an AssociatePublicIpAddress into the network_interfaces\n # structure, we need to move the subnetId value down into the\n # network_interfaces structure or we will get a client error from EC2.\n if 'network_interfaces' in params:\n ni = params['network_interfaces']\n if 'AssociatePublicIpAddress' in ni[0]:\n if 'subnet_id' in params:\n ni[0]['SubnetId'] = params['subnet_id']\n del params['subnet_id']\n\nEVENTS = [\n ('building-argument-table.ec2.run-instances', _add_params),\n ('operation-args-parsed.ec2.run-instances', _check_args),\n ('before-parameter-build.ec2.RunInstances', _fix_subnet),\n ]\n\n\ndef register_runinstances(event_handler):\n # Register all of the events for customizing BundleInstance\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_network_interfaces(params, key, value):\n # Build up the NetworkInterfaces data structure\n if 'network_interfaces' not in params:\n params['network_interfaces'] = [{'DeviceIndex': 0}]\n\n if key == 'PrivateIpAddresses':\n if 'PrivateIpAddresses' not in params['network_interfaces'][0]:\n params['network_interfaces'][0]['PrivateIpAddresses'] = value\n else:\n params['network_interfaces'][0][key] = value\n\n\nclass SecondaryPrivateIpAddressesArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, nargs='*')\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'PrivateIpAddress': v, 'Primary': False} for\n v in value]\n _build_network_interfaces(parameters,\n 'PrivateIpAddresses',\n value)\n\n\nclass SecondaryPrivateIpAddressCountArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, type=int)\n\n def add_to_params(self, parameters, value):\n if value:\n _build_network_interfaces(parameters,\n 'SecondaryPrivateIpAddressCount',\n value)\n\n\nclass AssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is True:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n\n\nclass NoAssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is False:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n", "path": "awscli/customizations/ec2runinstances.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds two new parameters to the ``ec2 run-instance``\ncommand. The first, ``--secondary-private-ip-addresses`` allows a list\nof IP addresses within the specified subnet to be associated with the\nnew instance. The second, ``--secondary-ip-address-count`` allows you\nto specify how many additional IP addresses you want but the actual\naddress will be assigned for you.\n\nThis functionality (and much more) is also available using the\n``--network-interfaces`` complex argument. This just makes two of\nthe most commonly used features available more easily.\n\"\"\"\nfrom awscli.arguments import CustomArgument\n\n# --secondary-private-ip-address\nSECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (\n '[EC2-VPC] A secondary private IP address for the network interface '\n 'or instance. You can specify this multiple times to assign multiple '\n 'secondary IP addresses. If you want additional private IP addresses '\n 'but do not need a specific address, use the '\n '--secondary-private-ip-address-count option.')\n\n# --secondary-private-ip-address-count\nSECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (\n '[EC2-VPC] The number of secondary IP addresses to assign to '\n 'the network interface or instance.')\n\n# --associate-public-ip-address\nASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (\n '[EC2-VPC] If specified a public IP address will be assigned '\n 'to the new instance in a VPC.')\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = SecondaryPrivateIpAddressesArgument(\n name='secondary-private-ip-addresses',\n help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)\n argument_table['secondary-private-ip-addresses'] = arg\n arg = SecondaryPrivateIpAddressCountArgument(\n name='secondary-private-ip-address-count',\n help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)\n argument_table['secondary-private-ip-address-count'] = arg\n arg = AssociatePublicIpAddressArgument(\n name='associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_true', group_name='associate_public_ip')\n argument_table['associate-public-ip-address'] = arg\n arg = NoAssociatePublicIpAddressArgument(\n name='no-associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_false', group_name='associate_public_ip')\n argument_table['no-associate-public-ip-address'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. If the user specified\n # the --network-interfaces option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['network_interfaces']:\n for key in ('secondary_private_ip_addresses',\n 'secondary_private_ip_address_count',\n 'associate_public_ip_address'):\n if arg_dict[key]:\n msg = ('Mixing the --network-interfaces option '\n 'with the simple, scalar options is '\n 'not supported.')\n raise ValueError(msg)\n\n\ndef _fix_args(operation, endpoint, params, **kwargs):\n # The RunInstances request provides some parameters\n # such as --subnet-id and --security-group-id that can be specified\n # as separate options only if the request DOES NOT include a\n # NetworkInterfaces structure. In those cases, the values for\n # these parameters must be specified inside the NetworkInterfaces\n # structure. 
This function checks for those parameters\n # and fixes them if necessary.\n # NOTE: If the user is a default VPC customer, RunInstances\n # allows them to specify the security group by name or by id.\n # However, in this scenario we can only support id because\n # we can't place a group name in the NetworkInterfaces structure.\n if 'network_interfaces' in params:\n ni = params['network_interfaces']\n if 'AssociatePublicIpAddress' in ni[0]:\n if 'subnet_id' in params:\n ni[0]['SubnetId'] = params['subnet_id']\n del params['subnet_id']\n if 'security_group_ids' in params:\n ni[0]['Groups'] = params['security_group_ids']\n del params['security_group_ids']\n\n\nEVENTS = [\n ('building-argument-table.ec2.run-instances', _add_params),\n ('operation-args-parsed.ec2.run-instances', _check_args),\n ('before-parameter-build.ec2.RunInstances', _fix_args),\n ]\n\n\ndef register_runinstances(event_handler):\n # Register all of the events for customizing BundleInstance\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_network_interfaces(params, key, value):\n # Build up the NetworkInterfaces data structure\n if 'network_interfaces' not in params:\n params['network_interfaces'] = [{'DeviceIndex': 0}]\n\n if key == 'PrivateIpAddresses':\n if 'PrivateIpAddresses' not in params['network_interfaces'][0]:\n params['network_interfaces'][0]['PrivateIpAddresses'] = value\n else:\n params['network_interfaces'][0][key] = value\n\n\nclass SecondaryPrivateIpAddressesArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, nargs='*')\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'PrivateIpAddress': v, 'Primary': False} for\n v in value]\n _build_network_interfaces(parameters,\n 'PrivateIpAddresses',\n value)\n\n\nclass SecondaryPrivateIpAddressCountArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, type=int)\n\n def add_to_params(self, parameters, value):\n if value:\n _build_network_interfaces(parameters,\n 'SecondaryPrivateIpAddressCount',\n value)\n\n\nclass AssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is True:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n\n\nclass NoAssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is False:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n", "path": "awscli/customizations/ec2runinstances.py"}]}
| 2,095 | 485 |
gh_patches_debug_17707
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-974
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RecursionError with custom node
In a custom Node, I try to initialize with `CtrlNode.__init__(self, name, terminals=terminals)`, but I get a RecursionError:
``` python
Traceback (most recent call last):
File "/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Flowchart.py", line 871, in nodeMenuTriggered
self.chart.createNode(nodeType, pos=pos)
File "/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Flowchart.py", line 174, in createNode
node = self.library.getNodeType(nodeType)(name)
File "/data/libs.git/JML/python/TBA/nodes.py", line 37, in __init__
CtrlNode.__init__(self, name, terminals=terminals)
File "/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/library/common.py", line 89, in __init__
if hasattr(self, 'uiTemplate'):
File "/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Node.py", line 193, in __getattr__
if attr not in self.terminals:
File "/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Node.py", line 193, in __getattr__
if attr not in self.terminals:
(...)
File "/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Node.py", line 193, in __getattr__
if attr not in self.terminals:
RecursionError: maximum recursion depth exceeded while calling a Python object
```
The problem is that `__getattr__` checks for `self.terminals` which is not yet defined, so `__getattr__` is called again and so on.
I think putting the `if ui is None:` block after `Node.__init__` in `CtrlNode.__init__` would do the trick.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/flowchart/library/common.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from ...Qt import QtCore, QtGui
3 from ...widgets.SpinBox import SpinBox
4 #from ...SignalProxy import SignalProxy
5 from ...WidgetGroup import WidgetGroup
6 #from ColorMapper import ColorMapper
7 from ..Node import Node
8 import numpy as np
9 from ...widgets.ColorButton import ColorButton
10 try:
11 import metaarray
12 HAVE_METAARRAY = True
13 except:
14 HAVE_METAARRAY = False
15
16
17 def generateUi(opts):
18 """Convenience function for generating common UI types"""
19 widget = QtGui.QWidget()
20 l = QtGui.QFormLayout()
21 l.setSpacing(0)
22 widget.setLayout(l)
23 ctrls = {}
24 row = 0
25 for opt in opts:
26 if len(opt) == 2:
27 k, t = opt
28 o = {}
29 elif len(opt) == 3:
30 k, t, o = opt
31 else:
32 raise Exception("Widget specification must be (name, type) or (name, type, {opts})")
33
34 ## clean out these options so they don't get sent to SpinBox
35 hidden = o.pop('hidden', False)
36 tip = o.pop('tip', None)
37
38 if t == 'intSpin':
39 w = QtGui.QSpinBox()
40 if 'max' in o:
41 w.setMaximum(o['max'])
42 if 'min' in o:
43 w.setMinimum(o['min'])
44 if 'value' in o:
45 w.setValue(o['value'])
46 elif t == 'doubleSpin':
47 w = QtGui.QDoubleSpinBox()
48 if 'max' in o:
49 w.setMaximum(o['max'])
50 if 'min' in o:
51 w.setMinimum(o['min'])
52 if 'value' in o:
53 w.setValue(o['value'])
54 elif t == 'spin':
55 w = SpinBox()
56 w.setOpts(**o)
57 elif t == 'check':
58 w = QtGui.QCheckBox()
59 if 'checked' in o:
60 w.setChecked(o['checked'])
61 elif t == 'combo':
62 w = QtGui.QComboBox()
63 for i in o['values']:
64 w.addItem(i)
65 #elif t == 'colormap':
66 #w = ColorMapper()
67 elif t == 'color':
68 w = ColorButton()
69 else:
70 raise Exception("Unknown widget type '%s'" % str(t))
71
72 if tip is not None:
73 w.setToolTip(tip)
74 w.setObjectName(k)
75 l.addRow(k, w)
76 if hidden:
77 w.hide()
78 label = l.labelForField(w)
79 label.hide()
80
81 ctrls[k] = w
82 w.rowNum = row
83 row += 1
84 group = WidgetGroup(widget)
85 return widget, group, ctrls
86
87
88 class CtrlNode(Node):
89 """Abstract class for nodes with auto-generated control UI"""
90
91 sigStateChanged = QtCore.Signal(object)
92
93 def __init__(self, name, ui=None, terminals=None):
94 if ui is None:
95 if hasattr(self, 'uiTemplate'):
96 ui = self.uiTemplate
97 else:
98 ui = []
99 if terminals is None:
100 terminals = {'In': {'io': 'in'}, 'Out': {'io': 'out', 'bypass': 'In'}}
101 Node.__init__(self, name=name, terminals=terminals)
102
103 self.ui, self.stateGroup, self.ctrls = generateUi(ui)
104 self.stateGroup.sigChanged.connect(self.changed)
105
106 def ctrlWidget(self):
107 return self.ui
108
109 def changed(self):
110 self.update()
111 self.sigStateChanged.emit(self)
112
113 def process(self, In, display=True):
114 out = self.processData(In)
115 return {'Out': out}
116
117 def saveState(self):
118 state = Node.saveState(self)
119 state['ctrl'] = self.stateGroup.state()
120 return state
121
122 def restoreState(self, state):
123 Node.restoreState(self, state)
124 if self.stateGroup is not None:
125 self.stateGroup.setState(state.get('ctrl', {}))
126
127 def hideRow(self, name):
128 w = self.ctrls[name]
129 l = self.ui.layout().labelForField(w)
130 w.hide()
131 l.hide()
132
133 def showRow(self, name):
134 w = self.ctrls[name]
135 l = self.ui.layout().labelForField(w)
136 w.show()
137 l.show()
138
139
140 class PlottingCtrlNode(CtrlNode):
141 """Abstract class for CtrlNodes that can connect to plots."""
142
143 def __init__(self, name, ui=None, terminals=None):
144 #print "PlottingCtrlNode.__init__ called."
145 CtrlNode.__init__(self, name, ui=ui, terminals=terminals)
146 self.plotTerminal = self.addOutput('plot', optional=True)
147
148 def connected(self, term, remote):
149 CtrlNode.connected(self, term, remote)
150 if term is not self.plotTerminal:
151 return
152 node = remote.node()
153 node.sigPlotChanged.connect(self.connectToPlot)
154 self.connectToPlot(node)
155
156 def disconnected(self, term, remote):
157 CtrlNode.disconnected(self, term, remote)
158 if term is not self.plotTerminal:
159 return
160 remote.node().sigPlotChanged.disconnect(self.connectToPlot)
161 self.disconnectFromPlot(remote.node().getPlot())
162
163 def connectToPlot(self, node):
164 """Define what happens when the node is connected to a plot"""
165 raise Exception("Must be re-implemented in subclass")
166
167 def disconnectFromPlot(self, plot):
168 """Define what happens when the node is disconnected from a plot"""
169 raise Exception("Must be re-implemented in subclass")
170
171 def process(self, In, display=True):
172 out = CtrlNode.process(self, In, display)
173 out['plot'] = None
174 return out
175
176
177 def metaArrayWrapper(fn):
178 def newFn(self, data, *args, **kargs):
179 if HAVE_METAARRAY and (hasattr(data, 'implements') and data.implements('MetaArray')):
180 d1 = fn(self, data.view(np.ndarray), *args, **kargs)
181 info = data.infoCopy()
182 if d1.shape != data.shape:
183 for i in range(data.ndim):
184 if 'values' in info[i]:
185 info[i]['values'] = info[i]['values'][:d1.shape[i]]
186 return metaarray.MetaArray(d1, info=info)
187 else:
188 return fn(self, data, *args, **kargs)
189 return newFn
190
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/flowchart/library/common.py b/pyqtgraph/flowchart/library/common.py
--- a/pyqtgraph/flowchart/library/common.py
+++ b/pyqtgraph/flowchart/library/common.py
@@ -91,14 +91,15 @@
sigStateChanged = QtCore.Signal(object)
def __init__(self, name, ui=None, terminals=None):
+ if terminals is None:
+ terminals = {'In': {'io': 'in'}, 'Out': {'io': 'out', 'bypass': 'In'}}
+ Node.__init__(self, name=name, terminals=terminals)
+
if ui is None:
if hasattr(self, 'uiTemplate'):
ui = self.uiTemplate
else:
ui = []
- if terminals is None:
- terminals = {'In': {'io': 'in'}, 'Out': {'io': 'out', 'bypass': 'In'}}
- Node.__init__(self, name=name, terminals=terminals)
self.ui, self.stateGroup, self.ctrls = generateUi(ui)
self.stateGroup.sigChanged.connect(self.changed)
|
{"golden_diff": "diff --git a/pyqtgraph/flowchart/library/common.py b/pyqtgraph/flowchart/library/common.py\n--- a/pyqtgraph/flowchart/library/common.py\n+++ b/pyqtgraph/flowchart/library/common.py\n@@ -91,14 +91,15 @@\n sigStateChanged = QtCore.Signal(object)\n \n def __init__(self, name, ui=None, terminals=None):\n+ if terminals is None:\n+ terminals = {'In': {'io': 'in'}, 'Out': {'io': 'out', 'bypass': 'In'}}\n+ Node.__init__(self, name=name, terminals=terminals)\n+ \n if ui is None:\n if hasattr(self, 'uiTemplate'):\n ui = self.uiTemplate\n else:\n ui = []\n- if terminals is None:\n- terminals = {'In': {'io': 'in'}, 'Out': {'io': 'out', 'bypass': 'In'}}\n- Node.__init__(self, name=name, terminals=terminals)\n \n self.ui, self.stateGroup, self.ctrls = generateUi(ui)\n self.stateGroup.sigChanged.connect(self.changed)\n", "issue": "RecursionError with custom node\nIn a custom Node, I try to initialize with `CtrlNode.__init__(self, name, terminals=terminals)`, but I get a RecursionError:\n\n``` python\nTraceback (most recent call last):\n File \"/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Flowchart.py\", line 871, in nodeMenuTriggered\n self.chart.createNode(nodeType, pos=pos)\n File \"/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Flowchart.py\", line 174, in createNode\n node = self.library.getNodeType(nodeType)(name)\n File \"/data/libs.git/JML/python/TBA/nodes.py\", line 37, in __init__\n CtrlNode.__init__(self, name, terminals=terminals)\n File \"/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/library/common.py\", line 89, in __init__\n if hasattr(self, 'uiTemplate'):\n File \"/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Node.py\", line 193, in __getattr__\n if attr not in self.terminals:\n File \"/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Node.py\", line 193, in __getattr__\n if attr not in self.terminals:\n(...)\n File \"/usr/lib/python3.5/site-packages/pyqtgraph/flowchart/Node.py\", line 193, in __getattr__\n if attr not in self.terminals:\nRecursionError: maximum recursion depth exceeded while calling a Python object\n```\n\nThe problem is that `__getattr__` checks for `self.terminals` which is not yet defined, so `__getattr__` is called again and so on.\n\nI think putting the `if ui is None:` block after `Node.__init__` in `CtrlNode.__init__` would do the trick.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom ...Qt import QtCore, QtGui\nfrom ...widgets.SpinBox import SpinBox\n#from ...SignalProxy import SignalProxy\nfrom ...WidgetGroup import WidgetGroup\n#from ColorMapper import ColorMapper\nfrom ..Node import Node\nimport numpy as np\nfrom ...widgets.ColorButton import ColorButton\ntry:\n import metaarray\n HAVE_METAARRAY = True\nexcept:\n HAVE_METAARRAY = False\n\n\ndef generateUi(opts):\n \"\"\"Convenience function for generating common UI types\"\"\"\n widget = QtGui.QWidget()\n l = QtGui.QFormLayout()\n l.setSpacing(0)\n widget.setLayout(l)\n ctrls = {}\n row = 0\n for opt in opts:\n if len(opt) == 2:\n k, t = opt\n o = {}\n elif len(opt) == 3:\n k, t, o = opt\n else:\n raise Exception(\"Widget specification must be (name, type) or (name, type, {opts})\")\n \n ## clean out these options so they don't get sent to SpinBox\n hidden = o.pop('hidden', False)\n tip = o.pop('tip', None)\n\n if t == 'intSpin':\n w = QtGui.QSpinBox()\n if 'max' in o:\n w.setMaximum(o['max'])\n if 'min' in o:\n w.setMinimum(o['min'])\n if 'value' in o:\n w.setValue(o['value'])\n elif t == 'doubleSpin':\n w = QtGui.QDoubleSpinBox()\n if 'max' in 
o:\n w.setMaximum(o['max'])\n if 'min' in o:\n w.setMinimum(o['min']) \n if 'value' in o:\n w.setValue(o['value'])\n elif t == 'spin':\n w = SpinBox()\n w.setOpts(**o)\n elif t == 'check':\n w = QtGui.QCheckBox()\n if 'checked' in o:\n w.setChecked(o['checked'])\n elif t == 'combo':\n w = QtGui.QComboBox()\n for i in o['values']:\n w.addItem(i)\n #elif t == 'colormap':\n #w = ColorMapper()\n elif t == 'color':\n w = ColorButton()\n else:\n raise Exception(\"Unknown widget type '%s'\" % str(t))\n\n if tip is not None:\n w.setToolTip(tip)\n w.setObjectName(k)\n l.addRow(k, w)\n if hidden:\n w.hide()\n label = l.labelForField(w)\n label.hide()\n \n ctrls[k] = w\n w.rowNum = row\n row += 1\n group = WidgetGroup(widget)\n return widget, group, ctrls\n\n\nclass CtrlNode(Node):\n \"\"\"Abstract class for nodes with auto-generated control UI\"\"\"\n \n sigStateChanged = QtCore.Signal(object)\n \n def __init__(self, name, ui=None, terminals=None):\n if ui is None:\n if hasattr(self, 'uiTemplate'):\n ui = self.uiTemplate\n else:\n ui = []\n if terminals is None:\n terminals = {'In': {'io': 'in'}, 'Out': {'io': 'out', 'bypass': 'In'}}\n Node.__init__(self, name=name, terminals=terminals)\n \n self.ui, self.stateGroup, self.ctrls = generateUi(ui)\n self.stateGroup.sigChanged.connect(self.changed)\n \n def ctrlWidget(self):\n return self.ui\n \n def changed(self):\n self.update()\n self.sigStateChanged.emit(self)\n\n def process(self, In, display=True):\n out = self.processData(In)\n return {'Out': out}\n \n def saveState(self):\n state = Node.saveState(self)\n state['ctrl'] = self.stateGroup.state()\n return state\n \n def restoreState(self, state):\n Node.restoreState(self, state)\n if self.stateGroup is not None:\n self.stateGroup.setState(state.get('ctrl', {}))\n \n def hideRow(self, name):\n w = self.ctrls[name]\n l = self.ui.layout().labelForField(w)\n w.hide()\n l.hide()\n \n def showRow(self, name):\n w = self.ctrls[name]\n l = self.ui.layout().labelForField(w)\n w.show()\n l.show()\n\n\nclass PlottingCtrlNode(CtrlNode):\n \"\"\"Abstract class for CtrlNodes that can connect to plots.\"\"\"\n \n def __init__(self, name, ui=None, terminals=None):\n #print \"PlottingCtrlNode.__init__ called.\"\n CtrlNode.__init__(self, name, ui=ui, terminals=terminals)\n self.plotTerminal = self.addOutput('plot', optional=True)\n \n def connected(self, term, remote):\n CtrlNode.connected(self, term, remote)\n if term is not self.plotTerminal:\n return\n node = remote.node()\n node.sigPlotChanged.connect(self.connectToPlot)\n self.connectToPlot(node) \n \n def disconnected(self, term, remote):\n CtrlNode.disconnected(self, term, remote)\n if term is not self.plotTerminal:\n return\n remote.node().sigPlotChanged.disconnect(self.connectToPlot)\n self.disconnectFromPlot(remote.node().getPlot()) \n \n def connectToPlot(self, node):\n \"\"\"Define what happens when the node is connected to a plot\"\"\"\n raise Exception(\"Must be re-implemented in subclass\")\n \n def disconnectFromPlot(self, plot):\n \"\"\"Define what happens when the node is disconnected from a plot\"\"\"\n raise Exception(\"Must be re-implemented in subclass\")\n\n def process(self, In, display=True):\n out = CtrlNode.process(self, In, display)\n out['plot'] = None\n return out\n\n\ndef metaArrayWrapper(fn):\n def newFn(self, data, *args, **kargs):\n if HAVE_METAARRAY and (hasattr(data, 'implements') and data.implements('MetaArray')):\n d1 = fn(self, data.view(np.ndarray), *args, **kargs)\n info = data.infoCopy()\n if d1.shape != data.shape:\n for i 
in range(data.ndim):\n if 'values' in info[i]:\n info[i]['values'] = info[i]['values'][:d1.shape[i]]\n return metaarray.MetaArray(d1, info=info)\n else:\n return fn(self, data, *args, **kargs)\n return newFn\n\n", "path": "pyqtgraph/flowchart/library/common.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom ...Qt import QtCore, QtGui\nfrom ...widgets.SpinBox import SpinBox\n#from ...SignalProxy import SignalProxy\nfrom ...WidgetGroup import WidgetGroup\n#from ColorMapper import ColorMapper\nfrom ..Node import Node\nimport numpy as np\nfrom ...widgets.ColorButton import ColorButton\ntry:\n import metaarray\n HAVE_METAARRAY = True\nexcept:\n HAVE_METAARRAY = False\n\n\ndef generateUi(opts):\n \"\"\"Convenience function for generating common UI types\"\"\"\n widget = QtGui.QWidget()\n l = QtGui.QFormLayout()\n l.setSpacing(0)\n widget.setLayout(l)\n ctrls = {}\n row = 0\n for opt in opts:\n if len(opt) == 2:\n k, t = opt\n o = {}\n elif len(opt) == 3:\n k, t, o = opt\n else:\n raise Exception(\"Widget specification must be (name, type) or (name, type, {opts})\")\n \n ## clean out these options so they don't get sent to SpinBox\n hidden = o.pop('hidden', False)\n tip = o.pop('tip', None)\n\n if t == 'intSpin':\n w = QtGui.QSpinBox()\n if 'max' in o:\n w.setMaximum(o['max'])\n if 'min' in o:\n w.setMinimum(o['min'])\n if 'value' in o:\n w.setValue(o['value'])\n elif t == 'doubleSpin':\n w = QtGui.QDoubleSpinBox()\n if 'max' in o:\n w.setMaximum(o['max'])\n if 'min' in o:\n w.setMinimum(o['min']) \n if 'value' in o:\n w.setValue(o['value'])\n elif t == 'spin':\n w = SpinBox()\n w.setOpts(**o)\n elif t == 'check':\n w = QtGui.QCheckBox()\n if 'checked' in o:\n w.setChecked(o['checked'])\n elif t == 'combo':\n w = QtGui.QComboBox()\n for i in o['values']:\n w.addItem(i)\n #elif t == 'colormap':\n #w = ColorMapper()\n elif t == 'color':\n w = ColorButton()\n else:\n raise Exception(\"Unknown widget type '%s'\" % str(t))\n\n if tip is not None:\n w.setToolTip(tip)\n w.setObjectName(k)\n l.addRow(k, w)\n if hidden:\n w.hide()\n label = l.labelForField(w)\n label.hide()\n \n ctrls[k] = w\n w.rowNum = row\n row += 1\n group = WidgetGroup(widget)\n return widget, group, ctrls\n\n\nclass CtrlNode(Node):\n \"\"\"Abstract class for nodes with auto-generated control UI\"\"\"\n \n sigStateChanged = QtCore.Signal(object)\n \n def __init__(self, name, ui=None, terminals=None):\n if terminals is None:\n terminals = {'In': {'io': 'in'}, 'Out': {'io': 'out', 'bypass': 'In'}}\n Node.__init__(self, name=name, terminals=terminals)\n \n if ui is None:\n if hasattr(self, 'uiTemplate'):\n ui = self.uiTemplate\n else:\n ui = []\n \n self.ui, self.stateGroup, self.ctrls = generateUi(ui)\n self.stateGroup.sigChanged.connect(self.changed)\n \n def ctrlWidget(self):\n return self.ui\n \n def changed(self):\n self.update()\n self.sigStateChanged.emit(self)\n\n def process(self, In, display=True):\n out = self.processData(In)\n return {'Out': out}\n \n def saveState(self):\n state = Node.saveState(self)\n state['ctrl'] = self.stateGroup.state()\n return state\n \n def restoreState(self, state):\n Node.restoreState(self, state)\n if self.stateGroup is not None:\n self.stateGroup.setState(state.get('ctrl', {}))\n \n def hideRow(self, name):\n w = self.ctrls[name]\n l = self.ui.layout().labelForField(w)\n w.hide()\n l.hide()\n \n def showRow(self, name):\n w = self.ctrls[name]\n l = self.ui.layout().labelForField(w)\n w.show()\n l.show()\n\n\nclass PlottingCtrlNode(CtrlNode):\n \"\"\"Abstract class for CtrlNodes 
that can connect to plots.\"\"\"\n \n def __init__(self, name, ui=None, terminals=None):\n #print \"PlottingCtrlNode.__init__ called.\"\n CtrlNode.__init__(self, name, ui=ui, terminals=terminals)\n self.plotTerminal = self.addOutput('plot', optional=True)\n \n def connected(self, term, remote):\n CtrlNode.connected(self, term, remote)\n if term is not self.plotTerminal:\n return\n node = remote.node()\n node.sigPlotChanged.connect(self.connectToPlot)\n self.connectToPlot(node) \n \n def disconnected(self, term, remote):\n CtrlNode.disconnected(self, term, remote)\n if term is not self.plotTerminal:\n return\n remote.node().sigPlotChanged.disconnect(self.connectToPlot)\n self.disconnectFromPlot(remote.node().getPlot()) \n \n def connectToPlot(self, node):\n \"\"\"Define what happens when the node is connected to a plot\"\"\"\n raise Exception(\"Must be re-implemented in subclass\")\n \n def disconnectFromPlot(self, plot):\n \"\"\"Define what happens when the node is disconnected from a plot\"\"\"\n raise Exception(\"Must be re-implemented in subclass\")\n\n def process(self, In, display=True):\n out = CtrlNode.process(self, In, display)\n out['plot'] = None\n return out\n\n\ndef metaArrayWrapper(fn):\n def newFn(self, data, *args, **kargs):\n if HAVE_METAARRAY and (hasattr(data, 'implements') and data.implements('MetaArray')):\n d1 = fn(self, data.view(np.ndarray), *args, **kargs)\n info = data.infoCopy()\n if d1.shape != data.shape:\n for i in range(data.ndim):\n if 'values' in info[i]:\n info[i]['values'] = info[i]['values'][:d1.shape[i]]\n return metaarray.MetaArray(d1, info=info)\n else:\n return fn(self, data, *args, **kargs)\n return newFn\n\n", "path": "pyqtgraph/flowchart/library/common.py"}]}
| 2,544 | 248 |
gh_patches_debug_36921
|
rasdani/github-patches
|
git_diff
|
dmlc__dgl-5147
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Sparse] Improve the efficiency of multiplication between SparseMatrix and DiagMatrix.
## 🔨Work Item
**IMPORTANT:**
* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.
* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.
Project tracker: https://github.com/orgs/dmlc/projects/2
## Description
<!-- short description of the work item -->
## Depending work items or issues
<!-- what must be done before this -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/dgl/sparse/matmul.py`
Content:
```
1 """Matmul ops for SparseMatrix"""
2 # pylint: disable=invalid-name
3 from typing import Union
4
5 import torch
6
7 from .diag_matrix import diag, DiagMatrix
8
9 from .sparse_matrix import SparseMatrix
10
11 __all__ = ["spmm", "bspmm", "spspmm", "mm"]
12
13
14 def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
15 """Multiply a sparse matrix by a dense matrix.
16
17 Parameters
18 ----------
19 A : SparseMatrix or DiagMatrix
20 Sparse matrix of shape (N, M) with values of shape (nnz)
21 X : torch.Tensor
22 Dense tensor of shape (M, F) or (M)
23
24 Returns
25 -------
26 torch.Tensor
27 The multiplication result of shape (N, F) or (N)
28
29 Examples
30 --------
31
32 >>> row = torch.tensor([0, 1, 1])
33 >>> col = torch.tensor([1, 0, 1])
34 >>> val = torch.randn(len(row))
35 >>> A = from_coo(row, col, val)
36 >>> X = torch.randn(2, 3)
37 >>> result = dgl.sparse.spmm(A, X)
38 >>> print(type(result))
39 <class 'torch.Tensor'>
40 >>> print(result.shape)
41 torch.Size([2, 3])
42 """
43 assert isinstance(
44 A, (SparseMatrix, DiagMatrix)
45 ), f"Expect arg1 to be a SparseMatrix or DiagMatrix object, got {type(A)}"
46 assert isinstance(
47 X, torch.Tensor
48 ), f"Expect arg2 to be a torch.Tensor, got {type(X)}"
49
50 # The input is a DiagMatrix. Cast it to SparseMatrix
51 if not isinstance(A, SparseMatrix):
52 A = A.as_sparse()
53 return torch.ops.dgl_sparse.spmm(A.c_sparse_matrix, X)
54
55
56 def bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:
57 """Multiply a sparse matrix by a dense matrix by batches.
58
59 Parameters
60 ----------
61 A : SparseMatrix or DiagMatrix
62 Sparse matrix of shape (N, M, B) with values of shape (nnz)
63 X : torch.Tensor
64 Dense tensor of shape (M, F, B)
65
66 Returns
67 -------
68 torch.Tensor
69 The multiplication result of shape (N, F, B)
70
71 Examples
72 --------
73
74 >>> row = torch.tensor([0, 1, 1])
75 >>> col = torch.tensor([1, 0, 2])
76 >>> val = torch.randn(len(row), 2)
77 >>> A = from_coo(row, col, val, shape=(3, 3))
78 >>> X = torch.randn(3, 3, 2)
79 >>> result = dgl.sparse.bspmm(A, X)
80 >>> print(type(result))
81 <class 'torch.Tensor'>
82 >>> print(result.shape)
83 torch.Size([3, 3, 2])
84 """
85 assert isinstance(
86 A, (SparseMatrix, DiagMatrix)
87 ), f"Expect arg1 to be a SparseMatrix or DiagMatrix object, got {type(A)}"
88 assert isinstance(
89 X, torch.Tensor
90 ), f"Expect arg2 to be a torch.Tensor, got {type(X)}"
91 return spmm(A, X)
92
93
94 def _diag_diag_mm(A1: DiagMatrix, A2: DiagMatrix) -> DiagMatrix:
95 """Internal function for multiplying a diagonal matrix by a diagonal matrix
96
97 Parameters
98 ----------
99 A1 : DiagMatrix
100 Matrix of shape (N, M), with values of shape (nnz1)
101 A2 : DiagMatrix
102 Matrix of shape (M, P), with values of shape (nnz2)
103
104 Returns
105 -------
106 DiagMatrix
107 The result of multiplication.
108 """
109 M, N = A1.shape
110 N, P = A2.shape
111 common_diag_len = min(M, N, P)
112 new_diag_len = min(M, P)
113 diag_val = torch.zeros(new_diag_len)
114 diag_val[:common_diag_len] = (
115 A1.val[:common_diag_len] * A2.val[:common_diag_len]
116 )
117 return diag(diag_val.to(A1.device), (M, P))
118
119
120 def spspmm(
121 A1: Union[SparseMatrix, DiagMatrix], A2: Union[SparseMatrix, DiagMatrix]
122 ) -> Union[SparseMatrix, DiagMatrix]:
123 """Multiply a sparse matrix by a sparse matrix. The non-zero values of the
124 two sparse matrices must be 1D.
125
126 Parameters
127 ----------
128 A1 : SparseMatrix or DiagMatrix
129 Sparse matrix of shape (N, M) with values of shape (nnz)
130 A2 : SparseMatrix or DiagMatrix
131 Sparse matrix of shape (M, P) with values of shape (nnz)
132
133 Returns
134 -------
135 SparseMatrix or DiagMatrix
136 The result of multiplication. It is a DiagMatrix object if both matrices
137 are DiagMatrix objects. It is a SparseMatrix object otherwise.
138
139 Examples
140 --------
141
142 >>> row1 = torch.tensor([0, 1, 1])
143 >>> col1 = torch.tensor([1, 0, 1])
144 >>> val1 = torch.ones(len(row1))
145 >>> A1 = from_coo(row1, col1, val1)
146
147 >>> row2 = torch.tensor([0, 1, 1])
148 >>> col2 = torch.tensor([0, 2, 1])
149 >>> val2 = torch.ones(len(row2))
150 >>> A2 = from_coo(row2, col2, val2)
151 >>> result = dgl.sparse.spspmm(A1, A2)
152 >>> print(result)
153 SparseMatrix(indices=tensor([[0, 0, 1, 1, 1],
154 [1, 2, 0, 1, 2]]),
155 values=tensor([1., 1., 1., 1., 1.]),
156 shape=(2, 3), nnz=5)
157 """
158 assert isinstance(
159 A1, (SparseMatrix, DiagMatrix)
160 ), f"Expect A1 to be a SparseMatrix or DiagMatrix object, got {type(A1)}"
161 assert isinstance(
162 A2, (SparseMatrix, DiagMatrix)
163 ), f"Expect A2 to be a SparseMatrix or DiagMatrix object, got {type(A2)}"
164
165 if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):
166 return _diag_diag_mm(A1, A2)
167 if isinstance(A1, DiagMatrix):
168 A1 = A1.as_sparse()
169 if isinstance(A2, DiagMatrix):
170 A2 = A2.as_sparse()
171 return SparseMatrix(
172 torch.ops.dgl_sparse.spspmm(A1.c_sparse_matrix, A2.c_sparse_matrix)
173 )
174
175
176 def mm(
177 A1: Union[SparseMatrix, DiagMatrix],
178 A2: Union[torch.Tensor, SparseMatrix, DiagMatrix],
179 ) -> Union[torch.Tensor, SparseMatrix, DiagMatrix]:
180 """Multiply a sparse/diagonal matrix by a dense/sparse/diagonal matrix.
181 If an input is a SparseMatrix or DiagMatrix, its non-zero values should
182 be 1-D.
183
184 Parameters
185 ----------
186 A1 : SparseMatrix or DiagMatrix
187 Matrix of shape (N, M), with values of shape (nnz1)
188 A2 : torch.Tensor, SparseMatrix, or DiagMatrix
189 Matrix of shape (M, P). If it is a SparseMatrix or DiagMatrix,
190 it should have values of shape (nnz2).
191
192 Returns
193 -------
194 torch.Tensor or DiagMatrix or SparseMatrix
195 The result of multiplication of shape (N, P)
196
197 * It is a dense torch tensor if :attr:`A2` is so.
198 * It is a DiagMatrix object if both :attr:`A1` and :attr:`A2` are so.
199 * It is a SparseMatrix object otherwise.
200
201 Examples
202 --------
203
204 >>> val = torch.randn(3)
205 >>> A1 = diag(val)
206 >>> A2 = torch.randn(3, 2)
207 >>> result = dgl.sparse.mm(A1, A2)
208 >>> print(type(result))
209 <class 'torch.Tensor'>
210 >>> print(result.shape)
211 torch.Size([3, 2])
212 """
213 assert isinstance(
214 A1, (SparseMatrix, DiagMatrix)
215 ), f"Expect arg1 to be a SparseMatrix, or DiagMatrix object, got {type(A1)}."
216 assert isinstance(A2, (torch.Tensor, SparseMatrix, DiagMatrix)), (
217 f"Expect arg2 to be a torch Tensor, SparseMatrix, or DiagMatrix"
218 f"object, got {type(A2)}."
219 )
220 if isinstance(A2, torch.Tensor):
221 return spmm(A1, A2)
222 if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):
223 return _diag_diag_mm(A1, A2)
224 return spspmm(A1, A2)
225
226
227 SparseMatrix.__matmul__ = mm
228 DiagMatrix.__matmul__ = mm
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/dgl/sparse/matmul.py b/python/dgl/sparse/matmul.py
--- a/python/dgl/sparse/matmul.py
+++ b/python/dgl/sparse/matmul.py
@@ -6,7 +6,7 @@
from .diag_matrix import diag, DiagMatrix
-from .sparse_matrix import SparseMatrix
+from .sparse_matrix import SparseMatrix, val_like
__all__ = ["spmm", "bspmm", "spspmm", "mm"]
@@ -117,6 +117,62 @@
return diag(diag_val.to(A1.device), (M, P))
+def _sparse_diag_mm(A, D):
+ """Internal function for multiplying a sparse matrix by a diagonal matrix.
+
+ Parameters
+ ----------
+ A : SparseMatrix
+ Matrix of shape (N, M), with values of shape (nnz1)
+ D : DiagMatrix
+ Matrix of shape (M, P), with values of shape (nnz2)
+
+ Returns
+ -------
+ SparseMatrix
+ SparseMatrix with shape (N, P)
+ """
+ assert (
+ A.shape[1] == D.shape[0]
+ ), f"The second dimension of SparseMatrix should be equal to the first \
+ dimension of DiagMatrix in matmul(SparseMatrix, DiagMatrix), but the \
+ shapes of SparseMatrix and DiagMatrix are {A.shape} and {D.shape} \
+ respectively."
+ assert (
+ D.shape[0] == D.shape[1]
+ ), f"The DiagMatrix should be a square in matmul(SparseMatrix, DiagMatrix) \
+ but got {D.shape}"
+ return val_like(A, D.val[A.col] * A.val)
+
+
+def _diag_sparse_mm(D, A):
+ """Internal function for multiplying a diag matrix by a sparse matrix.
+
+ Parameters
+ ----------
+ D : DiagMatrix
+ Matrix of shape (N, M), with values of shape (nnz1)
+ A : DiagMatrix
+ Matrix of shape (M, P), with values of shape (nnz2)
+
+ Returns
+ -------
+ SparseMatrix
+ SparseMatrix with shape (N, P)
+ """
+ assert (
+ D.shape[1] == A.shape[0]
+ ), f"The second dimension of DiagMatrix should be equal to the first \
+ dimension of SparseMatrix in matmul(DiagMatrix, SparseMatrix), but the \
+ shapes of DiagMatrix and SparseMatrix are {D.shape} and {A.shape} \
+ respectively."
+ assert (
+ D.shape[0] == D.shape[1]
+ ), f"The DiagMatrix should be a square in matmul(DiagMatrix, SparseMatrix) \
+ but got {D.shape}"
+ return val_like(A, D.val[A.row] * A.val)
+
+
def spspmm(
A1: Union[SparseMatrix, DiagMatrix], A2: Union[SparseMatrix, DiagMatrix]
) -> Union[SparseMatrix, DiagMatrix]:
@@ -165,9 +221,9 @@
if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):
return _diag_diag_mm(A1, A2)
if isinstance(A1, DiagMatrix):
- A1 = A1.as_sparse()
+ return _diag_sparse_mm(A1, A2)
if isinstance(A2, DiagMatrix):
- A2 = A2.as_sparse()
+ return _sparse_diag_mm(A1, A2)
return SparseMatrix(
torch.ops.dgl_sparse.spspmm(A1.c_sparse_matrix, A2.c_sparse_matrix)
)
|
{"golden_diff": "diff --git a/python/dgl/sparse/matmul.py b/python/dgl/sparse/matmul.py\n--- a/python/dgl/sparse/matmul.py\n+++ b/python/dgl/sparse/matmul.py\n@@ -6,7 +6,7 @@\n \n from .diag_matrix import diag, DiagMatrix\n \n-from .sparse_matrix import SparseMatrix\n+from .sparse_matrix import SparseMatrix, val_like\n \n __all__ = [\"spmm\", \"bspmm\", \"spspmm\", \"mm\"]\n \n@@ -117,6 +117,62 @@\n return diag(diag_val.to(A1.device), (M, P))\n \n \n+def _sparse_diag_mm(A, D):\n+ \"\"\"Internal function for multiplying a sparse matrix by a diagonal matrix.\n+\n+ Parameters\n+ ----------\n+ A : SparseMatrix\n+ Matrix of shape (N, M), with values of shape (nnz1)\n+ D : DiagMatrix\n+ Matrix of shape (M, P), with values of shape (nnz2)\n+\n+ Returns\n+ -------\n+ SparseMatrix\n+ SparseMatrix with shape (N, P)\n+ \"\"\"\n+ assert (\n+ A.shape[1] == D.shape[0]\n+ ), f\"The second dimension of SparseMatrix should be equal to the first \\\n+ dimension of DiagMatrix in matmul(SparseMatrix, DiagMatrix), but the \\\n+ shapes of SparseMatrix and DiagMatrix are {A.shape} and {D.shape} \\\n+ respectively.\"\n+ assert (\n+ D.shape[0] == D.shape[1]\n+ ), f\"The DiagMatrix should be a square in matmul(SparseMatrix, DiagMatrix) \\\n+ but got {D.shape}\"\n+ return val_like(A, D.val[A.col] * A.val)\n+\n+\n+def _diag_sparse_mm(D, A):\n+ \"\"\"Internal function for multiplying a diag matrix by a sparse matrix.\n+\n+ Parameters\n+ ----------\n+ D : DiagMatrix\n+ Matrix of shape (N, M), with values of shape (nnz1)\n+ A : DiagMatrix\n+ Matrix of shape (M, P), with values of shape (nnz2)\n+\n+ Returns\n+ -------\n+ SparseMatrix\n+ SparseMatrix with shape (N, P)\n+ \"\"\"\n+ assert (\n+ D.shape[1] == A.shape[0]\n+ ), f\"The second dimension of DiagMatrix should be equal to the first \\\n+ dimension of SparseMatrix in matmul(DiagMatrix, SparseMatrix), but the \\\n+ shapes of DiagMatrix and SparseMatrix are {D.shape} and {A.shape} \\\n+ respectively.\"\n+ assert (\n+ D.shape[0] == D.shape[1]\n+ ), f\"The DiagMatrix should be a square in matmul(DiagMatrix, SparseMatrix) \\\n+ but got {D.shape}\"\n+ return val_like(A, D.val[A.row] * A.val)\n+\n+\n def spspmm(\n A1: Union[SparseMatrix, DiagMatrix], A2: Union[SparseMatrix, DiagMatrix]\n ) -> Union[SparseMatrix, DiagMatrix]:\n@@ -165,9 +221,9 @@\n if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):\n return _diag_diag_mm(A1, A2)\n if isinstance(A1, DiagMatrix):\n- A1 = A1.as_sparse()\n+ return _diag_sparse_mm(A1, A2)\n if isinstance(A2, DiagMatrix):\n- A2 = A2.as_sparse()\n+ return _sparse_diag_mm(A1, A2)\n return SparseMatrix(\n torch.ops.dgl_sparse.spspmm(A1.c_sparse_matrix, A2.c_sparse_matrix)\n )\n", "issue": "[Sparse] Improve the efficiency of multiplication between SparseMatrix and DiagMatrix.\n## \ud83d\udd28Work Item\r\n\r\n**IMPORTANT:**\r\n* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.\r\n* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. 
We will directly use the issue in the project tracker.\r\n\r\nProject tracker: https://github.com/orgs/dmlc/projects/2\r\n\r\n## Description\r\n\r\n<!-- short description of the work item -->\r\n\r\n## Depending work items or issues\r\n\r\n<!-- what must be done before this -->\r\n\n", "before_files": [{"content": "\"\"\"Matmul ops for SparseMatrix\"\"\"\n# pylint: disable=invalid-name\nfrom typing import Union\n\nimport torch\n\nfrom .diag_matrix import diag, DiagMatrix\n\nfrom .sparse_matrix import SparseMatrix\n\n__all__ = [\"spmm\", \"bspmm\", \"spspmm\", \"mm\"]\n\n\ndef spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:\n \"\"\"Multiply a sparse matrix by a dense matrix.\n\n Parameters\n ----------\n A : SparseMatrix or DiagMatrix\n Sparse matrix of shape (N, M) with values of shape (nnz)\n X : torch.Tensor\n Dense tensor of shape (M, F) or (M)\n\n Returns\n -------\n torch.Tensor\n The multiplication result of shape (N, F) or (N)\n\n Examples\n --------\n\n >>> row = torch.tensor([0, 1, 1])\n >>> col = torch.tensor([1, 0, 1])\n >>> val = torch.randn(len(row))\n >>> A = from_coo(row, col, val)\n >>> X = torch.randn(2, 3)\n >>> result = dgl.sparse.spmm(A, X)\n >>> print(type(result))\n <class 'torch.Tensor'>\n >>> print(result.shape)\n torch.Size([2, 3])\n \"\"\"\n assert isinstance(\n A, (SparseMatrix, DiagMatrix)\n ), f\"Expect arg1 to be a SparseMatrix or DiagMatrix object, got {type(A)}\"\n assert isinstance(\n X, torch.Tensor\n ), f\"Expect arg2 to be a torch.Tensor, got {type(X)}\"\n\n # The input is a DiagMatrix. Cast it to SparseMatrix\n if not isinstance(A, SparseMatrix):\n A = A.as_sparse()\n return torch.ops.dgl_sparse.spmm(A.c_sparse_matrix, X)\n\n\ndef bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:\n \"\"\"Multiply a sparse matrix by a dense matrix by batches.\n\n Parameters\n ----------\n A : SparseMatrix or DiagMatrix\n Sparse matrix of shape (N, M, B) with values of shape (nnz)\n X : torch.Tensor\n Dense tensor of shape (M, F, B)\n\n Returns\n -------\n torch.Tensor\n The multiplication result of shape (N, F, B)\n\n Examples\n --------\n\n >>> row = torch.tensor([0, 1, 1])\n >>> col = torch.tensor([1, 0, 2])\n >>> val = torch.randn(len(row), 2)\n >>> A = from_coo(row, col, val, shape=(3, 3))\n >>> X = torch.randn(3, 3, 2)\n >>> result = dgl.sparse.bspmm(A, X)\n >>> print(type(result))\n <class 'torch.Tensor'>\n >>> print(result.shape)\n torch.Size([3, 3, 2])\n \"\"\"\n assert isinstance(\n A, (SparseMatrix, DiagMatrix)\n ), f\"Expect arg1 to be a SparseMatrix or DiagMatrix object, got {type(A)}\"\n assert isinstance(\n X, torch.Tensor\n ), f\"Expect arg2 to be a torch.Tensor, got {type(X)}\"\n return spmm(A, X)\n\n\ndef _diag_diag_mm(A1: DiagMatrix, A2: DiagMatrix) -> DiagMatrix:\n \"\"\"Internal function for multiplying a diagonal matrix by a diagonal matrix\n\n Parameters\n ----------\n A1 : DiagMatrix\n Matrix of shape (N, M), with values of shape (nnz1)\n A2 : DiagMatrix\n Matrix of shape (M, P), with values of shape (nnz2)\n\n Returns\n -------\n DiagMatrix\n The result of multiplication.\n \"\"\"\n M, N = A1.shape\n N, P = A2.shape\n common_diag_len = min(M, N, P)\n new_diag_len = min(M, P)\n diag_val = torch.zeros(new_diag_len)\n diag_val[:common_diag_len] = (\n A1.val[:common_diag_len] * A2.val[:common_diag_len]\n )\n return diag(diag_val.to(A1.device), (M, P))\n\n\ndef spspmm(\n A1: Union[SparseMatrix, DiagMatrix], A2: Union[SparseMatrix, DiagMatrix]\n) -> Union[SparseMatrix, DiagMatrix]:\n \"\"\"Multiply a 
sparse matrix by a sparse matrix. The non-zero values of the\n two sparse matrices must be 1D.\n\n Parameters\n ----------\n A1 : SparseMatrix or DiagMatrix\n Sparse matrix of shape (N, M) with values of shape (nnz)\n A2 : SparseMatrix or DiagMatrix\n Sparse matrix of shape (M, P) with values of shape (nnz)\n\n Returns\n -------\n SparseMatrix or DiagMatrix\n The result of multiplication. It is a DiagMatrix object if both matrices\n are DiagMatrix objects. It is a SparseMatrix object otherwise.\n\n Examples\n --------\n\n >>> row1 = torch.tensor([0, 1, 1])\n >>> col1 = torch.tensor([1, 0, 1])\n >>> val1 = torch.ones(len(row1))\n >>> A1 = from_coo(row1, col1, val1)\n\n >>> row2 = torch.tensor([0, 1, 1])\n >>> col2 = torch.tensor([0, 2, 1])\n >>> val2 = torch.ones(len(row2))\n >>> A2 = from_coo(row2, col2, val2)\n >>> result = dgl.sparse.spspmm(A1, A2)\n >>> print(result)\n SparseMatrix(indices=tensor([[0, 0, 1, 1, 1],\n [1, 2, 0, 1, 2]]),\n values=tensor([1., 1., 1., 1., 1.]),\n shape=(2, 3), nnz=5)\n \"\"\"\n assert isinstance(\n A1, (SparseMatrix, DiagMatrix)\n ), f\"Expect A1 to be a SparseMatrix or DiagMatrix object, got {type(A1)}\"\n assert isinstance(\n A2, (SparseMatrix, DiagMatrix)\n ), f\"Expect A2 to be a SparseMatrix or DiagMatrix object, got {type(A2)}\"\n\n if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):\n return _diag_diag_mm(A1, A2)\n if isinstance(A1, DiagMatrix):\n A1 = A1.as_sparse()\n if isinstance(A2, DiagMatrix):\n A2 = A2.as_sparse()\n return SparseMatrix(\n torch.ops.dgl_sparse.spspmm(A1.c_sparse_matrix, A2.c_sparse_matrix)\n )\n\n\ndef mm(\n A1: Union[SparseMatrix, DiagMatrix],\n A2: Union[torch.Tensor, SparseMatrix, DiagMatrix],\n) -> Union[torch.Tensor, SparseMatrix, DiagMatrix]:\n \"\"\"Multiply a sparse/diagonal matrix by a dense/sparse/diagonal matrix.\n If an input is a SparseMatrix or DiagMatrix, its non-zero values should\n be 1-D.\n\n Parameters\n ----------\n A1 : SparseMatrix or DiagMatrix\n Matrix of shape (N, M), with values of shape (nnz1)\n A2 : torch.Tensor, SparseMatrix, or DiagMatrix\n Matrix of shape (M, P). 
If it is a SparseMatrix or DiagMatrix,\n it should have values of shape (nnz2).\n\n Returns\n -------\n torch.Tensor or DiagMatrix or SparseMatrix\n The result of multiplication of shape (N, P)\n\n * It is a dense torch tensor if :attr:`A2` is so.\n * It is a DiagMatrix object if both :attr:`A1` and :attr:`A2` are so.\n * It is a SparseMatrix object otherwise.\n\n Examples\n --------\n\n >>> val = torch.randn(3)\n >>> A1 = diag(val)\n >>> A2 = torch.randn(3, 2)\n >>> result = dgl.sparse.mm(A1, A2)\n >>> print(type(result))\n <class 'torch.Tensor'>\n >>> print(result.shape)\n torch.Size([3, 2])\n \"\"\"\n assert isinstance(\n A1, (SparseMatrix, DiagMatrix)\n ), f\"Expect arg1 to be a SparseMatrix, or DiagMatrix object, got {type(A1)}.\"\n assert isinstance(A2, (torch.Tensor, SparseMatrix, DiagMatrix)), (\n f\"Expect arg2 to be a torch Tensor, SparseMatrix, or DiagMatrix\"\n f\"object, got {type(A2)}.\"\n )\n if isinstance(A2, torch.Tensor):\n return spmm(A1, A2)\n if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):\n return _diag_diag_mm(A1, A2)\n return spspmm(A1, A2)\n\n\nSparseMatrix.__matmul__ = mm\nDiagMatrix.__matmul__ = mm\n", "path": "python/dgl/sparse/matmul.py"}], "after_files": [{"content": "\"\"\"Matmul ops for SparseMatrix\"\"\"\n# pylint: disable=invalid-name\nfrom typing import Union\n\nimport torch\n\nfrom .diag_matrix import diag, DiagMatrix\n\nfrom .sparse_matrix import SparseMatrix, val_like\n\n__all__ = [\"spmm\", \"bspmm\", \"spspmm\", \"mm\"]\n\n\ndef spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:\n \"\"\"Multiply a sparse matrix by a dense matrix.\n\n Parameters\n ----------\n A : SparseMatrix or DiagMatrix\n Sparse matrix of shape (N, M) with values of shape (nnz)\n X : torch.Tensor\n Dense tensor of shape (M, F) or (M)\n\n Returns\n -------\n torch.Tensor\n The multiplication result of shape (N, F) or (N)\n\n Examples\n --------\n\n >>> row = torch.tensor([0, 1, 1])\n >>> col = torch.tensor([1, 0, 1])\n >>> val = torch.randn(len(row))\n >>> A = from_coo(row, col, val)\n >>> X = torch.randn(2, 3)\n >>> result = dgl.sparse.spmm(A, X)\n >>> print(type(result))\n <class 'torch.Tensor'>\n >>> print(result.shape)\n torch.Size([2, 3])\n \"\"\"\n assert isinstance(\n A, (SparseMatrix, DiagMatrix)\n ), f\"Expect arg1 to be a SparseMatrix or DiagMatrix object, got {type(A)}\"\n assert isinstance(\n X, torch.Tensor\n ), f\"Expect arg2 to be a torch.Tensor, got {type(X)}\"\n\n # The input is a DiagMatrix. 
Cast it to SparseMatrix\n if not isinstance(A, SparseMatrix):\n A = A.as_sparse()\n return torch.ops.dgl_sparse.spmm(A.c_sparse_matrix, X)\n\n\ndef bspmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:\n \"\"\"Multiply a sparse matrix by a dense matrix by batches.\n\n Parameters\n ----------\n A : SparseMatrix or DiagMatrix\n Sparse matrix of shape (N, M, B) with values of shape (nnz)\n X : torch.Tensor\n Dense tensor of shape (M, F, B)\n\n Returns\n -------\n torch.Tensor\n The multiplication result of shape (N, F, B)\n\n Examples\n --------\n\n >>> row = torch.tensor([0, 1, 1])\n >>> col = torch.tensor([1, 0, 2])\n >>> val = torch.randn(len(row), 2)\n >>> A = from_coo(row, col, val, shape=(3, 3))\n >>> X = torch.randn(3, 3, 2)\n >>> result = dgl.sparse.bspmm(A, X)\n >>> print(type(result))\n <class 'torch.Tensor'>\n >>> print(result.shape)\n torch.Size([3, 3, 2])\n \"\"\"\n assert isinstance(\n A, (SparseMatrix, DiagMatrix)\n ), f\"Expect arg1 to be a SparseMatrix or DiagMatrix object, got {type(A)}\"\n assert isinstance(\n X, torch.Tensor\n ), f\"Expect arg2 to be a torch.Tensor, got {type(X)}\"\n return spmm(A, X)\n\n\ndef _diag_diag_mm(A1: DiagMatrix, A2: DiagMatrix) -> DiagMatrix:\n \"\"\"Internal function for multiplying a diagonal matrix by a diagonal matrix\n\n Parameters\n ----------\n A1 : DiagMatrix\n Matrix of shape (N, M), with values of shape (nnz1)\n A2 : DiagMatrix\n Matrix of shape (M, P), with values of shape (nnz2)\n\n Returns\n -------\n DiagMatrix\n The result of multiplication.\n \"\"\"\n M, N = A1.shape\n N, P = A2.shape\n common_diag_len = min(M, N, P)\n new_diag_len = min(M, P)\n diag_val = torch.zeros(new_diag_len)\n diag_val[:common_diag_len] = (\n A1.val[:common_diag_len] * A2.val[:common_diag_len]\n )\n return diag(diag_val.to(A1.device), (M, P))\n\n\ndef _sparse_diag_mm(A, D):\n \"\"\"Internal function for multiplying a sparse matrix by a diagonal matrix.\n\n Parameters\n ----------\n A : SparseMatrix\n Matrix of shape (N, M), with values of shape (nnz1)\n D : DiagMatrix\n Matrix of shape (M, P), with values of shape (nnz2)\n\n Returns\n -------\n SparseMatrix\n SparseMatrix with shape (N, P)\n \"\"\"\n assert (\n A.shape[1] == D.shape[0]\n ), f\"The second dimension of SparseMatrix should be equal to the first \\\n dimension of DiagMatrix in matmul(SparseMatrix, DiagMatrix), but the \\\n shapes of SparseMatrix and DiagMatrix are {A.shape} and {D.shape} \\\n respectively.\"\n assert (\n D.shape[0] == D.shape[1]\n ), f\"The DiagMatrix should be a square in matmul(SparseMatrix, DiagMatrix) \\\n but got {D.shape}\"\n return val_like(A, D.val[A.col] * A.val)\n\n\ndef _diag_sparse_mm(D, A):\n \"\"\"Internal function for multiplying a diag matrix by a sparse matrix.\n\n Parameters\n ----------\n D : DiagMatrix\n Matrix of shape (N, M), with values of shape (nnz1)\n A : DiagMatrix\n Matrix of shape (M, P), with values of shape (nnz2)\n\n Returns\n -------\n SparseMatrix\n SparseMatrix with shape (N, P)\n \"\"\"\n assert (\n D.shape[1] == A.shape[0]\n ), f\"The second dimension of DiagMatrix should be equal to the first \\\n dimension of SparseMatrix in matmul(DiagMatrix, SparseMatrix), but the \\\n shapes of DiagMatrix and SparseMatrix are {D.shape} and {A.shape} \\\n respectively.\"\n assert (\n D.shape[0] == D.shape[1]\n ), f\"The DiagMatrix should be a square in matmul(DiagMatrix, SparseMatrix) \\\n but got {D.shape}\"\n return val_like(A, D.val[A.row] * A.val)\n\n\ndef spspmm(\n A1: Union[SparseMatrix, DiagMatrix], A2: Union[SparseMatrix, 
DiagMatrix]\n) -> Union[SparseMatrix, DiagMatrix]:\n \"\"\"Multiply a sparse matrix by a sparse matrix. The non-zero values of the\n two sparse matrices must be 1D.\n\n Parameters\n ----------\n A1 : SparseMatrix or DiagMatrix\n Sparse matrix of shape (N, M) with values of shape (nnz)\n A2 : SparseMatrix or DiagMatrix\n Sparse matrix of shape (M, P) with values of shape (nnz)\n\n Returns\n -------\n SparseMatrix or DiagMatrix\n The result of multiplication. It is a DiagMatrix object if both matrices\n are DiagMatrix objects. It is a SparseMatrix object otherwise.\n\n Examples\n --------\n\n >>> row1 = torch.tensor([0, 1, 1])\n >>> col1 = torch.tensor([1, 0, 1])\n >>> val1 = torch.ones(len(row1))\n >>> A1 = from_coo(row1, col1, val1)\n\n >>> row2 = torch.tensor([0, 1, 1])\n >>> col2 = torch.tensor([0, 2, 1])\n >>> val2 = torch.ones(len(row2))\n >>> A2 = from_coo(row2, col2, val2)\n >>> result = dgl.sparse.spspmm(A1, A2)\n >>> print(result)\n SparseMatrix(indices=tensor([[0, 0, 1, 1, 1],\n [1, 2, 0, 1, 2]]),\n values=tensor([1., 1., 1., 1., 1.]),\n shape=(2, 3), nnz=5)\n \"\"\"\n assert isinstance(\n A1, (SparseMatrix, DiagMatrix)\n ), f\"Expect A1 to be a SparseMatrix or DiagMatrix object, got {type(A1)}\"\n assert isinstance(\n A2, (SparseMatrix, DiagMatrix)\n ), f\"Expect A2 to be a SparseMatrix or DiagMatrix object, got {type(A2)}\"\n\n if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):\n return _diag_diag_mm(A1, A2)\n if isinstance(A1, DiagMatrix):\n return _diag_sparse_mm(A1, A2)\n if isinstance(A2, DiagMatrix):\n return _sparse_diag_mm(A1, A2)\n return SparseMatrix(\n torch.ops.dgl_sparse.spspmm(A1.c_sparse_matrix, A2.c_sparse_matrix)\n )\n\n\ndef mm(\n A1: Union[SparseMatrix, DiagMatrix],\n A2: Union[torch.Tensor, SparseMatrix, DiagMatrix],\n) -> Union[torch.Tensor, SparseMatrix, DiagMatrix]:\n \"\"\"Multiply a sparse/diagonal matrix by a dense/sparse/diagonal matrix.\n If an input is a SparseMatrix or DiagMatrix, its non-zero values should\n be 1-D.\n\n Parameters\n ----------\n A1 : SparseMatrix or DiagMatrix\n Matrix of shape (N, M), with values of shape (nnz1)\n A2 : torch.Tensor, SparseMatrix, or DiagMatrix\n Matrix of shape (M, P). If it is a SparseMatrix or DiagMatrix,\n it should have values of shape (nnz2).\n\n Returns\n -------\n torch.Tensor or DiagMatrix or SparseMatrix\n The result of multiplication of shape (N, P)\n\n * It is a dense torch tensor if :attr:`A2` is so.\n * It is a DiagMatrix object if both :attr:`A1` and :attr:`A2` are so.\n * It is a SparseMatrix object otherwise.\n\n Examples\n --------\n\n >>> val = torch.randn(3)\n >>> A1 = diag(val)\n >>> A2 = torch.randn(3, 2)\n >>> result = dgl.sparse.mm(A1, A2)\n >>> print(type(result))\n <class 'torch.Tensor'>\n >>> print(result.shape)\n torch.Size([3, 2])\n \"\"\"\n assert isinstance(\n A1, (SparseMatrix, DiagMatrix)\n ), f\"Expect arg1 to be a SparseMatrix, or DiagMatrix object, got {type(A1)}.\"\n assert isinstance(A2, (torch.Tensor, SparseMatrix, DiagMatrix)), (\n f\"Expect arg2 to be a torch Tensor, SparseMatrix, or DiagMatrix\"\n f\"object, got {type(A2)}.\"\n )\n if isinstance(A2, torch.Tensor):\n return spmm(A1, A2)\n if isinstance(A1, DiagMatrix) and isinstance(A2, DiagMatrix):\n return _diag_diag_mm(A1, A2)\n return spspmm(A1, A2)\n\n\nSparseMatrix.__matmul__ = mm\nDiagMatrix.__matmul__ = mm\n", "path": "python/dgl/sparse/matmul.py"}]}
| 3,015 | 842 |
gh_patches_debug_18031
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2725
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Internal server error when importing CSVs with long names
Follow the same steps as reported in #2634 and observe the error from the screenshot below: 
API: `http://localhost/api/db/v0/tables/12/records/?limit=500&offset=0`
<img width="1512" alt="Screenshot 2023-03-20 at 5 29 52 AM" src="https://user-images.githubusercontent.com/11032856/226218521-75355de8-eee0-4b5e-9a9c-47aa3ff67da2.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/identifiers.py`
Content:
```
1 import hashlib
2
3
4 def truncate_if_necessary(identifier):
5 """
6 Takes an identifier and returns it, truncating it, if it is too long. The truncated version
7 will end with a hash of the passed identifier, therefore column name collision should be very
8 rare.
9
10 Iteratively removes characters from the end of the identifier, until the resulting string, with
11 the suffix hash of the identifier appended, is short enough that it doesn't need to be truncated
12 anymore. Whitespace is trimmed from the truncated identifier before appending the suffix.
13 """
14 assert type(identifier) is str
15 if not is_identifier_too_long(identifier):
16 return identifier
17 right_side = "-" + _get_truncation_hash(identifier)
18 identifier_length = len(identifier)
19 assert len(right_side) < identifier_length # Sanity check
20 range_of_num_of_chars_to_remove = range(1, identifier_length)
21 for num_of_chars_to_remove in range_of_num_of_chars_to_remove:
22 left_side = identifier[:num_of_chars_to_remove * -1]
23 left_side = left_side.rstrip()
24 truncated_identifier = left_side + right_side
25 if not is_identifier_too_long(truncated_identifier):
26 return truncated_identifier
27 raise Exception(
28 "Acceptable truncation not found; should never happen."
29 )
30
31
32 def is_identifier_too_long(identifier):
33 postgres_identifier_size_limit = 63
34 size = _get_size_of_identifier_in_bytes(identifier)
35 return size > postgres_identifier_size_limit
36
37
38 def _get_truncation_hash(identifier):
39 """
40 Produces an 8-character string hash of the passed identifier.
41
42 Using hash function blake2s, because it seems fairly recommended and it seems to be better
43 suited for shorter digests than blake2b. We want short digests to not take up too much of the
44 truncated identifier in whose construction this will be used.
45 """
46 h = hashlib.blake2s(digest_size=4)
47 bytes = _get_identifier_in_bytes(identifier)
48 h.update(bytes)
49 return h.hexdigest()
50
51
52 def _get_size_of_identifier_in_bytes(s):
53 bytes = _get_identifier_in_bytes(s)
54 return len(bytes)
55
56
57 def _get_identifier_in_bytes(s):
58 """
59 Afaict, following Postgres doc [0] says that UTF-8 supports all languages; therefore, different
60 server locale configurations should not break this.
61
62 [0] https://www.postgresql.org/docs/13/multibyte.html
63 """
64 return s.encode('utf-8')
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/identifiers.py b/db/identifiers.py
--- a/db/identifiers.py
+++ b/db/identifiers.py
@@ -1,6 +1,9 @@
import hashlib
+POSTGRES_IDENTIFIER_SIZE_LIMIT = 63
+
+
def truncate_if_necessary(identifier):
"""
Takes an identifier and returns it, truncating it, if it is too long. The truncated version
@@ -30,9 +33,13 @@
def is_identifier_too_long(identifier):
- postgres_identifier_size_limit = 63
+ # TODO we should support POSTGRES_IDENTIFIER_SIZE_LIMIT here;
+ # Our current limit due to an unknown bug that manifests at least
+ # when importing CSVs seems to be 57 bytes. Here we're setting it even
+ # lower just in case.
+ our_temporary_identifier_size_limit = 48
size = _get_size_of_identifier_in_bytes(identifier)
- return size > postgres_identifier_size_limit
+ return size > our_temporary_identifier_size_limit
def _get_truncation_hash(identifier):
|
{"golden_diff": "diff --git a/db/identifiers.py b/db/identifiers.py\n--- a/db/identifiers.py\n+++ b/db/identifiers.py\n@@ -1,6 +1,9 @@\n import hashlib\n \n \n+POSTGRES_IDENTIFIER_SIZE_LIMIT = 63\n+\n+\n def truncate_if_necessary(identifier):\n \"\"\"\n Takes an identifier and returns it, truncating it, if it is too long. The truncated version\n@@ -30,9 +33,13 @@\n \n \n def is_identifier_too_long(identifier):\n- postgres_identifier_size_limit = 63\n+ # TODO we should support POSTGRES_IDENTIFIER_SIZE_LIMIT here;\n+ # Our current limit due to an unknown bug that manifests at least\n+ # when importing CSVs seems to be 57 bytes. Here we're setting it even\n+ # lower just in case.\n+ our_temporary_identifier_size_limit = 48\n size = _get_size_of_identifier_in_bytes(identifier)\n- return size > postgres_identifier_size_limit\n+ return size > our_temporary_identifier_size_limit\n \n \n def _get_truncation_hash(identifier):\n", "issue": "Internal server error when importing CSVs with long names\nFollow the same steps as reported in #2634 and observer the error from the screenshot below: \r\n\r\nAPI: `http://localhost/api/db/v0/tables/12/records/?limit=500&offset=0`\r\n\r\n<img width=\"1512\" alt=\"Screenshot 2023-03-20 at 5 29 52 AM\" src=\"https://user-images.githubusercontent.com/11032856/226218521-75355de8-eee0-4b5e-9a9c-47aa3ff67da2.png\">\r\n\n", "before_files": [{"content": "import hashlib\n\n\ndef truncate_if_necessary(identifier):\n \"\"\"\n Takes an identifier and returns it, truncating it, if it is too long. The truncated version\n will end with a hash of the passed identifier, therefore column name collision should be very\n rare.\n\n Iteratively removes characters from the end of the identifier, until the resulting string, with\n the suffix hash of the identifier appended, is short enough that it doesn't need to be truncated\n anymore. Whitespace is trimmed from the truncated identifier before appending the suffix.\n \"\"\"\n assert type(identifier) is str\n if not is_identifier_too_long(identifier):\n return identifier\n right_side = \"-\" + _get_truncation_hash(identifier)\n identifier_length = len(identifier)\n assert len(right_side) < identifier_length # Sanity check\n range_of_num_of_chars_to_remove = range(1, identifier_length)\n for num_of_chars_to_remove in range_of_num_of_chars_to_remove:\n left_side = identifier[:num_of_chars_to_remove * -1]\n left_side = left_side.rstrip()\n truncated_identifier = left_side + right_side\n if not is_identifier_too_long(truncated_identifier):\n return truncated_identifier\n raise Exception(\n \"Acceptable truncation not found; should never happen.\"\n )\n\n\ndef is_identifier_too_long(identifier):\n postgres_identifier_size_limit = 63\n size = _get_size_of_identifier_in_bytes(identifier)\n return size > postgres_identifier_size_limit\n\n\ndef _get_truncation_hash(identifier):\n \"\"\"\n Produces an 8-character string hash of the passed identifier.\n\n Using hash function blake2s, because it seems fairly recommended and it seems to be better\n suited for shorter digests than blake2b. 
We want short digests to not take up too much of the\n truncated identifier in whose construction this will be used.\n \"\"\"\n h = hashlib.blake2s(digest_size=4)\n bytes = _get_identifier_in_bytes(identifier)\n h.update(bytes)\n return h.hexdigest()\n\n\ndef _get_size_of_identifier_in_bytes(s):\n bytes = _get_identifier_in_bytes(s)\n return len(bytes)\n\n\ndef _get_identifier_in_bytes(s):\n \"\"\"\n Afaict, following Postgres doc [0] says that UTF-8 supports all languages; therefore, different\n server locale configurations should not break this.\n\n [0] https://www.postgresql.org/docs/13/multibyte.html\n \"\"\"\n return s.encode('utf-8')\n", "path": "db/identifiers.py"}], "after_files": [{"content": "import hashlib\n\n\nPOSTGRES_IDENTIFIER_SIZE_LIMIT = 63\n\n\ndef truncate_if_necessary(identifier):\n \"\"\"\n Takes an identifier and returns it, truncating it, if it is too long. The truncated version\n will end with a hash of the passed identifier, therefore column name collision should be very\n rare.\n\n Iteratively removes characters from the end of the identifier, until the resulting string, with\n the suffix hash of the identifier appended, is short enough that it doesn't need to be truncated\n anymore. Whitespace is trimmed from the truncated identifier before appending the suffix.\n \"\"\"\n assert type(identifier) is str\n if not is_identifier_too_long(identifier):\n return identifier\n right_side = \"-\" + _get_truncation_hash(identifier)\n identifier_length = len(identifier)\n assert len(right_side) < identifier_length # Sanity check\n range_of_num_of_chars_to_remove = range(1, identifier_length)\n for num_of_chars_to_remove in range_of_num_of_chars_to_remove:\n left_side = identifier[:num_of_chars_to_remove * -1]\n left_side = left_side.rstrip()\n truncated_identifier = left_side + right_side\n if not is_identifier_too_long(truncated_identifier):\n return truncated_identifier\n raise Exception(\n \"Acceptable truncation not found; should never happen.\"\n )\n\n\ndef is_identifier_too_long(identifier):\n # TODO we should support POSTGRES_IDENTIFIER_SIZE_LIMIT here;\n # Our current limit due to an unknown bug that manifests at least\n # when importing CSVs seems to be 57 bytes. Here we're setting it even\n # lower just in case.\n our_temporary_identifier_size_limit = 48\n size = _get_size_of_identifier_in_bytes(identifier)\n return size > our_temporary_identifier_size_limit\n\n\ndef _get_truncation_hash(identifier):\n \"\"\"\n Produces an 8-character string hash of the passed identifier.\n\n Using hash function blake2s, because it seems fairly recommended and it seems to be better\n suited for shorter digests than blake2b. We want short digests to not take up too much of the\n truncated identifier in whose construction this will be used.\n \"\"\"\n h = hashlib.blake2s(digest_size=4)\n bytes = _get_identifier_in_bytes(identifier)\n h.update(bytes)\n return h.hexdigest()\n\n\ndef _get_size_of_identifier_in_bytes(s):\n bytes = _get_identifier_in_bytes(s)\n return len(bytes)\n\n\ndef _get_identifier_in_bytes(s):\n \"\"\"\n Afaict, following Postgres doc [0] says that UTF-8 supports all languages; therefore, different\n server locale configurations should not break this.\n\n [0] https://www.postgresql.org/docs/13/multibyte.html\n \"\"\"\n return s.encode('utf-8')\n", "path": "db/identifiers.py"}]}
| 1,081 | 240 |
gh_patches_debug_63591
|
rasdani/github-patches
|
git_diff
|
openai__gym-1092
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError when installing on Windows 10 and WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>
Dears,
Would you please let me know how I could solve this warning and this error? (Windows 10)
Using TensorFlow backend.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
File "C:\Users\fi\Desktop\rl\code\3.6\stock_market_reinforcement_learning-master\environment.py", line 43, in __init__
self.reset()
File "C:\Users\fi\Anaconda30\envs\tensorflow\lib\site-packages\gym\core.py", line 70, in reset
raise NotImplementedError
NotImplementedError
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/envs/mujoco/mujoco_env.py`
Content:
```
1 import os
2
3 from gym import error, spaces
4 from gym.utils import seeding
5 import numpy as np
6 from os import path
7 import gym
8 import six
9
10 try:
11 import mujoco_py
12 except ImportError as e:
13 raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e))
14
15 DEFAULT_SIZE = 500
16
17 class MujocoEnv(gym.Env):
18 """Superclass for all MuJoCo environments.
19 """
20
21 def __init__(self, model_path, frame_skip):
22 if model_path.startswith("/"):
23 fullpath = model_path
24 else:
25 fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
26 if not path.exists(fullpath):
27 raise IOError("File %s does not exist" % fullpath)
28 self.frame_skip = frame_skip
29 self.model = mujoco_py.load_model_from_path(fullpath)
30 self.sim = mujoco_py.MjSim(self.model)
31 self.data = self.sim.data
32 self.viewer = None
33 self._viewers = {}
34
35 self.metadata = {
36 'render.modes': ['human', 'rgb_array'],
37 'video.frames_per_second': int(np.round(1.0 / self.dt))
38 }
39
40 self.init_qpos = self.sim.data.qpos.ravel().copy()
41 self.init_qvel = self.sim.data.qvel.ravel().copy()
42 observation, _reward, done, _info = self.step(np.zeros(self.model.nu))
43 assert not done
44 self.obs_dim = observation.size
45
46 bounds = self.model.actuator_ctrlrange.copy()
47 low = bounds[:, 0]
48 high = bounds[:, 1]
49 self.action_space = spaces.Box(low=low, high=high)
50
51 high = np.inf*np.ones(self.obs_dim)
52 low = -high
53 self.observation_space = spaces.Box(low, high)
54
55 self.seed()
56
57 def seed(self, seed=None):
58 self.np_random, seed = seeding.np_random(seed)
59 return [seed]
60
61 # methods to override:
62 # ----------------------------
63
64 def reset_model(self):
65 """
66 Reset the robot degrees of freedom (qpos and qvel).
67 Implement this in each subclass.
68 """
69 raise NotImplementedError
70
71 def viewer_setup(self):
72 """
73 This method is called when the viewer is initialized and after every reset
74 Optionally implement this method, if you need to tinker with camera position
75 and so forth.
76 """
77 pass
78
79 # -----------------------------
80
81 def reset(self):
82 self.sim.reset()
83 ob = self.reset_model()
84 old_viewer = self.viewer
85 for v in self._viewers.values():
86 self.viewer = v
87 self.viewer_setup()
88 self.viewer = old_viewer
89 return ob
90
91 def set_state(self, qpos, qvel):
92 assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
93 old_state = self.sim.get_state()
94 new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
95 old_state.act, old_state.udd_state)
96 self.sim.set_state(new_state)
97 self.sim.forward()
98
99 @property
100 def dt(self):
101 return self.model.opt.timestep * self.frame_skip
102
103 def do_simulation(self, ctrl, n_frames):
104 self.sim.data.ctrl[:] = ctrl
105 for _ in range(n_frames):
106 self.sim.step()
107
108 def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):
109 if mode == 'rgb_array':
110 self._get_viewer(mode).render(width, height)
111 # window size used for old mujoco-py:
112 data = self._get_viewer(mode).read_pixels(width, height, depth=False)
113 # original image is upside-down, so flip it
114 return data[::-1, :, :]
115 elif mode == 'human':
116 self._get_viewer(mode).render()
117
118 def close(self):
119 if self.viewer is not None:
120 # self.viewer.finish()
121 self.viewer = None
122 self._viewers = {}
123
124 def _get_viewer(self, mode):
125 self.viewer = self._viewers.get(mode)
126 if self.viewer is None:
127 if mode == 'human':
128 self.viewer = mujoco_py.MjViewer(self.sim)
129 elif mode == 'rgb_array':
130 self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)
131 self.viewer_setup()
132 self._viewers[mode] = self.viewer
133 return self.viewer
134
135 def get_body_com(self, body_name):
136 return self.data.get_body_xpos(body_name)
137
138 def state_vector(self):
139 return np.concatenate([
140 self.sim.data.qpos.flat,
141 self.sim.data.qvel.flat
142 ])
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/envs/mujoco/mujoco_env.py b/gym/envs/mujoco/mujoco_env.py
--- a/gym/envs/mujoco/mujoco_env.py
+++ b/gym/envs/mujoco/mujoco_env.py
@@ -46,7 +46,7 @@
bounds = self.model.actuator_ctrlrange.copy()
low = bounds[:, 0]
high = bounds[:, 1]
- self.action_space = spaces.Box(low=low, high=high)
+ self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)
high = np.inf*np.ones(self.obs_dim)
low = -high
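
On the caller side the same warning disappears whenever a `Box` is given an explicit dtype, which is what the patch does for the MuJoCo action space; a minimal sketch with arbitrary bounds and shapes:

```python
import numpy as np
from gym import spaces

# Passing dtype explicitly silences the "autodetected dtype" warning from the issue.
action_space = spaces.Box(low=-1.0, high=1.0, shape=(6,), dtype=np.float32)
observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(17,), dtype=np.float32)
print(action_space.sample().dtype)  # float32
```

Note that the `NotImplementedError` in the quoted traceback appears to be a separate problem — the custom environment's `reset()` is not implemented — which the dtype change does not address.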
|
{"golden_diff": "diff --git a/gym/envs/mujoco/mujoco_env.py b/gym/envs/mujoco/mujoco_env.py\n--- a/gym/envs/mujoco/mujoco_env.py\n+++ b/gym/envs/mujoco/mujoco_env.py\n@@ -46,7 +46,7 @@\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n- self.action_space = spaces.Box(low=low, high=high)\n+ self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n \n high = np.inf*np.ones(self.obs_dim)\n low = -high\n", "issue": "ImportError when installing on Windows 10 and [33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>\nDears,\r\nWould you please let me know how I could solve this warning and this error? (Windows 10)\r\n\r\nUsing TensorFlow backend.\r\n\u001b[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.\u001b[0m\r\n\r\n File \"C:\\Users\\fi\\Desktop\\rl\\code\\3.6\\stock_market_reinforcement_learning-master\\environment.py\", line 43, in __init__\r\n self.reset()\r\n File \"C:\\Users\\fi\\Anaconda30\\envs\\tensorflow\\lib\\site-packages\\gym\\core.py\", line 70, in reset\r\n raise NotImplementedError\r\nNotImplementedErrorr\r\n\n", "before_files": [{"content": "import os\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\nimport six\n\ntry:\n import mujoco_py\nexcept ImportError as e:\n raise error.DependencyNotInstalled(\"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 500\n\nclass MujocoEnv(gym.Env):\n \"\"\"Superclass for all MuJoCo environments.\n \"\"\"\n\n def __init__(self, model_path, frame_skip):\n if model_path.startswith(\"/\"):\n fullpath = model_path\n else:\n fullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n if not path.exists(fullpath):\n raise IOError(\"File %s does not exist\" % fullpath)\n self.frame_skip = frame_skip\n self.model = mujoco_py.load_model_from_path(fullpath)\n self.sim = mujoco_py.MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n self._viewers = {}\n\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': int(np.round(1.0 / self.dt))\n }\n\n self.init_qpos = self.sim.data.qpos.ravel().copy()\n self.init_qvel = self.sim.data.qvel.ravel().copy()\n observation, _reward, done, _info = self.step(np.zeros(self.model.nu))\n assert not done\n self.obs_dim = observation.size\n\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n self.action_space = spaces.Box(low=low, high=high)\n\n high = np.inf*np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # methods to override:\n # ----------------------------\n\n def reset_model(self):\n \"\"\"\n Reset the robot degrees of freedom (qpos and qvel).\n Implement this in each subclass.\n \"\"\"\n raise NotImplementedError\n\n def viewer_setup(self):\n \"\"\"\n This method is called when the viewer is initialized and after every reset\n Optionally implement this method, if you need to tinker with camera position\n and so forth.\n \"\"\"\n pass\n\n # -----------------------------\n\n def reset(self):\n self.sim.reset()\n ob = self.reset_model()\n old_viewer = self.viewer\n for v in self._viewers.values():\n self.viewer = v\n self.viewer_setup()\n self.viewer = old_viewer\n 
return ob\n\n def set_state(self, qpos, qvel):\n assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n @property\n def dt(self):\n return self.model.opt.timestep * self.frame_skip\n\n def do_simulation(self, ctrl, n_frames):\n self.sim.data.ctrl[:] = ctrl\n for _ in range(n_frames):\n self.sim.step()\n\n def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):\n if mode == 'rgb_array':\n self._get_viewer(mode).render(width, height)\n # window size used for old mujoco-py:\n data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n # original image is upside-down, so flip it\n return data[::-1, :, :]\n elif mode == 'human':\n self._get_viewer(mode).render()\n\n def close(self):\n if self.viewer is not None:\n # self.viewer.finish()\n self.viewer = None\n self._viewers = {}\n\n def _get_viewer(self, mode):\n self.viewer = self._viewers.get(mode)\n if self.viewer is None:\n if mode == 'human':\n self.viewer = mujoco_py.MjViewer(self.sim)\n elif mode == 'rgb_array':\n self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)\n self.viewer_setup()\n self._viewers[mode] = self.viewer\n return self.viewer\n\n def get_body_com(self, body_name):\n return self.data.get_body_xpos(body_name)\n\n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n", "path": "gym/envs/mujoco/mujoco_env.py"}], "after_files": [{"content": "import os\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\nimport six\n\ntry:\n import mujoco_py\nexcept ImportError as e:\n raise error.DependencyNotInstalled(\"{}. 
(HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 500\n\nclass MujocoEnv(gym.Env):\n \"\"\"Superclass for all MuJoCo environments.\n \"\"\"\n\n def __init__(self, model_path, frame_skip):\n if model_path.startswith(\"/\"):\n fullpath = model_path\n else:\n fullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n if not path.exists(fullpath):\n raise IOError(\"File %s does not exist\" % fullpath)\n self.frame_skip = frame_skip\n self.model = mujoco_py.load_model_from_path(fullpath)\n self.sim = mujoco_py.MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n self._viewers = {}\n\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': int(np.round(1.0 / self.dt))\n }\n\n self.init_qpos = self.sim.data.qpos.ravel().copy()\n self.init_qvel = self.sim.data.qvel.ravel().copy()\n observation, _reward, done, _info = self.step(np.zeros(self.model.nu))\n assert not done\n self.obs_dim = observation.size\n\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n\n high = np.inf*np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # methods to override:\n # ----------------------------\n\n def reset_model(self):\n \"\"\"\n Reset the robot degrees of freedom (qpos and qvel).\n Implement this in each subclass.\n \"\"\"\n raise NotImplementedError\n\n def viewer_setup(self):\n \"\"\"\n This method is called when the viewer is initialized and after every reset\n Optionally implement this method, if you need to tinker with camera position\n and so forth.\n \"\"\"\n pass\n\n # -----------------------------\n\n def reset(self):\n self.sim.reset()\n ob = self.reset_model()\n old_viewer = self.viewer\n for v in self._viewers.values():\n self.viewer = v\n self.viewer_setup()\n self.viewer = old_viewer\n return ob\n\n def set_state(self, qpos, qvel):\n assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n @property\n def dt(self):\n return self.model.opt.timestep * self.frame_skip\n\n def do_simulation(self, ctrl, n_frames):\n self.sim.data.ctrl[:] = ctrl\n for _ in range(n_frames):\n self.sim.step()\n\n def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):\n if mode == 'rgb_array':\n self._get_viewer(mode).render(width, height)\n # window size used for old mujoco-py:\n data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n # original image is upside-down, so flip it\n return data[::-1, :, :]\n elif mode == 'human':\n self._get_viewer(mode).render()\n\n def close(self):\n if self.viewer is not None:\n # self.viewer.finish()\n self.viewer = None\n self._viewers = {}\n\n def _get_viewer(self, mode):\n self.viewer = self._viewers.get(mode)\n if self.viewer is None:\n if mode == 'human':\n self.viewer = mujoco_py.MjViewer(self.sim)\n elif mode == 'rgb_array':\n self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)\n self.viewer_setup()\n self._viewers[mode] = self.viewer\n return self.viewer\n\n def get_body_com(self, body_name):\n return 
self.data.get_body_xpos(body_name)\n\n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n", "path": "gym/envs/mujoco/mujoco_env.py"}]}
| 1,828 | 155 |
gh_patches_debug_7763
|
rasdani/github-patches
|
git_diff
|
plotly__dash-808
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Defer `pytest` import?
Looks like `pytest` isn't the safest dependency, causing issues with scikit-learn: https://community.plot.ly/t/pytest-transient-dependency/25383
Could we move the `import pytest` into the testing module/class/function itself and not require it upon install? We could even have a separate install with setup.py's "extras" feature (https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) like `pip install dash[testing]` or something.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open("dash/version.py").read(), main_ns) # pylint: disable=exec-used
6
7
8 def read_req_file(req_type):
9 with open("requires-{}.txt".format(req_type)) as fp:
10 requires = (line.strip() for line in fp)
11 return [req for req in requires if req and not req.startswith("#")]
12
13
14 setup(
15 name="dash",
16 version=main_ns["__version__"],
17 author="chris p",
18 author_email="[email protected]",
19 packages=find_packages(exclude=["tests*"]),
20 include_package_data=True,
21 license="MIT",
22 description=(
23 "A Python framework for building reactive web-apps. "
24 "Developed by Plotly."
25 ),
26 long_description=io.open("README.md", encoding="utf-8").read(),
27 long_description_content_type="text/markdown",
28 install_requires=read_req_file("install"),
29 extras_require={"ci": read_req_file("ci")},
30 entry_points={
31 "console_scripts": [
32 "dash-generate-components ="
33 " dash.development.component_generator:cli"
34 ],
35 "pytest11": ["dash = dash.testing.plugin"],
36 },
37 url="https://plot.ly/dash",
38 classifiers=[
39 "Development Status :: 5 - Production/Stable",
40 "Environment :: Web Environment",
41 "Framework :: Flask",
42 "Intended Audience :: Developers",
43 "Intended Audience :: Education",
44 "Intended Audience :: Financial and Insurance Industry",
45 "Intended Audience :: Healthcare Industry",
46 "Intended Audience :: Manufacturing",
47 "Intended Audience :: Science/Research",
48 "License :: OSI Approved :: MIT License",
49 "Programming Language :: Python",
50 "Programming Language :: Python :: 2",
51 "Programming Language :: Python :: 2.7",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.3",
54 "Programming Language :: Python :: 3.4",
55 "Programming Language :: Python :: 3.5",
56 "Programming Language :: Python :: 3.6",
57 "Programming Language :: Python :: 3.7",
58 "Topic :: Database :: Front-Ends",
59 "Topic :: Office/Business :: Financial :: Spreadsheet",
60 "Topic :: Scientific/Engineering :: Visualization",
61 "Topic :: Software Development :: Libraries :: Application Frameworks",
62 "Topic :: Software Development :: Widget Sets",
63 ],
64 )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,10 @@
long_description=io.open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
install_requires=read_req_file("install"),
- extras_require={"ci": read_req_file("ci")},
+ extras_require={
+ "ci": read_req_file("ci"),
+ "testing": read_req_file("testing"),
+ },
entry_points={
"console_scripts": [
"dash-generate-components ="
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,10 @@\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n- extras_require={\"ci\": read_req_file(\"ci\")},\n+ extras_require={\n+ \"ci\": read_req_file(\"ci\"),\n+ \"testing\": read_req_file(\"testing\"),\n+ },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components =\"\n", "issue": "Defer `pytest` import?\nLooks like `pytest` isn't the safest dependency, causing issues with scikit-learn: https://community.plot.ly/t/pytest-transient-dependency/25383\r\n\r\nCould we move the `import pytest` into the testing module/class/function itself and not require it upon install? We could even have a separate install with setup.py's \"extras\" feature (https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) like `pip install dash[testing]` or something.\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\").read(), main_ns) # pylint: disable=exec-used\n\n\ndef read_req_file(req_type):\n with open(\"requires-{}.txt\".format(req_type)) as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"chris p\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. \"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n extras_require={\"ci\": read_req_file(\"ci\")},\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components =\"\n \" dash.development.component_generator:cli\"\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plot.ly/dash\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\").read(), main_ns) # pylint: disable=exec-used\n\n\ndef 
read_req_file(req_type):\n with open(\"requires-{}.txt\".format(req_type)) as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"chris p\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. \"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n extras_require={\n \"ci\": read_req_file(\"ci\"),\n \"testing\": read_req_file(\"testing\"),\n },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components =\"\n \" dash.development.component_generator:cli\"\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plot.ly/dash\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n)\n", "path": "setup.py"}]}
| 1,028 | 131 |
gh_patches_debug_39246
|
rasdani/github-patches
|
git_diff
|
dask__distributed-1462
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add rejoin operation to rejoin thread pool
Currently the `secede` function allows a task to remove itself from the worker's current thread pool, opening up a space for more tasks.
We might consider an inverse operation, `rejoin`, that blocks until a new spot in the thread pool has opened up. This would enable long-running task computations to avoid the contention of many threads computing at once.
First suggested by @adamklein in https://github.com/dask/distributed/issues/1342
also cc @ogrisel
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/__init__.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 from .config import config
4 from .core import connect, rpc
5 from .deploy import LocalCluster
6 from .diagnostics import progress
7 from .client import (Client, Executor, CompatibleExecutor,
8 wait, as_completed, default_client, fire_and_forget,
9 Future)
10 from .nanny import Nanny
11 from .queues import Queue
12 from .scheduler import Scheduler
13 from .utils import sync
14 from .variable import Variable
15 from .worker import Worker, get_worker, get_client, secede
16 from .worker_client import local_client, worker_client
17
18 from ._version import get_versions
19 versions = get_versions()
20 __version__ = versions['version']
21 __git_revision__ = versions['full-revisionid']
22 del get_versions, versions
23
```
Path: `distributed/threadpoolexecutor.py`
Content:
```
1 """
2 Modified ThreadPoolExecutor to support threads leaving the thread pool
3
4 This includes a global `secede` method that a submitted function can call to
5 have its thread leave the ThreadPoolExecutor's thread pool. This allows the
6 thread pool to allocate another thread if necessary and so is useful when a
7 function realises that it is going to be a long-running job that doesn't want
8 to take up space. When the function finishes its thread will terminate
9 gracefully.
10
11 This code copies and modifies two functions from the
12 `concurrent.futures.thread` module, notably `_worker` and
13 ThreadPoolExecutor._adjust_thread_count` to allow for checking against a global
14 `threading.local` state. These functions are subject to the following license,
15 which is included as a comment at the end of this file:
16
17 https://docs.python.org/3/license.html
18
19 ... and are under copyright by the Python Software Foundation
20
21 Copyright 2001-2016 Python Software Foundation; All Rights Reserved
22 """
23 from __future__ import print_function, division, absolute_import
24
25 from . import _concurrent_futures_thread as thread
26 import logging
27 import threading
28
29 from .compatibility import get_thread_identity
30 from .metrics import time
31
32 logger = logging.getLogger(__name__)
33
34 thread_state = threading.local()
35
36
37 def _worker(executor, work_queue):
38 thread_state.proceed = True
39 thread_state.executor = executor
40
41 try:
42 while thread_state.proceed:
43 task = work_queue.get()
44 if task is not None: # sentinel
45 task.run()
46 del task
47 elif thread._shutdown or executor is None or executor._shutdown:
48 work_queue.put(None)
49 return
50 del executor
51 except BaseException:
52 logger.critical('Exception in worker', exc_info=True)
53 finally:
54 del thread_state.proceed
55 del thread_state.executor
56
57
58 class ThreadPoolExecutor(thread.ThreadPoolExecutor):
59 def _adjust_thread_count(self):
60 if len(self._threads) < self._max_workers:
61 t = threading.Thread(target=_worker,
62 name="ThreadPool worker %d" % len(self._threads,),
63 args=(self, self._work_queue))
64 t.daemon = True
65 self._threads.add(t)
66 t.start()
67
68 def shutdown(self, wait=True, timeout=None):
69 with threads_lock:
70 with self._shutdown_lock:
71 self._shutdown = True
72 self._work_queue.put(None)
73 if timeout is not None:
74 deadline = time() + timeout
75 for t in self._threads:
76 if timeout is not None:
77 timeout2 = max(deadline - time(), 0)
78 else:
79 timeout2 = None
80 t.join(timeout=timeout2)
81
82
83 def secede():
84 """ Have this thread secede from the ThreadPoolExecutor """
85 thread_state.proceed = False
86 ident = get_thread_identity()
87 with threads_lock:
88 for t in list(thread_state.executor._threads):
89 if t.ident == ident:
90 thread_state.executor._threads.remove(t)
91 break
92 thread_state.executor._adjust_thread_count()
93
94
95 threads_lock = threading.Lock()
96
97 """
98 PSF LICENSE AGREEMENT FOR PYTHON 3.5.2
99 ======================================
100
101 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
102 the Individual or Organization ("Licensee") accessing and otherwise using Python
103 3.5.2 software in source or binary form and its associated documentation.
104
105 2. Subject to the terms and conditions of this License Agreement, PSF hereby
106 grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
107 analyze, test, perform and/or display publicly, prepare derivative works,
108 distribute, and otherwise use Python 3.5.2 alone or in any derivative
109 version, provided, however, that PSF's License Agreement and PSF's notice of
110 copyright, i.e., "Copyright c 2001-2016 Python Software Foundation; All Rights
111 Reserved" are retained in Python 3.5.2 alone or in any derivative version
112 prepared by Licensee.
113
114 3. In the event Licensee prepares a derivative work that is based on or
115 incorporates Python 3.5.2 or any part thereof, and wants to make the
116 derivative work available to others as provided herein, then Licensee hereby
117 agrees to include in any such work a brief summary of the changes made to Python
118 3.5.2.
119
120 4. PSF is making Python 3.5.2 available to Licensee on an "AS IS" basis.
121 PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
122 EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
123 WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
124 USE OF PYTHON 3.5.2 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
125
126 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.5.2
127 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
128 MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.5.2, OR ANY DERIVATIVE
129 THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
130
131 6. This License Agreement will automatically terminate upon a material breach of
132 its terms and conditions.
133
134 7. Nothing in this License Agreement shall be deemed to create any relationship
135 of agency, partnership, or joint venture between PSF and Licensee. This License
136 Agreement does not grant permission to use PSF trademarks or trade name in a
137 trademark sense to endorse or promote products or services of Licensee, or any
138 third party.
139
140 8. By copying, installing or otherwise using Python 3.5.2, Licensee agrees
141 to be bound by the terms and conditions of this License Agreement.
142 """
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/__init__.py b/distributed/__init__.py
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -10,6 +10,7 @@
from .nanny import Nanny
from .queues import Queue
from .scheduler import Scheduler
+from .threadpoolexecutor import rejoin
from .utils import sync
from .variable import Variable
from .worker import Worker, get_worker, get_client, secede
diff --git a/distributed/threadpoolexecutor.py b/distributed/threadpoolexecutor.py
--- a/distributed/threadpoolexecutor.py
+++ b/distributed/threadpoolexecutor.py
@@ -26,7 +26,6 @@
import logging
import threading
-from .compatibility import get_thread_identity
from .metrics import time
logger = logging.getLogger(__name__)
@@ -40,6 +39,13 @@
try:
while thread_state.proceed:
+ with executor._rejoin_lock:
+ if executor._rejoin_list:
+ rejoin_thread, rejoin_event = executor._rejoin_list.pop()
+ executor._threads.add(rejoin_thread)
+ executor._threads.remove(threading.current_thread())
+ rejoin_event.set()
+ break
task = work_queue.get()
if task is not None: # sentinel
task.run()
@@ -56,6 +62,11 @@
class ThreadPoolExecutor(thread.ThreadPoolExecutor):
+ def __init__(self, *args, **kwargs):
+ super(ThreadPoolExecutor, self).__init__(*args, **kwargs)
+ self._rejoin_list = []
+ self._rejoin_lock = threading.Lock()
+
def _adjust_thread_count(self):
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
@@ -80,16 +91,38 @@
t.join(timeout=timeout2)
-def secede():
- """ Have this thread secede from the ThreadPoolExecutor """
+def secede(adjust=True):
+ """ Have this thread secede from the ThreadPoolExecutor
+
+ See Also
+ --------
+ rejoin: rejoin the thread pool
+ """
thread_state.proceed = False
- ident = get_thread_identity()
with threads_lock:
- for t in list(thread_state.executor._threads):
- if t.ident == ident:
- thread_state.executor._threads.remove(t)
- break
- thread_state.executor._adjust_thread_count()
+ thread_state.executor._threads.remove(threading.current_thread())
+ if adjust:
+ thread_state.executor._adjust_thread_count()
+
+
+def rejoin():
+ """ Have this thread rejoin the ThreadPoolExecutor
+
+ This will block until a new slot opens up in the executor. The next thread
+ to finish a task will leave the pool to allow this one to join.
+
+ See Also
+ --------
+ secede: leave the thread pool
+ """
+ thread = threading.current_thread()
+ event = threading.Event()
+ e = thread_state.executor
+ with e._rejoin_lock:
+ e._rejoin_list.append((thread, event))
+ e.submit(lambda: None)
+ event.wait()
+ thread_state.proceed = True
threads_lock = threading.Lock()
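
With the patch applied, a long-running task can give up its slot while it waits and then block until a slot frees up again before finishing; a minimal usage sketch (the cluster address and the slow call are placeholders):

```python
from distributed import Client, secede, rejoin


def long_running(x):
    secede()                      # leave the worker's thread pool; frees a slot for other tasks
    result = expensive_wait(x)    # placeholder for the slow part (I/O, subprocess, sub-computation)
    rejoin()                      # block until a pool slot opens up, then finish as a normal task
    return result


client = Client("tcp://scheduler:8786")   # placeholder scheduler address
future = client.submit(long_running, 42)
```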
|
{"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -10,6 +10,7 @@\n from .nanny import Nanny\n from .queues import Queue\n from .scheduler import Scheduler\n+from .threadpoolexecutor import rejoin\n from .utils import sync\n from .variable import Variable\n from .worker import Worker, get_worker, get_client, secede\ndiff --git a/distributed/threadpoolexecutor.py b/distributed/threadpoolexecutor.py\n--- a/distributed/threadpoolexecutor.py\n+++ b/distributed/threadpoolexecutor.py\n@@ -26,7 +26,6 @@\n import logging\n import threading\n \n-from .compatibility import get_thread_identity\n from .metrics import time\n \n logger = logging.getLogger(__name__)\n@@ -40,6 +39,13 @@\n \n try:\n while thread_state.proceed:\n+ with executor._rejoin_lock:\n+ if executor._rejoin_list:\n+ rejoin_thread, rejoin_event = executor._rejoin_list.pop()\n+ executor._threads.add(rejoin_thread)\n+ executor._threads.remove(threading.current_thread())\n+ rejoin_event.set()\n+ break\n task = work_queue.get()\n if task is not None: # sentinel\n task.run()\n@@ -56,6 +62,11 @@\n \n \n class ThreadPoolExecutor(thread.ThreadPoolExecutor):\n+ def __init__(self, *args, **kwargs):\n+ super(ThreadPoolExecutor, self).__init__(*args, **kwargs)\n+ self._rejoin_list = []\n+ self._rejoin_lock = threading.Lock()\n+\n def _adjust_thread_count(self):\n if len(self._threads) < self._max_workers:\n t = threading.Thread(target=_worker,\n@@ -80,16 +91,38 @@\n t.join(timeout=timeout2)\n \n \n-def secede():\n- \"\"\" Have this thread secede from the ThreadPoolExecutor \"\"\"\n+def secede(adjust=True):\n+ \"\"\" Have this thread secede from the ThreadPoolExecutor\n+\n+ See Also\n+ --------\n+ rejoin: rejoin the thread pool\n+ \"\"\"\n thread_state.proceed = False\n- ident = get_thread_identity()\n with threads_lock:\n- for t in list(thread_state.executor._threads):\n- if t.ident == ident:\n- thread_state.executor._threads.remove(t)\n- break\n- thread_state.executor._adjust_thread_count()\n+ thread_state.executor._threads.remove(threading.current_thread())\n+ if adjust:\n+ thread_state.executor._adjust_thread_count()\n+\n+\n+def rejoin():\n+ \"\"\" Have this thread rejoin the ThreadPoolExecutor\n+\n+ This will block until a new slot opens up in the executor. The next thread\n+ to finish a task will leave the pool to allow this one to join.\n+\n+ See Also\n+ --------\n+ secede: leave the thread pool\n+ \"\"\"\n+ thread = threading.current_thread()\n+ event = threading.Event()\n+ e = thread_state.executor\n+ with e._rejoin_lock:\n+ e._rejoin_list.append((thread, event))\n+ e.submit(lambda: None)\n+ event.wait()\n+ thread_state.proceed = True\n \n \n threads_lock = threading.Lock()\n", "issue": "Add rejoin operation to rejoin thread pool\nCurrently the `secede` function allows a task to remove itself from the worker's current thread pool, opening up a space for more tasks.\r\n\r\nWe might consider an inverse operation, `rejoin` that blocks until a new spot in the thread pool has opened up. 
This would enable long-running task computations to avoid contention of many threads computing at once.\r\n\r\nFirst suggested by @adamklein in https://github.com/dask/distributed/issues/1342\r\n\r\nalso cc @ogrisel\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom .config import config\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster\nfrom .diagnostics import progress\nfrom .client import (Client, Executor, CompatibleExecutor,\n wait, as_completed, default_client, fire_and_forget,\n Future)\nfrom .nanny import Nanny\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .utils import sync\nfrom .variable import Variable\nfrom .worker import Worker, get_worker, get_client, secede\nfrom .worker_client import local_client, worker_client\n\nfrom ._version import get_versions\nversions = get_versions()\n__version__ = versions['version']\n__git_revision__ = versions['full-revisionid']\ndel get_versions, versions\n", "path": "distributed/__init__.py"}, {"content": "\"\"\"\nModified ThreadPoolExecutor to support threads leaving the thread pool\n\nThis includes a global `secede` method that a submitted function can call to\nhave its thread leave the ThreadPoolExecutor's thread pool. This allows the\nthread pool to allocate another thread if necessary and so is useful when a\nfunction realises that it is going to be a long-running job that doesn't want\nto take up space. When the function finishes its thread will terminate\ngracefully.\n\nThis code copies and modifies two functions from the\n`concurrent.futures.thread` module, notably `_worker` and\nThreadPoolExecutor._adjust_thread_count` to allow for checking against a global\n`threading.local` state. These functions are subject to the following license,\nwhich is included as a comment at the end of this file:\n\n https://docs.python.org/3/license.html\n\n... and are under copyright by the Python Software Foundation\n\n Copyright 2001-2016 Python Software Foundation; All Rights Reserved\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nfrom . 
import _concurrent_futures_thread as thread\nimport logging\nimport threading\n\nfrom .compatibility import get_thread_identity\nfrom .metrics import time\n\nlogger = logging.getLogger(__name__)\n\nthread_state = threading.local()\n\n\ndef _worker(executor, work_queue):\n thread_state.proceed = True\n thread_state.executor = executor\n\n try:\n while thread_state.proceed:\n task = work_queue.get()\n if task is not None: # sentinel\n task.run()\n del task\n elif thread._shutdown or executor is None or executor._shutdown:\n work_queue.put(None)\n return\n del executor\n except BaseException:\n logger.critical('Exception in worker', exc_info=True)\n finally:\n del thread_state.proceed\n del thread_state.executor\n\n\nclass ThreadPoolExecutor(thread.ThreadPoolExecutor):\n def _adjust_thread_count(self):\n if len(self._threads) < self._max_workers:\n t = threading.Thread(target=_worker,\n name=\"ThreadPool worker %d\" % len(self._threads,),\n args=(self, self._work_queue))\n t.daemon = True\n self._threads.add(t)\n t.start()\n\n def shutdown(self, wait=True, timeout=None):\n with threads_lock:\n with self._shutdown_lock:\n self._shutdown = True\n self._work_queue.put(None)\n if timeout is not None:\n deadline = time() + timeout\n for t in self._threads:\n if timeout is not None:\n timeout2 = max(deadline - time(), 0)\n else:\n timeout2 = None\n t.join(timeout=timeout2)\n\n\ndef secede():\n \"\"\" Have this thread secede from the ThreadPoolExecutor \"\"\"\n thread_state.proceed = False\n ident = get_thread_identity()\n with threads_lock:\n for t in list(thread_state.executor._threads):\n if t.ident == ident:\n thread_state.executor._threads.remove(t)\n break\n thread_state.executor._adjust_thread_count()\n\n\nthreads_lock = threading.Lock()\n\n\"\"\"\nPSF LICENSE AGREEMENT FOR PYTHON 3.5.2\n======================================\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation (\"PSF\"), and\n the Individual or Organization (\"Licensee\") accessing and otherwise using Python\n 3.5.2 software in source or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\n grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\n analyze, test, perform and/or display publicly, prepare derivative works,\n distribute, and otherwise use Python 3.5.2 alone or in any derivative\n version, provided, however, that PSF's License Agreement and PSF's notice of\n copyright, i.e., \"Copyright c 2001-2016 Python Software Foundation; All Rights\n Reserved\" are retained in Python 3.5.2 alone or in any derivative version\n prepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on or\n incorporates Python 3.5.2 or any part thereof, and wants to make the\n derivative work available to others as provided herein, then Licensee hereby\n agrees to include in any such work a brief summary of the changes made to Python\n 3.5.2.\n\n4. PSF is making Python 3.5.2 available to Licensee on an \"AS IS\" basis.\n PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF\n EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR\n WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE\n USE OF PYTHON 3.5.2 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.5.2\n FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF\n MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.5.2, OR ANY DERIVATIVE\n THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material breach of\n its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any relationship\n of agency, partnership, or joint venture between PSF and Licensee. This License\n Agreement does not grant permission to use PSF trademarks or trade name in a\n trademark sense to endorse or promote products or services of Licensee, or any\n third party.\n\n8. By copying, installing or otherwise using Python 3.5.2, Licensee agrees\n to be bound by the terms and conditions of this License Agreement.\n\"\"\"\n", "path": "distributed/threadpoolexecutor.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom .config import config\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster\nfrom .diagnostics import progress\nfrom .client import (Client, Executor, CompatibleExecutor,\n wait, as_completed, default_client, fire_and_forget,\n Future)\nfrom .nanny import Nanny\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .threadpoolexecutor import rejoin\nfrom .utils import sync\nfrom .variable import Variable\nfrom .worker import Worker, get_worker, get_client, secede\nfrom .worker_client import local_client, worker_client\n\nfrom ._version import get_versions\nversions = get_versions()\n__version__ = versions['version']\n__git_revision__ = versions['full-revisionid']\ndel get_versions, versions\n", "path": "distributed/__init__.py"}, {"content": "\"\"\"\nModified ThreadPoolExecutor to support threads leaving the thread pool\n\nThis includes a global `secede` method that a submitted function can call to\nhave its thread leave the ThreadPoolExecutor's thread pool. This allows the\nthread pool to allocate another thread if necessary and so is useful when a\nfunction realises that it is going to be a long-running job that doesn't want\nto take up space. When the function finishes its thread will terminate\ngracefully.\n\nThis code copies and modifies two functions from the\n`concurrent.futures.thread` module, notably `_worker` and\nThreadPoolExecutor._adjust_thread_count` to allow for checking against a global\n`threading.local` state. These functions are subject to the following license,\nwhich is included as a comment at the end of this file:\n\n https://docs.python.org/3/license.html\n\n... and are under copyright by the Python Software Foundation\n\n Copyright 2001-2016 Python Software Foundation; All Rights Reserved\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nfrom . 
import _concurrent_futures_thread as thread\nimport logging\nimport threading\n\nfrom .metrics import time\n\nlogger = logging.getLogger(__name__)\n\nthread_state = threading.local()\n\n\ndef _worker(executor, work_queue):\n thread_state.proceed = True\n thread_state.executor = executor\n\n try:\n while thread_state.proceed:\n with executor._rejoin_lock:\n if executor._rejoin_list:\n rejoin_thread, rejoin_event = executor._rejoin_list.pop()\n executor._threads.add(rejoin_thread)\n executor._threads.remove(threading.current_thread())\n rejoin_event.set()\n break\n task = work_queue.get()\n if task is not None: # sentinel\n task.run()\n del task\n elif thread._shutdown or executor is None or executor._shutdown:\n work_queue.put(None)\n return\n del executor\n except BaseException:\n logger.critical('Exception in worker', exc_info=True)\n finally:\n del thread_state.proceed\n del thread_state.executor\n\n\nclass ThreadPoolExecutor(thread.ThreadPoolExecutor):\n def __init__(self, *args, **kwargs):\n super(ThreadPoolExecutor, self).__init__(*args, **kwargs)\n self._rejoin_list = []\n self._rejoin_lock = threading.Lock()\n\n def _adjust_thread_count(self):\n if len(self._threads) < self._max_workers:\n t = threading.Thread(target=_worker,\n name=\"ThreadPool worker %d\" % len(self._threads,),\n args=(self, self._work_queue))\n t.daemon = True\n self._threads.add(t)\n t.start()\n\n def shutdown(self, wait=True, timeout=None):\n with threads_lock:\n with self._shutdown_lock:\n self._shutdown = True\n self._work_queue.put(None)\n if timeout is not None:\n deadline = time() + timeout\n for t in self._threads:\n if timeout is not None:\n timeout2 = max(deadline - time(), 0)\n else:\n timeout2 = None\n t.join(timeout=timeout2)\n\n\ndef secede(adjust=True):\n \"\"\" Have this thread secede from the ThreadPoolExecutor\n\n See Also\n --------\n rejoin: rejoin the thread pool\n \"\"\"\n thread_state.proceed = False\n with threads_lock:\n thread_state.executor._threads.remove(threading.current_thread())\n if adjust:\n thread_state.executor._adjust_thread_count()\n\n\ndef rejoin():\n \"\"\" Have this thread rejoin the ThreadPoolExecutor\n\n This will block until a new slot opens up in the executor. The next thread\n to finish a task will leave the pool to allow this one to join.\n\n See Also\n --------\n secede: leave the thread pool\n \"\"\"\n thread = threading.current_thread()\n event = threading.Event()\n e = thread_state.executor\n with e._rejoin_lock:\n e._rejoin_list.append((thread, event))\n e.submit(lambda: None)\n event.wait()\n thread_state.proceed = True\n\n\nthreads_lock = threading.Lock()\n\n\"\"\"\nPSF LICENSE AGREEMENT FOR PYTHON 3.5.2\n======================================\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation (\"PSF\"), and\n the Individual or Organization (\"Licensee\") accessing and otherwise using Python\n 3.5.2 software in source or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\n grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\n analyze, test, perform and/or display publicly, prepare derivative works,\n distribute, and otherwise use Python 3.5.2 alone or in any derivative\n version, provided, however, that PSF's License Agreement and PSF's notice of\n copyright, i.e., \"Copyright c 2001-2016 Python Software Foundation; All Rights\n Reserved\" are retained in Python 3.5.2 alone or in any derivative version\n prepared by Licensee.\n\n3. 
In the event Licensee prepares a derivative work that is based on or\n incorporates Python 3.5.2 or any part thereof, and wants to make the\n derivative work available to others as provided herein, then Licensee hereby\n agrees to include in any such work a brief summary of the changes made to Python\n 3.5.2.\n\n4. PSF is making Python 3.5.2 available to Licensee on an \"AS IS\" basis.\n PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF\n EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR\n WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE\n USE OF PYTHON 3.5.2 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.\n\n5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.5.2\n FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF\n MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.5.2, OR ANY DERIVATIVE\n THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material breach of\n its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any relationship\n of agency, partnership, or joint venture between PSF and Licensee. This License\n Agreement does not grant permission to use PSF trademarks or trade name in a\n trademark sense to endorse or promote products or services of Licensee, or any\n third party.\n\n8. By copying, installing or otherwise using Python 3.5.2, Licensee agrees\n to be bound by the terms and conditions of this License Agreement.\n\"\"\"\n", "path": "distributed/threadpoolexecutor.py"}]}
| 2,190 | 737 |
gh_patches_debug_34466
|
rasdani/github-patches
|
git_diff
|
horovod__horovod-3074
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incremental build support
**Environment:**
1. Framework: (TensorFlow, Keras, PyTorch, MXNet) TensorFlow
2. Framework version: TF 1.14.0
3. Horovod version: tip of master
4. MPI version:
5. CUDA version: 10.0
6. NCCL version: tip of master
7. Python version: 3.6.8
8. OS and version: Ubuntu 18.04
9. GCC version: 7.4.0
**Checklist:**
1. Did you search issues to find if somebody asked this question before? Yes.
2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)? N/A
3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)? N/A
4. Did you check if your question is answered in the [troubleshooting guide](https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)? Yes.
**Your question:**
Is there any way to do an incremental build of horovod? I can't figure out a way to build horovod from a local copy of the source code, except through `pip install .`, but that seems to build from scratch every time, regardless of changes to the source code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
2 # Modifications copyright Microsoft
3 # Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # ==============================================================================
17
18 import os
19 import subprocess
20 import sys
21 import textwrap
22
23 from setuptools import setup, Extension, find_packages
24 from setuptools.command.build_ext import build_ext
25
26 from horovod import __version__
27
28
29 class CMakeExtension(Extension):
30 def __init__(self, name, cmake_lists_dir='.', sources=[], **kwa):
31 Extension.__init__(self, name, sources=sources, **kwa)
32 self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)
33
34
35 tensorflow_mpi_lib = CMakeExtension('horovod.tensorflow.mpi_lib',
36 cmake_lists_dir='.', sources=[])
37 torch_mpi_lib_v2 = CMakeExtension('horovod.torch.mpi_lib_v2',
38 cmake_lists_dir='.', sources=[])
39 mxnet_mpi_lib = CMakeExtension('horovod.mxnet.mpi_lib',
40 cmake_lists_dir='.', sources=[])
41
42 def is_build_action():
43 if len(sys.argv) <= 1:
44 return False
45
46 if sys.argv[1].startswith('build'):
47 return True
48
49 if sys.argv[1].startswith('bdist'):
50 return True
51
52 if sys.argv[1].startswith('install'):
53 return True
54
55
56 def get_cmake_bin():
57 return os.environ.get('HOROVOD_CMAKE', 'cmake')
58
59
60 class custom_build_ext(build_ext):
61 def build_extensions(self):
62 if os.getenv('HOROVOD_SKIP_COMPILE') == '1':
63 # Skip building extensions using CMake
64 print("Horovod is being installed without native libraries")
65 return
66
67 cmake_bin = get_cmake_bin()
68
69 config = 'Debug' if self.debug else 'RelWithDebInfo'
70
71 ext_name = self.extensions[0].name
72 build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')
73 build_dir = os.path.abspath(build_dir)
74
75 cmake_args = ['-DCMAKE_BUILD_TYPE=' + config,
76 '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), build_dir),
77 '-DPYTHON_EXECUTABLE:FILEPATH=' + sys.executable]
78
79 make_args = []
80 if self.verbose:
81 make_args.append('VERBOSE=1')
82
83 cmake_build_args = ['--config', config]
84 if make_args:
85 # -- specifies that these args are going to the native build tool: make
86 cmake_build_args += ['--'] + make_args
87
88 cmake_build_dir = os.path.join(self.build_temp, config)
89 if not os.path.exists(cmake_build_dir):
90 os.makedirs(cmake_build_dir)
91
92 # Config and build the extension
93 try:
94 subprocess.check_call([cmake_bin, self.extensions[0].cmake_lists_dir] + cmake_args,
95 cwd=cmake_build_dir)
96 subprocess.check_call([cmake_bin, '--build', '.'] + cmake_build_args,
97 cwd=cmake_build_dir)
98 except OSError as e:
99 raise RuntimeError('CMake failed: {}'.format(str(e)))
100
101
102 # python packages required to use horovod in general
103 require_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<"3.7"']
104
105 # framework dependencies
106 tensorflow_require_list = ['tensorflow']
107 tensorflow_cpu_require_list = ['tensorflow-cpu']
108 tensorflow_gpu_require_list = ['tensorflow-gpu']
109 keras_require_list = ['keras>=2.0.8,!=2.0.9,!=2.1.0,!=2.1.1']
110 pytorch_require_list = ['torch', 'pytorch_lightning']
111 mxnet_require_list = ['mxnet>=1.4.1']
112 pyspark_require_list = ['pyspark>=2.3.2;python_version<"3.8"',
113 'pyspark>=3.0.0;python_version>="3.8"']
114 # Pin h5py: https://github.com/h5py/h5py/issues/1732
115 spark_require_list = ['h5py<3', 'numpy', 'petastorm>=0.11.0', 'pyarrow>=0.15.0', 'fsspec']
116 ray_require_list = ['ray']
117 pytorch_spark_require_list = pytorch_require_list + \
118 spark_require_list + \
119 pyspark_require_list
120
121 # all frameworks' dependencies
122 all_frameworks_require_list = tensorflow_require_list + \
123 keras_require_list + \
124 pytorch_require_list + \
125 mxnet_require_list + \
126 spark_require_list + \
127 pyspark_require_list
128
129 # python packages required / recommended to develop horovod
130 # these are the earliest versions to work with Python 3.8
131 # keep in sync with Dockerfile.test.cpu
132 # NOTE: do not use versions with +cpu or +gpu here as users would need to add --find-links to pip
133 dev_require_list = ['tensorflow-cpu==2.2.0',
134 'keras==2.3.1',
135 'torch==1.4.0',
136 'torchvision==0.5.0',
137 'pytorch_lightning>=1.2.9',
138 'mxnet==1.5.0',
139 'pyspark==3.0.1'] + spark_require_list
140 # torchvision 0.5.0 depends on torch==1.4.0
141
142 # python packages required only to run tests
143 # Pin h5py: https://github.com/h5py/h5py/issues/1732
144 test_require_list = ['mock', 'pytest', 'pytest-forked', 'parameterized', 'h5py<3']
145
146 # Skip cffi if pytorch extension explicitly disabled
147 if not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):
148 require_list.append('cffi>=1.4.0')
149
150
151 def get_package_version():
152 return __version__ + "+" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__
153
154
155 setup(name='horovod',
156 version=get_package_version(),
157 packages=find_packages(),
158 description='Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',
159 author='The Horovod Authors',
160 license='Apache 2.0',
161 long_description=textwrap.dedent('''\
162 Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.
163 The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),
164 url='https://github.com/horovod/horovod',
165 keywords=['deep learning', 'tensorflow', 'keras', 'pytorch', 'mxnet', 'spark', 'AI'],
166 classifiers=[
167 'License :: OSI Approved :: Apache Software License',
168 'Development Status :: 4 - Beta',
169 'Intended Audience :: Developers',
170 'Topic :: Scientific/Engineering :: Artificial Intelligence',
171 ],
172 ext_modules=[tensorflow_mpi_lib, torch_mpi_lib_v2, mxnet_mpi_lib],
173 cmdclass={'build_ext': custom_build_ext},
174 # cffi is required for PyTorch
175 # If cffi is specified in setup_requires, it will need libffi to be installed on the machine,
176 # which is undesirable. Luckily, `install` action will install cffi before executing build,
177 # so it's only necessary for `build*` or `bdist*` actions.
178 setup_requires=require_list if is_build_action() else [],
179 install_requires=require_list,
180 tests_require=test_require_list,
181 extras_require={
182 'all-frameworks': all_frameworks_require_list,
183 'tensorflow': tensorflow_require_list,
184 'tensorflow-cpu': tensorflow_cpu_require_list,
185 'tensorflow-gpu': tensorflow_gpu_require_list,
186 'keras': keras_require_list,
187 'pytorch': pytorch_require_list,
188 'mxnet': mxnet_require_list,
189 'spark': spark_require_list + pyspark_require_list,
190 'pytorch-spark': pytorch_spark_require_list,
191 'ray': ray_require_list,
192 'dev': dev_require_list,
193 'test': test_require_list,
194 },
195 python_requires='>=3.6',
196 zip_safe=False,
197 entry_points={
198 'console_scripts': [
199 'horovodrun = horovod.runner.launch:run_commandline'
200 ]
201 })
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,6 +16,7 @@
# ==============================================================================
import os
+import shutil
import subprocess
import sys
import textwrap
@@ -25,6 +26,7 @@
from horovod import __version__
+_FRAMEWORK_METADATA_FILE = 'horovod/metadata.json'
class CMakeExtension(Extension):
def __init__(self, name, cmake_lists_dir='.', sources=[], **kwa):
@@ -52,6 +54,8 @@
if sys.argv[1].startswith('install'):
return True
+ if sys.argv[1].startswith('develop'):
+ return True
def get_cmake_bin():
return os.environ.get('HOROVOD_CMAKE', 'cmake')
@@ -66,7 +70,7 @@
cmake_bin = get_cmake_bin()
- config = 'Debug' if self.debug else 'RelWithDebInfo'
+ config = 'Debug' if self.debug or os.environ.get('HOROVOD_DEBUG') == "1" else 'RelWithDebInfo'
ext_name = self.extensions[0].name
build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')
@@ -98,6 +102,13 @@
except OSError as e:
raise RuntimeError('CMake failed: {}'.format(str(e)))
+ if sys.argv[1].startswith('develop'):
+ # Copy over metadata.json file from build directory
+ shutil.copyfile(os.path.join(build_dir, _FRAMEWORK_METADATA_FILE),
+ os.path.join(self.extensions[0].cmake_lists_dir, _FRAMEWORK_METADATA_FILE))
+ # Remove unfound frameworks, otherwise develop mode will fail the install
+ self.extensions = [x for x in self.extensions if os.path.exists(self.get_ext_fullpath(x.name))]
+
# python packages required to use horovod in general
require_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<"3.7"']
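A minimal sketch of the command-detection logic the patch above extends (illustrative only, not Horovod's actual setup.py): treating `develop` as a build action is one piece of supporting editable installs (`pip install -e .` / `python setup.py develop`), which is how incremental development builds are typically done.
```python
# Illustrative stand-in for the patched is_build_action(): 'develop'
# (editable install) now counts as a build action alongside build/bdist/install.
import sys

def is_build_action(argv=None):
    argv = sys.argv if argv is None else argv
    if len(argv) <= 1:
        return False
    return any(argv[1].startswith(prefix)
               for prefix in ("build", "bdist", "install", "develop"))

print(is_build_action(["setup.py", "develop"]))  # True with the patched logic
print(is_build_action(["setup.py", "clean"]))    # False
```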
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,6 +16,7 @@\n # ==============================================================================\n \n import os\n+import shutil\n import subprocess\n import sys\n import textwrap\n@@ -25,6 +26,7 @@\n \n from horovod import __version__\n \n+_FRAMEWORK_METADATA_FILE = 'horovod/metadata.json'\n \n class CMakeExtension(Extension):\n def __init__(self, name, cmake_lists_dir='.', sources=[], **kwa):\n@@ -52,6 +54,8 @@\n if sys.argv[1].startswith('install'):\n return True\n \n+ if sys.argv[1].startswith('develop'):\n+ return True\n \n def get_cmake_bin():\n return os.environ.get('HOROVOD_CMAKE', 'cmake')\n@@ -66,7 +70,7 @@\n \n cmake_bin = get_cmake_bin()\n \n- config = 'Debug' if self.debug else 'RelWithDebInfo'\n+ config = 'Debug' if self.debug or os.environ.get('HOROVOD_DEBUG') == \"1\" else 'RelWithDebInfo'\n \n ext_name = self.extensions[0].name\n build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')\n@@ -98,6 +102,13 @@\n except OSError as e:\n raise RuntimeError('CMake failed: {}'.format(str(e)))\n \n+ if sys.argv[1].startswith('develop'):\n+ # Copy over metadata.json file from build directory\n+ shutil.copyfile(os.path.join(build_dir, _FRAMEWORK_METADATA_FILE),\n+ os.path.join(self.extensions[0].cmake_lists_dir, _FRAMEWORK_METADATA_FILE))\n+ # Remove unfound frameworks, otherwise develop mode will fail the install\n+ self.extensions = [x for x in self.extensions if os.path.exists(self.get_ext_fullpath(x.name))]\n+\n \n # python packages required to use horovod in general\n require_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<\"3.7\"']\n", "issue": "Incremental build support\n**Environment:**\r\n1. Framework: (TensorFlow, Keras, PyTorch, MXNet) TensorFlow\r\n2. Framework version: TF 1.14.0\r\n3. Horovod version: tip of master\r\n4. MPI version:\r\n5. CUDA version: 10.0\r\n6. NCCL version: tip of master\r\n7. Python version: 3.6.8\r\n8. OS and version: Ubuntu 18.04\r\n9. GCC version: 7.4.0\r\n\r\n**Checklist:**\r\n1. Did you search issues to find if somebody asked this question before? Yes.\r\n2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)? N/A\r\n3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)? N/A\r\n4. Did you check if you question is answered in the [troubleshooting guide](https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)? Yes.\r\n\r\n**Your question:**\r\n\r\nIs there any way to do an incremental build of horovod? I can't figure out a way to build horovod from a local copy of the source code, except through `pip install .`, but that seems to build from scratch every time, regardless of changes to the source code.\n", "before_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n# Modifications copyright Microsoft\n# Modifications copyright (C) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport subprocess\nimport sys\nimport textwrap\n\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\n\nfrom horovod import __version__\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, cmake_lists_dir='.', sources=[], **kwa):\n Extension.__init__(self, name, sources=sources, **kwa)\n self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)\n\n\ntensorflow_mpi_lib = CMakeExtension('horovod.tensorflow.mpi_lib',\n cmake_lists_dir='.', sources=[])\ntorch_mpi_lib_v2 = CMakeExtension('horovod.torch.mpi_lib_v2',\n cmake_lists_dir='.', sources=[])\nmxnet_mpi_lib = CMakeExtension('horovod.mxnet.mpi_lib',\n cmake_lists_dir='.', sources=[])\n\ndef is_build_action():\n if len(sys.argv) <= 1:\n return False\n\n if sys.argv[1].startswith('build'):\n return True\n\n if sys.argv[1].startswith('bdist'):\n return True\n\n if sys.argv[1].startswith('install'):\n return True\n\n\ndef get_cmake_bin():\n return os.environ.get('HOROVOD_CMAKE', 'cmake')\n\n\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n if os.getenv('HOROVOD_SKIP_COMPILE') == '1':\n # Skip building extensions using CMake\n print(\"Horovod is being installed without native libraries\")\n return\n\n cmake_bin = get_cmake_bin()\n\n config = 'Debug' if self.debug else 'RelWithDebInfo'\n\n ext_name = self.extensions[0].name\n build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')\n build_dir = os.path.abspath(build_dir)\n\n cmake_args = ['-DCMAKE_BUILD_TYPE=' + config,\n '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), build_dir),\n '-DPYTHON_EXECUTABLE:FILEPATH=' + sys.executable]\n\n make_args = []\n if self.verbose:\n make_args.append('VERBOSE=1')\n\n cmake_build_args = ['--config', config]\n if make_args:\n # -- specifies that these args are going to the native build tool: make\n cmake_build_args += ['--'] + make_args\n\n cmake_build_dir = os.path.join(self.build_temp, config)\n if not os.path.exists(cmake_build_dir):\n os.makedirs(cmake_build_dir)\n\n # Config and build the extension\n try:\n subprocess.check_call([cmake_bin, self.extensions[0].cmake_lists_dir] + cmake_args,\n cwd=cmake_build_dir)\n subprocess.check_call([cmake_bin, '--build', '.'] + cmake_build_args,\n cwd=cmake_build_dir)\n except OSError as e:\n raise RuntimeError('CMake failed: {}'.format(str(e)))\n\n\n# python packages required to use horovod in general\nrequire_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<\"3.7\"']\n\n# framework dependencies\ntensorflow_require_list = ['tensorflow']\ntensorflow_cpu_require_list = ['tensorflow-cpu']\ntensorflow_gpu_require_list = ['tensorflow-gpu']\nkeras_require_list = ['keras>=2.0.8,!=2.0.9,!=2.1.0,!=2.1.1']\npytorch_require_list = ['torch', 'pytorch_lightning']\nmxnet_require_list = 
['mxnet>=1.4.1']\npyspark_require_list = ['pyspark>=2.3.2;python_version<\"3.8\"',\n 'pyspark>=3.0.0;python_version>=\"3.8\"']\n# Pin h5py: https://github.com/h5py/h5py/issues/1732\nspark_require_list = ['h5py<3', 'numpy', 'petastorm>=0.11.0', 'pyarrow>=0.15.0', 'fsspec']\nray_require_list = ['ray']\npytorch_spark_require_list = pytorch_require_list + \\\n spark_require_list + \\\n pyspark_require_list\n\n# all frameworks' dependencies\nall_frameworks_require_list = tensorflow_require_list + \\\n keras_require_list + \\\n pytorch_require_list + \\\n mxnet_require_list + \\\n spark_require_list + \\\n pyspark_require_list\n\n# python packages required / recommended to develop horovod\n# these are the earliest versions to work with Python 3.8\n# keep in sync with Dockerfile.test.cpu\n# NOTE: do not use versions with +cpu or +gpu here as users would need to add --find-links to pip\ndev_require_list = ['tensorflow-cpu==2.2.0',\n 'keras==2.3.1',\n 'torch==1.4.0',\n 'torchvision==0.5.0',\n 'pytorch_lightning>=1.2.9',\n 'mxnet==1.5.0',\n 'pyspark==3.0.1'] + spark_require_list\n# torchvision 0.5.0 depends on torch==1.4.0\n\n# python packages required only to run tests\n# Pin h5py: https://github.com/h5py/h5py/issues/1732\ntest_require_list = ['mock', 'pytest', 'pytest-forked', 'parameterized', 'h5py<3']\n\n# Skip cffi if pytorch extension explicitly disabled\nif not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):\n require_list.append('cffi>=1.4.0')\n\n\ndef get_package_version():\n return __version__ + \"+\" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__\n\n\nsetup(name='horovod',\n version=get_package_version(),\n packages=find_packages(),\n description='Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',\n author='The Horovod Authors',\n license='Apache 2.0',\n long_description=textwrap.dedent('''\\\n Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.\n The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),\n url='https://github.com/horovod/horovod',\n keywords=['deep learning', 'tensorflow', 'keras', 'pytorch', 'mxnet', 'spark', 'AI'],\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n ext_modules=[tensorflow_mpi_lib, torch_mpi_lib_v2, mxnet_mpi_lib],\n cmdclass={'build_ext': custom_build_ext},\n # cffi is required for PyTorch\n # If cffi is specified in setup_requires, it will need libffi to be installed on the machine,\n # which is undesirable. 
Luckily, `install` action will install cffi before executing build,\n # so it's only necessary for `build*` or `bdist*` actions.\n setup_requires=require_list if is_build_action() else [],\n install_requires=require_list,\n tests_require=test_require_list,\n extras_require={\n 'all-frameworks': all_frameworks_require_list,\n 'tensorflow': tensorflow_require_list,\n 'tensorflow-cpu': tensorflow_cpu_require_list,\n 'tensorflow-gpu': tensorflow_gpu_require_list,\n 'keras': keras_require_list,\n 'pytorch': pytorch_require_list,\n 'mxnet': mxnet_require_list,\n 'spark': spark_require_list + pyspark_require_list,\n 'pytorch-spark': pytorch_spark_require_list,\n 'ray': ray_require_list,\n 'dev': dev_require_list,\n 'test': test_require_list,\n },\n python_requires='>=3.6',\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'horovodrun = horovod.runner.launch:run_commandline'\n ]\n })\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n# Modifications copyright Microsoft\n# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport textwrap\n\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\n\nfrom horovod import __version__\n\n_FRAMEWORK_METADATA_FILE = 'horovod/metadata.json'\n\nclass CMakeExtension(Extension):\n def __init__(self, name, cmake_lists_dir='.', sources=[], **kwa):\n Extension.__init__(self, name, sources=sources, **kwa)\n self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)\n\n\ntensorflow_mpi_lib = CMakeExtension('horovod.tensorflow.mpi_lib',\n cmake_lists_dir='.', sources=[])\ntorch_mpi_lib_v2 = CMakeExtension('horovod.torch.mpi_lib_v2',\n cmake_lists_dir='.', sources=[])\nmxnet_mpi_lib = CMakeExtension('horovod.mxnet.mpi_lib',\n cmake_lists_dir='.', sources=[])\n\ndef is_build_action():\n if len(sys.argv) <= 1:\n return False\n\n if sys.argv[1].startswith('build'):\n return True\n\n if sys.argv[1].startswith('bdist'):\n return True\n\n if sys.argv[1].startswith('install'):\n return True\n\n if sys.argv[1].startswith('develop'):\n return True\n\ndef get_cmake_bin():\n return os.environ.get('HOROVOD_CMAKE', 'cmake')\n\n\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n if os.getenv('HOROVOD_SKIP_COMPILE') == '1':\n # Skip building extensions using CMake\n print(\"Horovod is being installed without native libraries\")\n return\n\n cmake_bin = get_cmake_bin()\n\n config = 'Debug' if self.debug or os.environ.get('HOROVOD_DEBUG') == \"1\" else 'RelWithDebInfo'\n\n ext_name = self.extensions[0].name\n build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')\n build_dir = os.path.abspath(build_dir)\n\n cmake_args = ['-DCMAKE_BUILD_TYPE=' + config,\n 
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), build_dir),\n '-DPYTHON_EXECUTABLE:FILEPATH=' + sys.executable]\n\n make_args = []\n if self.verbose:\n make_args.append('VERBOSE=1')\n\n cmake_build_args = ['--config', config]\n if make_args:\n # -- specifies that these args are going to the native build tool: make\n cmake_build_args += ['--'] + make_args\n\n cmake_build_dir = os.path.join(self.build_temp, config)\n if not os.path.exists(cmake_build_dir):\n os.makedirs(cmake_build_dir)\n\n # Config and build the extension\n try:\n subprocess.check_call([cmake_bin, self.extensions[0].cmake_lists_dir] + cmake_args,\n cwd=cmake_build_dir)\n subprocess.check_call([cmake_bin, '--build', '.'] + cmake_build_args,\n cwd=cmake_build_dir)\n except OSError as e:\n raise RuntimeError('CMake failed: {}'.format(str(e)))\n\n if sys.argv[1].startswith('develop'):\n # Copy over metadata.json file from build directory\n shutil.copyfile(os.path.join(build_dir, _FRAMEWORK_METADATA_FILE),\n os.path.join(self.extensions[0].cmake_lists_dir, _FRAMEWORK_METADATA_FILE))\n # Remove unfound frameworks, otherwise develop mode will fail the install\n self.extensions = [x for x in self.extensions if os.path.exists(self.get_ext_fullpath(x.name))]\n\n\n# python packages required to use horovod in general\nrequire_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<\"3.7\"']\n\n# framework dependencies\ntensorflow_require_list = ['tensorflow']\ntensorflow_cpu_require_list = ['tensorflow-cpu']\ntensorflow_gpu_require_list = ['tensorflow-gpu']\nkeras_require_list = ['keras>=2.0.8,!=2.0.9,!=2.1.0,!=2.1.1']\npytorch_require_list = ['torch', 'pytorch_lightning']\nmxnet_require_list = ['mxnet>=1.4.1']\npyspark_require_list = ['pyspark>=2.3.2;python_version<\"3.8\"',\n 'pyspark>=3.0.0;python_version>=\"3.8\"']\n# Pin h5py: https://github.com/h5py/h5py/issues/1732\nspark_require_list = ['h5py<3', 'numpy', 'petastorm>=0.11.0', 'pyarrow>=0.15.0', 'fsspec']\nray_require_list = ['ray']\npytorch_spark_require_list = pytorch_require_list + \\\n spark_require_list + \\\n pyspark_require_list\n\n# all frameworks' dependencies\nall_frameworks_require_list = tensorflow_require_list + \\\n keras_require_list + \\\n pytorch_require_list + \\\n mxnet_require_list + \\\n spark_require_list + \\\n pyspark_require_list\n\n# python packages required / recommended to develop horovod\n# these are the earliest versions to work with Python 3.8\n# keep in sync with Dockerfile.test.cpu\n# NOTE: do not use versions with +cpu or +gpu here as users would need to add --find-links to pip\ndev_require_list = ['tensorflow-cpu==2.2.0',\n 'keras==2.3.1',\n 'torch==1.4.0',\n 'torchvision==0.5.0',\n 'pytorch_lightning>=1.2.9',\n 'mxnet==1.5.0',\n 'pyspark==3.0.1'] + spark_require_list\n# torchvision 0.5.0 depends on torch==1.4.0\n\n# python packages required only to run tests\n# Pin h5py: https://github.com/h5py/h5py/issues/1732\ntest_require_list = ['mock', 'pytest', 'pytest-forked', 'parameterized', 'h5py<3']\n\n# Skip cffi if pytorch extension explicitly disabled\nif not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):\n require_list.append('cffi>=1.4.0')\n\n\ndef get_package_version():\n return __version__ + \"+\" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__\n\n\nsetup(name='horovod',\n version=get_package_version(),\n packages=find_packages(),\n description='Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',\n author='The Horovod Authors',\n 
license='Apache 2.0',\n long_description=textwrap.dedent('''\\\n Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.\n The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),\n url='https://github.com/horovod/horovod',\n keywords=['deep learning', 'tensorflow', 'keras', 'pytorch', 'mxnet', 'spark', 'AI'],\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n ext_modules=[tensorflow_mpi_lib, torch_mpi_lib_v2, mxnet_mpi_lib],\n cmdclass={'build_ext': custom_build_ext},\n # cffi is required for PyTorch\n # If cffi is specified in setup_requires, it will need libffi to be installed on the machine,\n # which is undesirable. Luckily, `install` action will install cffi before executing build,\n # so it's only necessary for `build*` or `bdist*` actions.\n setup_requires=require_list if is_build_action() else [],\n install_requires=require_list,\n tests_require=test_require_list,\n extras_require={\n 'all-frameworks': all_frameworks_require_list,\n 'tensorflow': tensorflow_require_list,\n 'tensorflow-cpu': tensorflow_cpu_require_list,\n 'tensorflow-gpu': tensorflow_gpu_require_list,\n 'keras': keras_require_list,\n 'pytorch': pytorch_require_list,\n 'mxnet': mxnet_require_list,\n 'spark': spark_require_list + pyspark_require_list,\n 'pytorch-spark': pytorch_spark_require_list,\n 'ray': ray_require_list,\n 'dev': dev_require_list,\n 'test': test_require_list,\n },\n python_requires='>=3.6',\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'horovodrun = horovod.runner.launch:run_commandline'\n ]\n })\n", "path": "setup.py"}]}
| 3,035 | 468 |
gh_patches_debug_4284
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-3544
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide an API to query the build status by commit
In order to do a check before release that everything is ok, I would like to have a way to obtain the current build status for a given commit. So, in addition to:
```
GET /api/v1/build/{id}/
```
also have this:
```
GET /api/v1/commit/{sha1}/
```
or
```
GET /api/v1/{user}/{project}/commit/{sha1}/
```
Is this possible right now?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/restapi/views/model_views.py`
Content:
```
1 """Endpoints for listing Projects, Versions, Builds, etc."""
2
3 from __future__ import absolute_import
4 import logging
5
6 from django.shortcuts import get_object_or_404
7 from rest_framework import decorators, permissions, viewsets, status
8 from rest_framework.decorators import detail_route
9 from rest_framework.renderers import JSONRenderer
10 from rest_framework.response import Response
11
12 from readthedocs.builds.constants import BRANCH
13 from readthedocs.builds.constants import TAG
14 from readthedocs.builds.models import Build, BuildCommandResult, Version
15 from readthedocs.core.utils import trigger_build
16 from readthedocs.core.utils.extend import SettingsOverrideObject
17 from readthedocs.oauth.services import GitHubService, registry
18 from readthedocs.oauth.models import RemoteOrganization, RemoteRepository
19 from readthedocs.projects.models import Project, EmailHook, Domain
20 from readthedocs.projects.version_handling import determine_stable_version
21
22 from ..permissions import (APIPermission, APIRestrictedPermission,
23 RelatedProjectIsOwner, IsOwner)
24 from ..serializers import (BuildSerializer, BuildAdminSerializer,
25 BuildCommandSerializer,
26 ProjectSerializer, ProjectAdminSerializer,
27 VersionSerializer, VersionAdminSerializer,
28 DomainSerializer, RemoteOrganizationSerializer,
29 RemoteRepositorySerializer)
30 from .. import utils as api_utils
31
32 log = logging.getLogger(__name__)
33
34
35 class UserSelectViewSet(viewsets.ModelViewSet):
36
37 """
38 View set that varies serializer class based on request user credentials.
39
40 Viewsets using this class should have an attribute `admin_serializer_class`,
41 which is a serializer that might have more fields that only admin/staff
42 users require. If the user is staff, this class will be returned instead.
43 """
44
45 def get_serializer_class(self):
46 try:
47 if self.request.user.is_staff and self.admin_serializer_class is not None:
48 return self.admin_serializer_class
49 except AttributeError:
50 pass
51 return self.serializer_class
52
53 def get_queryset(self):
54 """Use our API manager method to determine authorization on queryset."""
55 return self.model.objects.api(self.request.user)
56
57
58 class ProjectViewSet(UserSelectViewSet):
59
60 """List, filter, etc. Projects."""
61
62 permission_classes = [APIPermission]
63 renderer_classes = (JSONRenderer,)
64 serializer_class = ProjectSerializer
65 admin_serializer_class = ProjectAdminSerializer
66 model = Project
67 paginate_by = 100
68 paginate_by_param = 'page_size'
69 max_paginate_by = 1000
70
71 @decorators.detail_route()
72 def valid_versions(self, request, **kwargs):
73 """Maintain state of versions that are wanted."""
74 project = get_object_or_404(
75 Project.objects.api(request.user), pk=kwargs['pk'])
76 if not project.num_major or not project.num_minor or not project.num_point:
77 return Response(
78 {'error': 'Project does not support point version control'},
79 status=status.HTTP_400_BAD_REQUEST)
80 version_strings = project.supported_versions()
81 # Disable making old versions inactive for now.
82 # project.versions.exclude(verbose_name__in=version_strings).update(active=False)
83 project.versions.filter(
84 verbose_name__in=version_strings).update(active=True)
85 return Response({
86 'flat': version_strings,
87 })
88
89 @detail_route()
90 def translations(self, *_, **__):
91 translations = self.get_object().translations.all()
92 return Response({
93 'translations': ProjectSerializer(translations, many=True).data
94 })
95
96 @detail_route()
97 def subprojects(self, request, **kwargs):
98 project = get_object_or_404(
99 Project.objects.api(request.user), pk=kwargs['pk'])
100 rels = project.subprojects.all()
101 children = [rel.child for rel in rels]
102 return Response({
103 'subprojects': ProjectSerializer(children, many=True).data
104 })
105
106 @detail_route()
107 def active_versions(self, request, **kwargs):
108 project = get_object_or_404(
109 Project.objects.api(request.user), pk=kwargs['pk'])
110 versions = project.versions.filter(active=True)
111 return Response({
112 'versions': VersionSerializer(versions, many=True).data
113 })
114
115 @decorators.detail_route(permission_classes=[permissions.IsAdminUser])
116 def token(self, request, **kwargs):
117 project = get_object_or_404(
118 Project.objects.api(request.user), pk=kwargs['pk'])
119 token = GitHubService.get_token_for_project(project, force_local=True)
120 return Response({
121 'token': token
122 })
123
124 @decorators.detail_route()
125 def canonical_url(self, request, **kwargs):
126 project = get_object_or_404(
127 Project.objects.api(request.user), pk=kwargs['pk'])
128 return Response({
129 'url': project.get_docs_url()
130 })
131
132 @decorators.detail_route(permission_classes=[permissions.IsAdminUser], methods=['post'])
133 def sync_versions(self, request, **kwargs): # noqa: D205
134 """
135 Sync the version data in the repo (on the build server) with what we
136 have in the database.
137
138 Returns the identifiers for the versions that have been deleted.
139 """
140 project = get_object_or_404(
141 Project.objects.api(request.user), pk=kwargs['pk'])
142
143 # If the currently highest non-prerelease version is active, then make
144 # the new latest version active as well.
145 old_highest_version = determine_stable_version(project.versions.all())
146 if old_highest_version is not None:
147 activate_new_stable = old_highest_version.active
148 else:
149 activate_new_stable = False
150
151 try:
152 # Update All Versions
153 data = request.data
154 added_versions = set()
155 if 'tags' in data:
156 ret_set = api_utils.sync_versions(
157 project=project, versions=data['tags'], type=TAG)
158 added_versions.update(ret_set)
159 if 'branches' in data:
160 ret_set = api_utils.sync_versions(
161 project=project, versions=data['branches'], type=BRANCH)
162 added_versions.update(ret_set)
163 deleted_versions = api_utils.delete_versions(project, data)
164 except Exception as e:
165 log.exception("Sync Versions Error: %s", e.message)
166 return Response({'error': e.message}, status=status.HTTP_400_BAD_REQUEST)
167
168 promoted_version = project.update_stable_version()
169 if promoted_version:
170 new_stable = project.get_stable_version()
171 log.info(
172 "Triggering new stable build: {project}:{version}".format(
173 project=project.slug,
174 version=new_stable.identifier))
175 trigger_build(project=project, version=new_stable)
176
177 # Marking the tag that is considered the new stable version as
178 # active and building it if it was just added.
179 if (
180 activate_new_stable and
181 promoted_version.slug in added_versions):
182 promoted_version.active = True
183 promoted_version.save()
184 trigger_build(project=project, version=promoted_version)
185
186 return Response({
187 'added_versions': added_versions,
188 'deleted_versions': deleted_versions,
189 })
190
191
192 class VersionViewSet(UserSelectViewSet):
193
194 permission_classes = [APIRestrictedPermission]
195 renderer_classes = (JSONRenderer,)
196 serializer_class = VersionSerializer
197 admin_serializer_class = VersionAdminSerializer
198 model = Version
199
200
201 class BuildViewSetBase(UserSelectViewSet):
202 permission_classes = [APIRestrictedPermission]
203 renderer_classes = (JSONRenderer,)
204 serializer_class = BuildSerializer
205 admin_serializer_class = BuildAdminSerializer
206 model = Build
207
208
209 class BuildViewSet(SettingsOverrideObject):
210
211 """A pluggable class to allow for build cold storage."""
212
213 _default_class = BuildViewSetBase
214
215
216 class BuildCommandViewSet(UserSelectViewSet):
217 permission_classes = [APIRestrictedPermission]
218 renderer_classes = (JSONRenderer,)
219 serializer_class = BuildCommandSerializer
220 model = BuildCommandResult
221
222
223 class NotificationViewSet(viewsets.ReadOnlyModelViewSet):
224 permission_classes = (permissions.IsAuthenticated, RelatedProjectIsOwner)
225 renderer_classes = (JSONRenderer,)
226 model = EmailHook
227
228 def get_queryset(self):
229 return self.model.objects.api(self.request.user)
230
231
232 class DomainViewSet(UserSelectViewSet):
233 permission_classes = [APIRestrictedPermission]
234 renderer_classes = (JSONRenderer,)
235 serializer_class = DomainSerializer
236 model = Domain
237
238
239 class RemoteOrganizationViewSet(viewsets.ReadOnlyModelViewSet):
240 permission_classes = [IsOwner]
241 renderer_classes = (JSONRenderer,)
242 serializer_class = RemoteOrganizationSerializer
243 model = RemoteOrganization
244 paginate_by = 25
245
246 def get_queryset(self):
247 return (self.model.objects.api(self.request.user)
248 .filter(account__provider__in=[service.adapter.provider_id
249 for service in registry]))
250
251
252 class RemoteRepositoryViewSet(viewsets.ReadOnlyModelViewSet):
253 permission_classes = [IsOwner]
254 renderer_classes = (JSONRenderer,)
255 serializer_class = RemoteRepositorySerializer
256 model = RemoteRepository
257
258 def get_queryset(self):
259 query = self.model.objects.api(self.request.user)
260 org = self.request.query_params.get('org', None)
261 if org is not None:
262 query = query.filter(organization__pk=org)
263 query = query.filter(account__provider__in=[service.adapter.provider_id
264 for service in registry])
265 return query
266
267 def get_paginate_by(self):
268 return self.request.query_params.get('page_size', 25)
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/restapi/views/model_views.py b/readthedocs/restapi/views/model_views.py
--- a/readthedocs/restapi/views/model_views.py
+++ b/readthedocs/restapi/views/model_views.py
@@ -205,6 +205,13 @@
admin_serializer_class = BuildAdminSerializer
model = Build
+ def get_queryset(self):
+ query = super(BuildViewSetBase, self).get_queryset()
+ commit = self.request.query_params.get('commit', None)
+ if commit is not None:
+ query = query.filter(commit=commit)
+ return query
+
class BuildViewSet(SettingsOverrideObject):
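A minimal sketch (illustrative only, not the Read the Docs codebase) of the filtering behaviour the patch above adds: an optional `commit` query parameter narrows the list of builds, which is what allows build status to be queried by commit. The `builds` list and its fields below are made-up stand-ins for the real queryset.
```python
# Hypothetical stand-in for BuildViewSetBase.get_queryset(): filter an
# in-memory list of builds by an optional 'commit' query parameter.
def filter_builds_by_commit(builds, query_params):
    commit = query_params.get("commit")
    if commit is None:
        return builds
    return [build for build in builds if build["commit"] == commit]

builds = [
    {"id": 1, "commit": "abc123", "success": True},
    {"id": 2, "commit": "def456", "success": False},
]
print(filter_builds_by_commit(builds, {"commit": "abc123"}))  # only build 1
print(filter_builds_by_commit(builds, {}))                    # both builds
```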
|
{"golden_diff": "diff --git a/readthedocs/restapi/views/model_views.py b/readthedocs/restapi/views/model_views.py\n--- a/readthedocs/restapi/views/model_views.py\n+++ b/readthedocs/restapi/views/model_views.py\n@@ -205,6 +205,13 @@\n admin_serializer_class = BuildAdminSerializer\n model = Build\n \n+ def get_queryset(self):\n+ query = super(BuildViewSetBase, self).get_queryset()\n+ commit = self.request.query_params.get('commit', None)\n+ if commit is not None:\n+ query = query.filter(commit=commit)\n+ return query\n+\n \n class BuildViewSet(SettingsOverrideObject):\n", "issue": "Provide an API to query the build status by commit\nIn order to do a check before release that everything is ok, I would like to have a way to obtain the current build status for a given commit. So, in addition to:\n\n```\nGET /api/v1/build/{id}/\n```\n\nalso have this:\n\n```\nGET /api/v1/commit/{sha1}/\n```\n\nor \n\n```\nGET /api/v1/{user}/{project}/commit/{sha1}/\n```\n\nIs this possible right now?\n\n", "before_files": [{"content": "\"\"\"Endpoints for listing Projects, Versions, Builds, etc.\"\"\"\n\nfrom __future__ import absolute_import\nimport logging\n\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import decorators, permissions, viewsets, status\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\n\nfrom readthedocs.builds.constants import BRANCH\nfrom readthedocs.builds.constants import TAG\nfrom readthedocs.builds.models import Build, BuildCommandResult, Version\nfrom readthedocs.core.utils import trigger_build\nfrom readthedocs.core.utils.extend import SettingsOverrideObject\nfrom readthedocs.oauth.services import GitHubService, registry\nfrom readthedocs.oauth.models import RemoteOrganization, RemoteRepository\nfrom readthedocs.projects.models import Project, EmailHook, Domain\nfrom readthedocs.projects.version_handling import determine_stable_version\n\nfrom ..permissions import (APIPermission, APIRestrictedPermission,\n RelatedProjectIsOwner, IsOwner)\nfrom ..serializers import (BuildSerializer, BuildAdminSerializer,\n BuildCommandSerializer,\n ProjectSerializer, ProjectAdminSerializer,\n VersionSerializer, VersionAdminSerializer,\n DomainSerializer, RemoteOrganizationSerializer,\n RemoteRepositorySerializer)\nfrom .. import utils as api_utils\n\nlog = logging.getLogger(__name__)\n\n\nclass UserSelectViewSet(viewsets.ModelViewSet):\n\n \"\"\"\n View set that varies serializer class based on request user credentials.\n\n Viewsets using this class should have an attribute `admin_serializer_class`,\n which is a serializer that might have more fields that only admin/staff\n users require. If the user is staff, this class will be returned instead.\n \"\"\"\n\n def get_serializer_class(self):\n try:\n if self.request.user.is_staff and self.admin_serializer_class is not None:\n return self.admin_serializer_class\n except AttributeError:\n pass\n return self.serializer_class\n\n def get_queryset(self):\n \"\"\"Use our API manager method to determine authorization on queryset.\"\"\"\n return self.model.objects.api(self.request.user)\n\n\nclass ProjectViewSet(UserSelectViewSet):\n\n \"\"\"List, filter, etc. 
Projects.\"\"\"\n\n permission_classes = [APIPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = ProjectSerializer\n admin_serializer_class = ProjectAdminSerializer\n model = Project\n paginate_by = 100\n paginate_by_param = 'page_size'\n max_paginate_by = 1000\n\n @decorators.detail_route()\n def valid_versions(self, request, **kwargs):\n \"\"\"Maintain state of versions that are wanted.\"\"\"\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n if not project.num_major or not project.num_minor or not project.num_point:\n return Response(\n {'error': 'Project does not support point version control'},\n status=status.HTTP_400_BAD_REQUEST)\n version_strings = project.supported_versions()\n # Disable making old versions inactive for now.\n # project.versions.exclude(verbose_name__in=version_strings).update(active=False)\n project.versions.filter(\n verbose_name__in=version_strings).update(active=True)\n return Response({\n 'flat': version_strings,\n })\n\n @detail_route()\n def translations(self, *_, **__):\n translations = self.get_object().translations.all()\n return Response({\n 'translations': ProjectSerializer(translations, many=True).data\n })\n\n @detail_route()\n def subprojects(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n rels = project.subprojects.all()\n children = [rel.child for rel in rels]\n return Response({\n 'subprojects': ProjectSerializer(children, many=True).data\n })\n\n @detail_route()\n def active_versions(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n versions = project.versions.filter(active=True)\n return Response({\n 'versions': VersionSerializer(versions, many=True).data\n })\n\n @decorators.detail_route(permission_classes=[permissions.IsAdminUser])\n def token(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n token = GitHubService.get_token_for_project(project, force_local=True)\n return Response({\n 'token': token\n })\n\n @decorators.detail_route()\n def canonical_url(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n return Response({\n 'url': project.get_docs_url()\n })\n\n @decorators.detail_route(permission_classes=[permissions.IsAdminUser], methods=['post'])\n def sync_versions(self, request, **kwargs): # noqa: D205\n \"\"\"\n Sync the version data in the repo (on the build server) with what we\n have in the database.\n\n Returns the identifiers for the versions that have been deleted.\n \"\"\"\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n\n # If the currently highest non-prerelease version is active, then make\n # the new latest version active as well.\n old_highest_version = determine_stable_version(project.versions.all())\n if old_highest_version is not None:\n activate_new_stable = old_highest_version.active\n else:\n activate_new_stable = False\n\n try:\n # Update All Versions\n data = request.data\n added_versions = set()\n if 'tags' in data:\n ret_set = api_utils.sync_versions(\n project=project, versions=data['tags'], type=TAG)\n added_versions.update(ret_set)\n if 'branches' in data:\n ret_set = api_utils.sync_versions(\n project=project, versions=data['branches'], type=BRANCH)\n added_versions.update(ret_set)\n deleted_versions = api_utils.delete_versions(project, data)\n except Exception as e:\n 
log.exception(\"Sync Versions Error: %s\", e.message)\n return Response({'error': e.message}, status=status.HTTP_400_BAD_REQUEST)\n\n promoted_version = project.update_stable_version()\n if promoted_version:\n new_stable = project.get_stable_version()\n log.info(\n \"Triggering new stable build: {project}:{version}\".format(\n project=project.slug,\n version=new_stable.identifier))\n trigger_build(project=project, version=new_stable)\n\n # Marking the tag that is considered the new stable version as\n # active and building it if it was just added.\n if (\n activate_new_stable and\n promoted_version.slug in added_versions):\n promoted_version.active = True\n promoted_version.save()\n trigger_build(project=project, version=promoted_version)\n\n return Response({\n 'added_versions': added_versions,\n 'deleted_versions': deleted_versions,\n })\n\n\nclass VersionViewSet(UserSelectViewSet):\n\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = VersionSerializer\n admin_serializer_class = VersionAdminSerializer\n model = Version\n\n\nclass BuildViewSetBase(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = BuildSerializer\n admin_serializer_class = BuildAdminSerializer\n model = Build\n\n\nclass BuildViewSet(SettingsOverrideObject):\n\n \"\"\"A pluggable class to allow for build cold storage.\"\"\"\n\n _default_class = BuildViewSetBase\n\n\nclass BuildCommandViewSet(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = BuildCommandSerializer\n model = BuildCommandResult\n\n\nclass NotificationViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (permissions.IsAuthenticated, RelatedProjectIsOwner)\n renderer_classes = (JSONRenderer,)\n model = EmailHook\n\n def get_queryset(self):\n return self.model.objects.api(self.request.user)\n\n\nclass DomainViewSet(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = DomainSerializer\n model = Domain\n\n\nclass RemoteOrganizationViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = [IsOwner]\n renderer_classes = (JSONRenderer,)\n serializer_class = RemoteOrganizationSerializer\n model = RemoteOrganization\n paginate_by = 25\n\n def get_queryset(self):\n return (self.model.objects.api(self.request.user)\n .filter(account__provider__in=[service.adapter.provider_id\n for service in registry]))\n\n\nclass RemoteRepositoryViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = [IsOwner]\n renderer_classes = (JSONRenderer,)\n serializer_class = RemoteRepositorySerializer\n model = RemoteRepository\n\n def get_queryset(self):\n query = self.model.objects.api(self.request.user)\n org = self.request.query_params.get('org', None)\n if org is not None:\n query = query.filter(organization__pk=org)\n query = query.filter(account__provider__in=[service.adapter.provider_id\n for service in registry])\n return query\n\n def get_paginate_by(self):\n return self.request.query_params.get('page_size', 25)\n", "path": "readthedocs/restapi/views/model_views.py"}], "after_files": [{"content": "\"\"\"Endpoints for listing Projects, Versions, Builds, etc.\"\"\"\n\nfrom __future__ import absolute_import\nimport logging\n\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import decorators, permissions, viewsets, status\nfrom rest_framework.decorators import 
detail_route\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\n\nfrom readthedocs.builds.constants import BRANCH\nfrom readthedocs.builds.constants import TAG\nfrom readthedocs.builds.models import Build, BuildCommandResult, Version\nfrom readthedocs.core.utils import trigger_build\nfrom readthedocs.core.utils.extend import SettingsOverrideObject\nfrom readthedocs.oauth.services import GitHubService, registry\nfrom readthedocs.oauth.models import RemoteOrganization, RemoteRepository\nfrom readthedocs.projects.models import Project, EmailHook, Domain\nfrom readthedocs.projects.version_handling import determine_stable_version\n\nfrom ..permissions import (APIPermission, APIRestrictedPermission,\n RelatedProjectIsOwner, IsOwner)\nfrom ..serializers import (BuildSerializer, BuildAdminSerializer,\n BuildCommandSerializer,\n ProjectSerializer, ProjectAdminSerializer,\n VersionSerializer, VersionAdminSerializer,\n DomainSerializer, RemoteOrganizationSerializer,\n RemoteRepositorySerializer)\nfrom .. import utils as api_utils\n\nlog = logging.getLogger(__name__)\n\n\nclass UserSelectViewSet(viewsets.ModelViewSet):\n\n \"\"\"\n View set that varies serializer class based on request user credentials.\n\n Viewsets using this class should have an attribute `admin_serializer_class`,\n which is a serializer that might have more fields that only admin/staff\n users require. If the user is staff, this class will be returned instead.\n \"\"\"\n\n def get_serializer_class(self):\n try:\n if self.request.user.is_staff and self.admin_serializer_class is not None:\n return self.admin_serializer_class\n except AttributeError:\n pass\n return self.serializer_class\n\n def get_queryset(self):\n \"\"\"Use our API manager method to determine authorization on queryset.\"\"\"\n return self.model.objects.api(self.request.user)\n\n\nclass ProjectViewSet(UserSelectViewSet):\n\n \"\"\"List, filter, etc. 
Projects.\"\"\"\n\n permission_classes = [APIPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = ProjectSerializer\n admin_serializer_class = ProjectAdminSerializer\n model = Project\n paginate_by = 100\n paginate_by_param = 'page_size'\n max_paginate_by = 1000\n\n @decorators.detail_route()\n def valid_versions(self, request, **kwargs):\n \"\"\"Maintain state of versions that are wanted.\"\"\"\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n if not project.num_major or not project.num_minor or not project.num_point:\n return Response(\n {'error': 'Project does not support point version control'},\n status=status.HTTP_400_BAD_REQUEST)\n version_strings = project.supported_versions()\n # Disable making old versions inactive for now.\n # project.versions.exclude(verbose_name__in=version_strings).update(active=False)\n project.versions.filter(\n verbose_name__in=version_strings).update(active=True)\n return Response({\n 'flat': version_strings,\n })\n\n @detail_route()\n def translations(self, *_, **__):\n translations = self.get_object().translations.all()\n return Response({\n 'translations': ProjectSerializer(translations, many=True).data\n })\n\n @detail_route()\n def subprojects(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n rels = project.subprojects.all()\n children = [rel.child for rel in rels]\n return Response({\n 'subprojects': ProjectSerializer(children, many=True).data\n })\n\n @detail_route()\n def active_versions(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n versions = project.versions.filter(active=True)\n return Response({\n 'versions': VersionSerializer(versions, many=True).data\n })\n\n @decorators.detail_route(permission_classes=[permissions.IsAdminUser])\n def token(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n token = GitHubService.get_token_for_project(project, force_local=True)\n return Response({\n 'token': token\n })\n\n @decorators.detail_route()\n def canonical_url(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n return Response({\n 'url': project.get_docs_url()\n })\n\n @decorators.detail_route(permission_classes=[permissions.IsAdminUser], methods=['post'])\n def sync_versions(self, request, **kwargs): # noqa: D205\n \"\"\"\n Sync the version data in the repo (on the build server) with what we\n have in the database.\n\n Returns the identifiers for the versions that have been deleted.\n \"\"\"\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n\n # If the currently highest non-prerelease version is active, then make\n # the new latest version active as well.\n old_highest_version = determine_stable_version(project.versions.all())\n if old_highest_version is not None:\n activate_new_stable = old_highest_version.active\n else:\n activate_new_stable = False\n\n try:\n # Update All Versions\n data = request.data\n added_versions = set()\n if 'tags' in data:\n ret_set = api_utils.sync_versions(\n project=project, versions=data['tags'], type=TAG)\n added_versions.update(ret_set)\n if 'branches' in data:\n ret_set = api_utils.sync_versions(\n project=project, versions=data['branches'], type=BRANCH)\n added_versions.update(ret_set)\n deleted_versions = api_utils.delete_versions(project, data)\n except Exception as e:\n 
log.exception(\"Sync Versions Error: %s\", e.message)\n return Response({'error': e.message}, status=status.HTTP_400_BAD_REQUEST)\n\n promoted_version = project.update_stable_version()\n if promoted_version:\n new_stable = project.get_stable_version()\n log.info(\n \"Triggering new stable build: {project}:{version}\".format(\n project=project.slug,\n version=new_stable.identifier))\n trigger_build(project=project, version=new_stable)\n\n # Marking the tag that is considered the new stable version as\n # active and building it if it was just added.\n if (\n activate_new_stable and\n promoted_version.slug in added_versions):\n promoted_version.active = True\n promoted_version.save()\n trigger_build(project=project, version=promoted_version)\n\n return Response({\n 'added_versions': added_versions,\n 'deleted_versions': deleted_versions,\n })\n\n\nclass VersionViewSet(UserSelectViewSet):\n\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = VersionSerializer\n admin_serializer_class = VersionAdminSerializer\n model = Version\n\n\nclass BuildViewSetBase(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = BuildSerializer\n admin_serializer_class = BuildAdminSerializer\n model = Build\n\n def get_queryset(self):\n query = super(BuildViewSetBase, self).get_queryset()\n commit = self.request.query_params.get('commit', None)\n if commit is not None:\n query = query.filter(commit=commit)\n return query\n\n\nclass BuildViewSet(SettingsOverrideObject):\n\n \"\"\"A pluggable class to allow for build cold storage.\"\"\"\n\n _default_class = BuildViewSetBase\n\n\nclass BuildCommandViewSet(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = BuildCommandSerializer\n model = BuildCommandResult\n\n\nclass NotificationViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (permissions.IsAuthenticated, RelatedProjectIsOwner)\n renderer_classes = (JSONRenderer,)\n model = EmailHook\n\n def get_queryset(self):\n return self.model.objects.api(self.request.user)\n\n\nclass DomainViewSet(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = DomainSerializer\n model = Domain\n\n\nclass RemoteOrganizationViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = [IsOwner]\n renderer_classes = (JSONRenderer,)\n serializer_class = RemoteOrganizationSerializer\n model = RemoteOrganization\n paginate_by = 25\n\n def get_queryset(self):\n return (self.model.objects.api(self.request.user)\n .filter(account__provider__in=[service.adapter.provider_id\n for service in registry]))\n\n\nclass RemoteRepositoryViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = [IsOwner]\n renderer_classes = (JSONRenderer,)\n serializer_class = RemoteRepositorySerializer\n model = RemoteRepository\n\n def get_queryset(self):\n query = self.model.objects.api(self.request.user)\n org = self.request.query_params.get('org', None)\n if org is not None:\n query = query.filter(organization__pk=org)\n query = query.filter(account__provider__in=[service.adapter.provider_id\n for service in registry])\n return query\n\n def get_paginate_by(self):\n return self.request.query_params.get('page_size', 25)\n", "path": "readthedocs/restapi/views/model_views.py"}]}
| 3,074 | 144 |
gh_patches_debug_13155
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-88
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: feature_union can't concat different dataframe well
https://github.com/Zeroto521/my-data-toolkit/blob/8b6ec3ce2658626f265bf5e1bab5a7c0897a0787/dtoolkit/transformer.py#L55-L56
When we use transformers to handle data frames, some rows may be deleted.
So using the feature union transformer can cause the following problem.
```python
0 1.0 0.0 0.0 1.0 0.0 ... 0.070607 0.0 1.0 1.0 1.0
1 0.0 1.0 0.0 1.0 0.0 ... 0.000000 0.0 1.0 1.0 1.0
2 0.0 0.0 1.0 0.0 1.0 ... 0.853865 1.0 1.0 1.0 1.0
3 0.0 0.0 1.0 0.0 1.0 ... 0.279593 0.0 0.0 1.0 0.0
4 0.0 0.0 1.0 1.0 0.0 ... 1.000000 0.0 1.0 1.0 0.0
5 1.0 0.0 0.0 0.0 1.0 ... 0.566105 0.0 0.0 1.0 0.0
6 0.0 1.0 0.0 1.0 0.0 ... 0.007911 0.0 1.0 0.0 1.0
7 0.0 1.0 0.0 1.0 0.0 ... 0.220168 0.0 1.0 0.0 1.0
8 0.0 1.0 0.0 1.0 0.0 ... 0.242736 0.0 1.0 0.0 1.0
9 1.0 0.0 0.0 1.0 0.0 ... 0.491557 0.0 1.0 0.0 1.0
10 1.0 0.0 0.0 0.0 1.0 ... NaN NaN NaN NaN NaN
11 NaN NaN NaN NaN NaN ... 0.184352 0.0 1.0 0.0 1.0
```
We can see that the rows at index 10 and 11 contain NaN.
To fix this, the index should be ignored (reset) before concatenating the data.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/transformer.py`
Content:
```
1 from __future__ import annotations
2
3 import numpy as np
4 import pandas as pd
5 from more_itertools import flatten
6 from scipy import sparse
7 from sklearn.base import TransformerMixin
8 from sklearn.pipeline import _name_estimators
9 from sklearn.pipeline import FeatureUnion as SKFeatureUnion
10 from sklearn.preprocessing import MinMaxScaler as SKMinMaxScaler
11 from sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder
12
13 from ._checking import check_dataframe_type
14 from ._checking import istype
15 from ._typing import PandasTypeList
16 from .accessor import FilterInAccessor # noqa
17
18
19 class Transformer(TransformerMixin):
20 def __init__(self, *args, **kwargs):
21 self.args = args
22 self.kwargs = kwargs
23
24 def operate(self, X, *_, **__):
25 return X
26
27 def validate(self, *_, **__):
28 ...
29
30 def fit(self, *_):
31 return self
32
33 def transform(self, X, *_):
34 self.validate(X)
35
36 return self.operate(X, *self.args, **self.kwargs)
37
38 def fit_transform(self, X, *_):
39 return self.fit().transform(X)
40
41 def inverse_transform(self, X, *_):
42 return X
43
44
45 #
46 # Sklearn's operation
47 #
48
49
50 class FeatureUnion(SKFeatureUnion):
51 def _hstack(self, Xs):
52 if any(sparse.issparse(f) for f in Xs):
53 return sparse.hstack(Xs).tocsr()
54
55 if all(istype(i, PandasTypeList) for i in Xs):
56 return pd.concat(Xs, axis=1)
57
58 return np.hstack(Xs)
59
60
61 # make_union function ported with modifications from scikit-learn
62 # https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/pipeline.py
63
64
65 def make_union(*transformers, n_jobs=None, verbose=False):
66 return FeatureUnion(
67 _name_estimators(transformers),
68 n_jobs=n_jobs,
69 verbose=verbose,
70 )
71
72
73 def _change_data_to_df(
74 data: np.ndarray,
75 df: pd.DataFrame | np.ndarray,
76 ) -> pd.DataFrame | np.ndarray:
77 if isinstance(df, pd.DataFrame):
78 return pd.DataFrame(data, columns=df.columns, index=df.index)
79
80 return data
81
82
83 class MinMaxScaler(SKMinMaxScaler):
84 def transform(self, X, *_):
85 X_new = super().transform(X, *_)
86
87 return _change_data_to_df(X_new, X)
88
89 def inverse_transform(self, X, *_):
90 X_new = super().inverse_transform(X, *_)
91
92 return _change_data_to_df(X_new, X)
93
94
95 class OneHotEncoder(SKOneHotEncoder):
96 def __init__(
97 self,
98 categories="auto",
99 drop=None,
100 sparse=False,
101 dtype=np.float64,
102 handle_unknown="error",
103 ):
104 super().__init__(
105 categories=categories,
106 drop=drop,
107 sparse=sparse,
108 dtype=dtype,
109 handle_unknown=handle_unknown,
110 )
111
112 def transform(self, X, *_):
113 X_new = super().transform(X, *_)
114
115 if self.sparse is False:
116 categories = flatten(self.categories_)
117 return pd.DataFrame(X_new, columns=categories)
118
119 return X_new
120
121
122 #
123 # Pandas's operation
124 #
125
126
127 class DataFrameTF(Transformer):
128 def validate(self, *args, **kwargs):
129 return check_dataframe_type(*args, **kwargs)
130
131
132 class AssignTF(Transformer):
133 def operate(self, *args, **kwargs):
134 return pd.DataFrame.assign(*args, **kwargs)
135
136
137 class AppendTF(DataFrameTF):
138 def operate(self, *args, **kwargs):
139 return pd.DataFrame.append(*args, **kwargs)
140
141
142 class DropTF(DataFrameTF):
143 def operate(self, *args, **kwargs):
144 return pd.DataFrame.drop(*args, **kwargs)
145
146
147 class EvalTF(DataFrameTF):
148 def operate(self, *args, **kwargs):
149 return pd.DataFrame.eval(*args, **kwargs)
150
151
152 class FillnaTF(DataFrameTF):
153 def operate(self, *args, **kwargs):
154 return pd.DataFrame.fillna(*args, **kwargs)
155
156
157 class FilterInTF(DataFrameTF):
158 def transform(self, X, *_):
159 self.validate(X)
160
161 return X.filterin(*self.args, **self.kwargs)
162
163
164 class FilterTF(DataFrameTF):
165 def operate(self, *args, **kwargs):
166 return pd.DataFrame.filter(*args, **kwargs)
167
168
169 class GetTF(Transformer):
170 def operate(self, *args, **kwargs):
171 return pd.DataFrame.get(*args, **kwargs)
172
173
174 class QueryTF(DataFrameTF):
175 def operate(self, *args, **kwargs):
176 return pd.DataFrame.query(*args, **kwargs)
177
178
179 class ReplaceTF(DataFrameTF):
180 def operate(self, *args, **kwargs):
181 return pd.DataFrame.replace(*args, **kwargs)
182
183
184 #
185 # numpy's operation
186 #
187
188
189 class RavelTF(Transformer):
190 def operate(self, *args, **kwargs):
191 return np.ravel(*args, **kwargs)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dtoolkit/transformer.py b/dtoolkit/transformer.py
--- a/dtoolkit/transformer.py
+++ b/dtoolkit/transformer.py
@@ -13,6 +13,7 @@
from ._checking import check_dataframe_type
from ._checking import istype
from ._typing import PandasTypeList
+from .accessor import ColumnAccessor # noqa
from .accessor import FilterInAccessor # noqa
@@ -53,6 +54,7 @@
return sparse.hstack(Xs).tocsr()
if all(istype(i, PandasTypeList) for i in Xs):
+ Xs = (i.reset_index(drop=True) for i in Xs)
return pd.concat(Xs, axis=1)
return np.hstack(Xs)
|
{"golden_diff": "diff --git a/dtoolkit/transformer.py b/dtoolkit/transformer.py\n--- a/dtoolkit/transformer.py\n+++ b/dtoolkit/transformer.py\n@@ -13,6 +13,7 @@\n from ._checking import check_dataframe_type\n from ._checking import istype\n from ._typing import PandasTypeList\n+from .accessor import ColumnAccessor # noqa\n from .accessor import FilterInAccessor # noqa\n \n \n@@ -53,6 +54,7 @@\n return sparse.hstack(Xs).tocsr()\n \n if all(istype(i, PandasTypeList) for i in Xs):\n+ Xs = (i.reset_index(drop=True) for i in Xs)\n return pd.concat(Xs, axis=1)\n \n return np.hstack(Xs)\n", "issue": "BUG: feature_union can't concat different dataframe well\nhttps://github.com/Zeroto521/my-data-toolkit/blob/8b6ec3ce2658626f265bf5e1bab5a7c0897a0787/dtoolkit/transformer.py#L55-L56\r\n\r\nWhen we use transformers to handle data frames, some rows would be deleted.\r\nSo use the feature union transformer would cause the following problem.\r\n\r\n```python\r\n0 1.0 0.0 0.0 1.0 0.0 ... 0.070607 0.0 1.0 1.0 1.0\r\n1 0.0 1.0 0.0 1.0 0.0 ... 0.000000 0.0 1.0 1.0 1.0\r\n2 0.0 0.0 1.0 0.0 1.0 ... 0.853865 1.0 1.0 1.0 1.0\r\n3 0.0 0.0 1.0 0.0 1.0 ... 0.279593 0.0 0.0 1.0 0.0\r\n4 0.0 0.0 1.0 1.0 0.0 ... 1.000000 0.0 1.0 1.0 0.0\r\n5 1.0 0.0 0.0 0.0 1.0 ... 0.566105 0.0 0.0 1.0 0.0\r\n6 0.0 1.0 0.0 1.0 0.0 ... 0.007911 0.0 1.0 0.0 1.0\r\n7 0.0 1.0 0.0 1.0 0.0 ... 0.220168 0.0 1.0 0.0 1.0\r\n8 0.0 1.0 0.0 1.0 0.0 ... 0.242736 0.0 1.0 0.0 1.0\r\n9 1.0 0.0 0.0 1.0 0.0 ... 0.491557 0.0 1.0 0.0 1.0\r\n10 1.0 0.0 0.0 0.0 1.0 ... NaN NaN NaN NaN NaN\r\n11 NaN NaN NaN NaN NaN ... 0.184352 0.0 1.0 0.0 1.0\r\n```\r\n\r\nWe could see, row index 10 and 11 data have NaN.\r\n\r\nTo fix this, there should add a parameter to ignore the index then concat data.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport numpy as np\nimport pandas as pd\nfrom more_itertools import flatten\nfrom scipy import sparse\nfrom sklearn.base import TransformerMixin\nfrom sklearn.pipeline import _name_estimators\nfrom sklearn.pipeline import FeatureUnion as SKFeatureUnion\nfrom sklearn.preprocessing import MinMaxScaler as SKMinMaxScaler\nfrom sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder\n\nfrom ._checking import check_dataframe_type\nfrom ._checking import istype\nfrom ._typing import PandasTypeList\nfrom .accessor import FilterInAccessor # noqa\n\n\nclass Transformer(TransformerMixin):\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n def operate(self, X, *_, **__):\n return X\n\n def validate(self, *_, **__):\n ...\n\n def fit(self, *_):\n return self\n\n def transform(self, X, *_):\n self.validate(X)\n\n return self.operate(X, *self.args, **self.kwargs)\n\n def fit_transform(self, X, *_):\n return self.fit().transform(X)\n\n def inverse_transform(self, X, *_):\n return X\n\n\n#\n# Sklearn's operation\n#\n\n\nclass FeatureUnion(SKFeatureUnion):\n def _hstack(self, Xs):\n if any(sparse.issparse(f) for f in Xs):\n return sparse.hstack(Xs).tocsr()\n\n if all(istype(i, PandasTypeList) for i in Xs):\n return pd.concat(Xs, axis=1)\n\n return np.hstack(Xs)\n\n\n# make_union function ported with modifications from scikit-learn\n# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/pipeline.py\n\n\ndef make_union(*transformers, n_jobs=None, verbose=False):\n return FeatureUnion(\n _name_estimators(transformers),\n n_jobs=n_jobs,\n verbose=verbose,\n )\n\n\ndef _change_data_to_df(\n data: np.ndarray,\n df: pd.DataFrame | np.ndarray,\n) -> pd.DataFrame | 
np.ndarray:\n if isinstance(df, pd.DataFrame):\n return pd.DataFrame(data, columns=df.columns, index=df.index)\n\n return data\n\n\nclass MinMaxScaler(SKMinMaxScaler):\n def transform(self, X, *_):\n X_new = super().transform(X, *_)\n\n return _change_data_to_df(X_new, X)\n\n def inverse_transform(self, X, *_):\n X_new = super().inverse_transform(X, *_)\n\n return _change_data_to_df(X_new, X)\n\n\nclass OneHotEncoder(SKOneHotEncoder):\n def __init__(\n self,\n categories=\"auto\",\n drop=None,\n sparse=False,\n dtype=np.float64,\n handle_unknown=\"error\",\n ):\n super().__init__(\n categories=categories,\n drop=drop,\n sparse=sparse,\n dtype=dtype,\n handle_unknown=handle_unknown,\n )\n\n def transform(self, X, *_):\n X_new = super().transform(X, *_)\n\n if self.sparse is False:\n categories = flatten(self.categories_)\n return pd.DataFrame(X_new, columns=categories)\n\n return X_new\n\n\n#\n# Pandas's operation\n#\n\n\nclass DataFrameTF(Transformer):\n def validate(self, *args, **kwargs):\n return check_dataframe_type(*args, **kwargs)\n\n\nclass AssignTF(Transformer):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.assign(*args, **kwargs)\n\n\nclass AppendTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.append(*args, **kwargs)\n\n\nclass DropTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.drop(*args, **kwargs)\n\n\nclass EvalTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.eval(*args, **kwargs)\n\n\nclass FillnaTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.fillna(*args, **kwargs)\n\n\nclass FilterInTF(DataFrameTF):\n def transform(self, X, *_):\n self.validate(X)\n\n return X.filterin(*self.args, **self.kwargs)\n\n\nclass FilterTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.filter(*args, **kwargs)\n\n\nclass GetTF(Transformer):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.get(*args, **kwargs)\n\n\nclass QueryTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.query(*args, **kwargs)\n\n\nclass ReplaceTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.replace(*args, **kwargs)\n\n\n#\n# numpy's operation\n#\n\n\nclass RavelTF(Transformer):\n def operate(self, *args, **kwargs):\n return np.ravel(*args, **kwargs)\n", "path": "dtoolkit/transformer.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport numpy as np\nimport pandas as pd\nfrom more_itertools import flatten\nfrom scipy import sparse\nfrom sklearn.base import TransformerMixin\nfrom sklearn.pipeline import _name_estimators\nfrom sklearn.pipeline import FeatureUnion as SKFeatureUnion\nfrom sklearn.preprocessing import MinMaxScaler as SKMinMaxScaler\nfrom sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder\n\nfrom ._checking import check_dataframe_type\nfrom ._checking import istype\nfrom ._typing import PandasTypeList\nfrom .accessor import ColumnAccessor # noqa\nfrom .accessor import FilterInAccessor # noqa\n\n\nclass Transformer(TransformerMixin):\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n def operate(self, X, *_, **__):\n return X\n\n def validate(self, *_, **__):\n ...\n\n def fit(self, *_):\n return self\n\n def transform(self, X, *_):\n self.validate(X)\n\n return self.operate(X, *self.args, **self.kwargs)\n\n def fit_transform(self, X, *_):\n return self.fit().transform(X)\n\n def inverse_transform(self, X, *_):\n return 
X\n\n\n#\n# Sklearn's operation\n#\n\n\nclass FeatureUnion(SKFeatureUnion):\n def _hstack(self, Xs):\n if any(sparse.issparse(f) for f in Xs):\n return sparse.hstack(Xs).tocsr()\n\n if all(istype(i, PandasTypeList) for i in Xs):\n Xs = (i.reset_index(drop=True) for i in Xs)\n return pd.concat(Xs, axis=1)\n\n return np.hstack(Xs)\n\n\n# make_union function ported with modifications from scikit-learn\n# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/pipeline.py\n\n\ndef make_union(*transformers, n_jobs=None, verbose=False):\n return FeatureUnion(\n _name_estimators(transformers),\n n_jobs=n_jobs,\n verbose=verbose,\n )\n\n\ndef _change_data_to_df(\n data: np.ndarray,\n df: pd.DataFrame | np.ndarray,\n) -> pd.DataFrame | np.ndarray:\n if isinstance(df, pd.DataFrame):\n return pd.DataFrame(data, columns=df.columns, index=df.index)\n\n return data\n\n\nclass MinMaxScaler(SKMinMaxScaler):\n def transform(self, X, *_):\n X_new = super().transform(X, *_)\n\n return _change_data_to_df(X_new, X)\n\n def inverse_transform(self, X, *_):\n X_new = super().inverse_transform(X, *_)\n\n return _change_data_to_df(X_new, X)\n\n\nclass OneHotEncoder(SKOneHotEncoder):\n def __init__(\n self,\n categories=\"auto\",\n drop=None,\n sparse=False,\n dtype=np.float64,\n handle_unknown=\"error\",\n ):\n super().__init__(\n categories=categories,\n drop=drop,\n sparse=sparse,\n dtype=dtype,\n handle_unknown=handle_unknown,\n )\n\n def transform(self, X, *_):\n X_new = super().transform(X, *_)\n\n if self.sparse is False:\n categories = flatten(self.categories_)\n return pd.DataFrame(X_new, columns=categories)\n\n return X_new\n\n\n#\n# Pandas's operation\n#\n\n\nclass DataFrameTF(Transformer):\n def validate(self, *args, **kwargs):\n return check_dataframe_type(*args, **kwargs)\n\n\nclass AssignTF(Transformer):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.assign(*args, **kwargs)\n\n\nclass AppendTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.append(*args, **kwargs)\n\n\nclass DropTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.drop(*args, **kwargs)\n\n\nclass EvalTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.eval(*args, **kwargs)\n\n\nclass FillnaTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.fillna(*args, **kwargs)\n\n\nclass FilterInTF(DataFrameTF):\n def transform(self, X, *_):\n self.validate(X)\n\n return X.filterin(*self.args, **self.kwargs)\n\n\nclass FilterTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.filter(*args, **kwargs)\n\n\nclass GetTF(Transformer):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.get(*args, **kwargs)\n\n\nclass QueryTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.query(*args, **kwargs)\n\n\nclass ReplaceTF(DataFrameTF):\n def operate(self, *args, **kwargs):\n return pd.DataFrame.replace(*args, **kwargs)\n\n\n#\n# numpy's operation\n#\n\n\nclass RavelTF(Transformer):\n def operate(self, *args, **kwargs):\n return np.ravel(*args, **kwargs)\n", "path": "dtoolkit/transformer.py"}]}
| 2,647 | 181 |
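A minimal pandas sketch of the index-alignment failure described in the record above and of the `reset_index(drop=True)` remedy its golden diff applies. The frames, column names, and values below are invented purely for illustration and are not taken from the dataset.

```python
import pandas as pd

# Two transformer outputs whose row indexes no longer line up, because
# earlier pipeline steps dropped different rows from each of them.
left = pd.DataFrame({"a": [1.0, 2.0, 3.0]}, index=[0, 1, 3])
right = pd.DataFrame({"b": [10.0, 20.0, 30.0]}, index=[0, 1, 2])

# Plain concat aligns on the index, producing the NaN rows from the report.
buggy = pd.concat([left, right], axis=1)
print(buggy)  # rows labelled 2 and 3 each contain NaN

# Resetting the index first (what the patch does) stacks rows positionally.
fixed = pd.concat([df.reset_index(drop=True) for df in (left, right)], axis=1)
print(fixed)  # three fully populated rows, no NaN
assert not fixed.isna().any().any()
```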
gh_patches_debug_42715
|
rasdani/github-patches
|
git_diff
|
openai__gym-1878
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Box bound precision warning
I get this warning a lot when using Box environments:
```
.../gym/logger.py:30: UserWarning: WARN: Box bound precision lowered by casting to float32
```
This is particularly annoying, especially because the [default dtype for Box is](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L24) `np.float32`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/box.py`
Content:
```
1 import numpy as np
2
3 from .space import Space
4 from gym import logger
5
6
7 class Box(Space):
8 """
9 A (possibly unbounded) box in R^n. Specifically, a Box represents the
10 Cartesian product of n closed intervals. Each interval has the form of one
11 of [a, b], (-oo, b], [a, oo), or (-oo, oo).
12
13 There are two common use cases:
14
15 * Identical bound for each dimension::
16 >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
17 Box(3, 4)
18
19 * Independent bound for each dimension::
20 >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
21 Box(2,)
22
23 """
24 def __init__(self, low, high, shape=None, dtype=np.float32):
25 assert dtype is not None, 'dtype must be explicitly provided. '
26 self.dtype = np.dtype(dtype)
27
28 if shape is None:
29 assert low.shape == high.shape, 'box dimension mismatch. '
30 self.shape = low.shape
31 self.low = low
32 self.high = high
33 else:
34 assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '
35 self.shape = tuple(shape)
36 self.low = np.full(self.shape, low)
37 self.high = np.full(self.shape, high)
38
39 def _get_precision(dtype):
40 if np.issubdtype(dtype, np.floating):
41 return np.finfo(dtype).precision
42 else:
43 return np.inf
44 low_precision = _get_precision(self.low.dtype)
45 high_precision = _get_precision(self.high.dtype)
46 dtype_precision = _get_precision(self.dtype)
47 if min(low_precision, high_precision) > dtype_precision:
48 logger.warn("Box bound precision lowered by casting to {}".format(self.dtype))
49 self.low = self.low.astype(self.dtype)
50 self.high = self.high.astype(self.dtype)
51
52 # Boolean arrays which indicate the interval type for each coordinate
53 self.bounded_below = -np.inf < self.low
54 self.bounded_above = np.inf > self.high
55
56 super(Box, self).__init__(self.shape, self.dtype)
57
58 def is_bounded(self, manner="both"):
59 below = np.all(self.bounded_below)
60 above = np.all(self.bounded_above)
61 if manner == "both":
62 return below and above
63 elif manner == "below":
64 return below
65 elif manner == "above":
66 return above
67 else:
68 raise ValueError("manner is not in {'below', 'above', 'both'}")
69
70 def sample(self):
71 """
72 Generates a single random sample inside of the Box.
73
74 In creating a sample of the box, each coordinate is sampled according to
75 the form of the interval:
76
77 * [a, b] : uniform distribution
78 * [a, oo) : shifted exponential distribution
79 * (-oo, b] : shifted negative exponential distribution
80 * (-oo, oo) : normal distribution
81 """
82 high = self.high if self.dtype.kind == 'f' \
83 else self.high.astype('int64') + 1
84 sample = np.empty(self.shape)
85
86 # Masking arrays which classify the coordinates according to interval
87 # type
88 unbounded = ~self.bounded_below & ~self.bounded_above
89 upp_bounded = ~self.bounded_below & self.bounded_above
90 low_bounded = self.bounded_below & ~self.bounded_above
91 bounded = self.bounded_below & self.bounded_above
92
93
94 # Vectorized sampling by interval type
95 sample[unbounded] = self.np_random.normal(
96 size=unbounded[unbounded].shape)
97
98 sample[low_bounded] = self.np_random.exponential(
99 size=low_bounded[low_bounded].shape) + self.low[low_bounded]
100
101 sample[upp_bounded] = -self.np_random.exponential(
102 size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]
103
104 sample[bounded] = self.np_random.uniform(low=self.low[bounded],
105 high=high[bounded],
106 size=bounded[bounded].shape)
107 if self.dtype.kind == 'i':
108 sample = np.floor(sample)
109
110 return sample.astype(self.dtype)
111
112 def contains(self, x):
113 if isinstance(x, list):
114 x = np.array(x) # Promote list to array for contains check
115 return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
116
117 def to_jsonable(self, sample_n):
118 return np.array(sample_n).tolist()
119
120 def from_jsonable(self, sample_n):
121 return [np.asarray(sample) for sample in sample_n]
122
123 def __repr__(self):
124 return "Box" + str(self.shape)
125
126 def __eq__(self, other):
127 return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/spaces/box.py b/gym/spaces/box.py
--- a/gym/spaces/box.py
+++ b/gym/spaces/box.py
@@ -9,13 +9,13 @@
A (possibly unbounded) box in R^n. Specifically, a Box represents the
Cartesian product of n closed intervals. Each interval has the form of one
of [a, b], (-oo, b], [a, oo), or (-oo, oo).
-
+
There are two common use cases:
-
+
* Identical bound for each dimension::
>>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
Box(3, 4)
-
+
* Independent bound for each dimension::
>>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
Box(2,)
@@ -33,8 +33,8 @@
else:
assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '
self.shape = tuple(shape)
- self.low = np.full(self.shape, low)
- self.high = np.full(self.shape, high)
+ self.low = np.full(self.shape, low, dtype=dtype)
+ self.high = np.full(self.shape, high, dtype=dtype)
def _get_precision(dtype):
if np.issubdtype(dtype, np.floating):
@@ -69,12 +69,12 @@
def sample(self):
"""
- Generates a single random sample inside of the Box.
+ Generates a single random sample inside of the Box.
In creating a sample of the box, each coordinate is sampled according to
the form of the interval:
-
- * [a, b] : uniform distribution
+
+ * [a, b] : uniform distribution
* [a, oo) : shifted exponential distribution
* (-oo, b] : shifted negative exponential distribution
* (-oo, oo) : normal distribution
@@ -89,7 +89,7 @@
upp_bounded = ~self.bounded_below & self.bounded_above
low_bounded = self.bounded_below & ~self.bounded_above
bounded = self.bounded_below & self.bounded_above
-
+
# Vectorized sampling by interval type
sample[unbounded] = self.np_random.normal(
@@ -97,18 +97,18 @@
sample[low_bounded] = self.np_random.exponential(
size=low_bounded[low_bounded].shape) + self.low[low_bounded]
-
+
sample[upp_bounded] = -self.np_random.exponential(
size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]
-
- sample[bounded] = self.np_random.uniform(low=self.low[bounded],
+
+ sample[bounded] = self.np_random.uniform(low=self.low[bounded],
high=high[bounded],
size=bounded[bounded].shape)
if self.dtype.kind == 'i':
sample = np.floor(sample)
return sample.astype(self.dtype)
-
+
def contains(self, x):
if isinstance(x, list):
x = np.array(x) # Promote list to array for contains check
|
{"golden_diff": "diff --git a/gym/spaces/box.py b/gym/spaces/box.py\n--- a/gym/spaces/box.py\n+++ b/gym/spaces/box.py\n@@ -9,13 +9,13 @@\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n- \n+\n There are two common use cases:\n- \n+\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n- \n+\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n@@ -33,8 +33,8 @@\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n- self.low = np.full(self.shape, low)\n- self.high = np.full(self.shape, high)\n+ self.low = np.full(self.shape, low, dtype=dtype)\n+ self.high = np.full(self.shape, high, dtype=dtype)\n \n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n@@ -69,12 +69,12 @@\n \n def sample(self):\n \"\"\"\n- Generates a single random sample inside of the Box. \n+ Generates a single random sample inside of the Box.\n \n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n- \n- * [a, b] : uniform distribution \n+\n+ * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n@@ -89,7 +89,7 @@\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n- \n+\n \n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n@@ -97,18 +97,18 @@\n \n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n- \n+\n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]\n- \n- sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n+\n+ sample[bounded] = self.np_random.uniform(low=self.low[bounded],\n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n \n return sample.astype(self.dtype)\n- \n+\n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n", "issue": "Box bound precision warning\nI get this warning a lot when using Box environments:\r\n\r\n```\r\n.../gym/logger.py:30: UserWarning: WARN: Box bound precision lowered by casting to float32\r\n```\r\nThis is particularly annoying, especially because the [default dtype for Box is](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L24) `np.float(32)`\n", "before_files": [{"content": "import numpy as np\n\nfrom .space import Space\nfrom gym import logger\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. 
Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n \n There are two common use cases:\n \n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n \n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low)\n self.high = np.full(self.shape, high)\n\n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n low_precision = _get_precision(self.low.dtype)\n high_precision = _get_precision(self.high.dtype)\n dtype_precision = _get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision:\n logger.warn(\"Box bound precision lowered by casting to {}\".format(self.dtype))\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box. 
\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n \n * [a, b] : uniform distribution \n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n \n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n \n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]\n \n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n \n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py"}], "after_files": [{"content": "import numpy as np\n\nfrom .space import Space\nfrom gym import logger\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n\n There are two common use cases:\n\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. 
'\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low, dtype=dtype)\n self.high = np.full(self.shape, high, dtype=dtype)\n\n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n low_precision = _get_precision(self.low.dtype)\n high_precision = _get_precision(self.high.dtype)\n dtype_precision = _get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision:\n logger.warn(\"Box bound precision lowered by casting to {}\".format(self.dtype))\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box.\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n\n * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n\n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]\n\n sample[bounded] = self.np_random.uniform(low=self.low[bounded],\n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n\n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py"}]}
| 1,765 | 769 |
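A numpy-only sketch of why the Box bounds in the record above trigger the precision warning and how passing `dtype` to `np.full`, as the golden diff does, avoids it. The shape and bound values are arbitrary, and `precision` is a hypothetical helper written here to mirror the `_get_precision` logic shown in the record.

```python
import numpy as np

shape, low, high, target = (3, 4), -1.0, 2.0, np.dtype(np.float32)

def precision(dtype):
    # Decimal precision for floating dtypes, +inf for everything else,
    # mirroring the _get_precision helper in gym/spaces/box.py.
    return np.finfo(dtype).precision if np.issubdtype(dtype, np.floating) else np.inf

# Before the patch: np.full of a Python float defaults to float64 ...
low_arr = np.full(shape, low)
print(low_arr.dtype, precision(low_arr.dtype), precision(target))  # float64 15 6
# ... so min(low, high) precision (15) exceeds the target precision (6),
# which is exactly the condition that emits the warning.

# After the patch: build the bound arrays directly in the requested dtype.
low_arr = np.full(shape, low, dtype=target)
high_arr = np.full(shape, high, dtype=target)
assert precision(low_arr.dtype) <= precision(target)  # warning branch not taken
```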
gh_patches_debug_67479
|
rasdani/github-patches
|
git_diff
|
scverse__scanpy-783
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get.rank_genes_groups() key argument not used
`rank_genes_groups_df` takes `key` as an argument and the docs says it is the key differential expression groups were stored under. However, the function does not use that key and fetches DE results from the default 'rank_genes_groups' key.
Line 55 under `rank_genes_groups_df()` in scanpy/get.py:
`d[k] = adata.uns["rank_genes_groups"][k][group]` should be changed to `d[k] = adata.uns[key][k][group]`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/get.py`
Content:
```
1 """This module contains helper functions for accessing data."""
2 from typing import Optional, Iterable, Tuple
3
4 import numpy as np
5 import pandas as pd
6 from scipy.sparse import spmatrix
7
8 from anndata import AnnData
9 # --------------------------------------------------------------------------------
10 # Plotting data helpers
11 # --------------------------------------------------------------------------------
12
13
14 # TODO: implement diffxpy method, make singledispatch
15 def rank_genes_groups_df(
16 adata: AnnData,
17 group: str, # Can this be something other than a str?
18 *,
19 key: str = "rank_genes_groups",
20 pval_cutoff: Optional[float] = None,
21 log2fc_min: Optional[float] = None,
22 log2fc_max: Optional[float] = None,
23 gene_symbols: Optional[str] = None
24 ) -> pd.DataFrame:
25 """
26 :func:`scanpy.tl.rank_genes_groups` results in the form of a :class:`pd.DataFrame`.
27
28 Params
29 ------
30 adata
31 Object to get results from.
32 group
33 Which group (as in :func:`scanpy.tl.rank_genes_groups`'s `groupby`
34 argument) to return results from.
35 key
36 Key differential expression groups were stored under.
37 pval_cutoff
38 Minimum adjusted pval to return.
39 log2fc_min
40 Minumum logfc to return.
41 log2fc_max
42 Maximum logfc to return.
43 gene_symbols
44 Column name in `.var` DataFrame that stores gene symbols. Specifying
45 this will add that column to the returned dataframe.
46
47 Example
48 -------
49 >>> pbmc = sc.datasets.pbmc68k_reduced()
50 >>> sc.tl.rank_genes_groups(pbmc, groupby="louvain", use_raw=True, n_genes=pbmc.shape[1])
51 >>> dedf = sc.get.rank_genes_groups_df(pbmc, group="0")
52 """
53 d = pd.DataFrame()
54 for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:
55 d[k] = adata.uns["rank_genes_groups"][k][group]
56 if pval_cutoff is not None:
57 d = d[d["pvals_adj"] < pval_cutoff]
58 if log2fc_min is not None:
59 d = d[d["logfoldchanges"] > log2fc_min]
60 if log2fc_max is not None:
61 d = d[d["logfoldchanges"] < log2fc_max]
62 if gene_symbols is not None:
63 d = d.join(adata.var[gene_symbols], on="names")
64 return d
65
66
67 def obs_df(
68 adata: AnnData,
69 keys: Iterable[str] = (),
70 obsm_keys: Iterable[Tuple[str, int]] = (),
71 *,
72 layer: str = None,
73 gene_symbols: str = None,
74 use_raw: bool = False
75 ) -> pd.DataFrame:
76 """\
77 Return values for observations in adata.
78
79 Params
80 ------
81 adata
82 AnnData object to get values from.
83 keys
84 Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.
85 obsm_keys
86 Tuple of `(key from obsm, column index of obsm[key])`.
87 layer
88 Layer of `adata` to use as expression values.
89 gene_symbols
90 Column of `adata.var` to search for `keys` in.
91 use_raw
92 Whether to get expression values from `adata.raw`.
93
94 Returns
95 -------
96 A dataframe with `adata.obs_names` as index, and values specified by `keys`
97 and `obsm_keys`.
98
99 Examples
100 --------
101 Getting value for plotting:
102
103 >>> pbmc = sc.datasets.pbmc68k_reduced()
104 >>> plotdf = sc.get.obs_df(
105 pbmc,
106 keys=["CD8B", "n_genes"],
107 obsm_keys=[("X_umap", 0), ("X_umap", 1)]
108 )
109 >>> plotdf.plot.scatter("X_umap0", "X_umap1", c="CD8B")
110
111 Calculating mean expression for marker genes by cluster:
112
113 >>> pbmc = sc.datasets.pbmc68k_reduced()
114 >>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']
115 >>> genedf = sc.get.obs_df(
116 pbmc,
117 keys=["louvain", *marker_genes]
118 )
119 >>> grouped = genedf.groupby("louvain")
120 >>> mean, var = grouped.mean(), grouped.var()
121 """
122 if use_raw:
123 assert layer is None, "Cannot specify use_raw=True and a layer at the same time."
124 if gene_symbols is not None:
125 gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var[gene_symbols])
126 else:
127 gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var_names)
128 else:
129 if gene_symbols is not None:
130 gene_names = pd.Series(adata.var_names, index=adata.var[gene_symbols])
131 else:
132 gene_names = pd.Series(adata.var_names, index=adata.var_names)
133 lookup_keys = []
134 not_found = []
135 for key in keys:
136 if key in adata.obs.columns:
137 lookup_keys.append(key)
138 elif key in gene_names.index:
139 lookup_keys.append(gene_names[key])
140 else:
141 not_found.append(key)
142 if len(not_found) > 0:
143 if use_raw:
144 if gene_symbols is None:
145 gene_error = "`adata.raw.var_names`"
146 else:
147 gene_error = "gene_symbols column `adata.raw.var[{}].values`".format(gene_symbols)
148 else:
149 if gene_symbols is None:
150 gene_error = "`adata.var_names`"
151 else:
152 gene_error = "gene_symbols column `adata.var[{}].values`".format(gene_symbols)
153 raise KeyError(
154 f"Could not find keys '{not_found}' in columns of `adata.obs` or in"
155 f" {gene_error}."
156 )
157
158 # Make df
159 df = pd.DataFrame(index=adata.obs_names)
160 for k, l in zip(keys, lookup_keys):
161 if not use_raw or k in adata.obs.columns:
162 df[k] = adata.obs_vector(l, layer=layer)
163 else:
164 df[k] = adata.raw.obs_vector(l)
165 for k, idx in obsm_keys:
166 added_k = f"{k}-{idx}"
167 val = adata.obsm[k]
168 if isinstance(val, np.ndarray):
169 df[added_k] = np.ravel(val[:, idx])
170 elif isinstance(val, spmatrix):
171 df[added_k] = np.ravel(val[:, idx].toarray())
172 elif isinstance(val, pd.DataFrame):
173 df[added_k] = val.loc[:, idx]
174 return df
175
176
177 def var_df(
178 adata: AnnData,
179 keys: Iterable[str] = (),
180 varm_keys: Iterable[Tuple[str, int]] = (),
181 *,
182 layer: str = None,
183 ) -> pd.DataFrame:
184 """\
185 Return values for observations in adata.
186
187 Params
188 ------
189 adata
190 AnnData object to get values from.
191 keys
192 Keys from either `.obs_names`, or `.var.columns`.
193 varm_keys
194 Tuple of `(key from varm, column index of varm[key])`.
195 layer
196 Layer of `adata` to use as expression values.
197
198 Returns
199 -------
200 A dataframe with `adata.var_names` as index, and values specified by `keys`
201 and `varm_keys`.
202 """
203 # Argument handling
204 lookup_keys = []
205 not_found = []
206 for key in keys:
207 if key in adata.var.columns:
208 lookup_keys.append(key)
209 elif key in adata.obs_names:
210 lookup_keys.append(key)
211 else:
212 not_found.append(key)
213 if len(not_found) > 0:
214 raise KeyError(
215 f"Could not find keys '{not_found}' in columns of `adata.var` or"
216 " in `adata.obs_names`."
217 )
218
219 # Make df
220 df = pd.DataFrame(index=adata.var_names)
221 for k, l in zip(keys, lookup_keys):
222 df[k] = adata.var_vector(l, layer=layer)
223 for k, idx in varm_keys:
224 added_k = f"{k}-{idx}"
225 val = adata.varm[k]
226 if isinstance(val, np.ndarray):
227 df[added_k] = np.ravel(val[:, idx])
228 elif isinstance(val, spmatrix):
229 df[added_k] = np.ravel(val[:, idx].toarray())
230 elif isinstance(val, pd.DataFrame):
231 df[added_k] = val.loc[:, idx]
232 return df
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scanpy/get.py b/scanpy/get.py
--- a/scanpy/get.py
+++ b/scanpy/get.py
@@ -52,7 +52,7 @@
"""
d = pd.DataFrame()
for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:
- d[k] = adata.uns["rank_genes_groups"][k][group]
+ d[k] = adata.uns[key][k][group]
if pval_cutoff is not None:
d = d[d["pvals_adj"] < pval_cutoff]
if log2fc_min is not None:
|
{"golden_diff": "diff --git a/scanpy/get.py b/scanpy/get.py\n--- a/scanpy/get.py\n+++ b/scanpy/get.py\n@@ -52,7 +52,7 @@\n \"\"\"\n d = pd.DataFrame()\n for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:\n- d[k] = adata.uns[\"rank_genes_groups\"][k][group]\n+ d[k] = adata.uns[key][k][group]\n if pval_cutoff is not None:\n d = d[d[\"pvals_adj\"] < pval_cutoff]\n if log2fc_min is not None:\n", "issue": "get.rank_genes_groups() key argument not used\n`rank_genes_groups_df` takes `key` as an argument and the docs says it is the key differential expression groups were stored under. However, the function does not use that key and fetches DE results from the default 'rank_genes_groups' key.\r\n\r\nline 55 under `rank_genes_groups_df() ` in scanpy/get.py\r\n`d[k] = adata.uns[\"rank_genes_groups\"][k][group]` should be changed to `d[k] = adata.uns[key][k][group]`\n", "before_files": [{"content": "\"\"\"This module contains helper functions for accessing data.\"\"\"\nfrom typing import Optional, Iterable, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import spmatrix\n\nfrom anndata import AnnData\n# --------------------------------------------------------------------------------\n# Plotting data helpers\n# --------------------------------------------------------------------------------\n\n\n# TODO: implement diffxpy method, make singledispatch\ndef rank_genes_groups_df(\n adata: AnnData,\n group: str, # Can this be something other than a str?\n *,\n key: str = \"rank_genes_groups\",\n pval_cutoff: Optional[float] = None,\n log2fc_min: Optional[float] = None,\n log2fc_max: Optional[float] = None,\n gene_symbols: Optional[str] = None\n) -> pd.DataFrame:\n \"\"\"\n :func:`scanpy.tl.rank_genes_groups` results in the form of a :class:`pd.DataFrame`.\n\n Params\n ------\n adata\n Object to get results from.\n group\n Which group (as in :func:`scanpy.tl.rank_genes_groups`'s `groupby`\n argument) to return results from.\n key\n Key differential expression groups were stored under.\n pval_cutoff\n Minimum adjusted pval to return.\n log2fc_min\n Minumum logfc to return.\n log2fc_max\n Maximum logfc to return.\n gene_symbols\n Column name in `.var` DataFrame that stores gene symbols. 
Specifying\n this will add that column to the returned dataframe.\n\n Example\n -------\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(pbmc, groupby=\"louvain\", use_raw=True, n_genes=pbmc.shape[1])\n >>> dedf = sc.get.rank_genes_groups_df(pbmc, group=\"0\")\n \"\"\"\n d = pd.DataFrame()\n for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:\n d[k] = adata.uns[\"rank_genes_groups\"][k][group]\n if pval_cutoff is not None:\n d = d[d[\"pvals_adj\"] < pval_cutoff]\n if log2fc_min is not None:\n d = d[d[\"logfoldchanges\"] > log2fc_min]\n if log2fc_max is not None:\n d = d[d[\"logfoldchanges\"] < log2fc_max]\n if gene_symbols is not None:\n d = d.join(adata.var[gene_symbols], on=\"names\")\n return d\n\n\ndef obs_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n obsm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n gene_symbols: str = None,\n use_raw: bool = False\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.\n obsm_keys\n Tuple of `(key from obsm, column index of obsm[key])`.\n layer\n Layer of `adata` to use as expression values.\n gene_symbols\n Column of `adata.var` to search for `keys` in.\n use_raw\n Whether to get expression values from `adata.raw`.\n\n Returns\n -------\n A dataframe with `adata.obs_names` as index, and values specified by `keys`\n and `obsm_keys`.\n\n Examples\n --------\n Getting value for plotting:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> plotdf = sc.get.obs_df(\n pbmc,\n keys=[\"CD8B\", \"n_genes\"],\n obsm_keys=[(\"X_umap\", 0), (\"X_umap\", 1)]\n )\n >>> plotdf.plot.scatter(\"X_umap0\", \"X_umap1\", c=\"CD8B\")\n\n Calculating mean expression for marker genes by cluster:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']\n >>> genedf = sc.get.obs_df(\n pbmc,\n keys=[\"louvain\", *marker_genes]\n )\n >>> grouped = genedf.groupby(\"louvain\")\n >>> mean, var = grouped.mean(), grouped.var()\n \"\"\"\n if use_raw:\n assert layer is None, \"Cannot specify use_raw=True and a layer at the same time.\"\n if gene_symbols is not None:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var_names)\n else:\n if gene_symbols is not None:\n gene_names = pd.Series(adata.var_names, index=adata.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.var_names, index=adata.var_names)\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.obs.columns:\n lookup_keys.append(key)\n elif key in gene_names.index:\n lookup_keys.append(gene_names[key])\n else:\n not_found.append(key)\n if len(not_found) > 0:\n if use_raw:\n if gene_symbols is None:\n gene_error = \"`adata.raw.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.raw.var[{}].values`\".format(gene_symbols)\n else:\n if gene_symbols is None:\n gene_error = \"`adata.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.var[{}].values`\".format(gene_symbols)\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.obs` or in\"\n f\" {gene_error}.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.obs_names)\n for k, l in zip(keys, lookup_keys):\n if not use_raw or k in adata.obs.columns:\n df[k] = adata.obs_vector(l, layer=layer)\n else:\n df[k] = 
adata.raw.obs_vector(l)\n for k, idx in obsm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.obsm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n\n\ndef var_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n varm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.obs_names`, or `.var.columns`.\n varm_keys\n Tuple of `(key from varm, column index of varm[key])`.\n layer\n Layer of `adata` to use as expression values.\n\n Returns\n -------\n A dataframe with `adata.var_names` as index, and values specified by `keys`\n and `varm_keys`.\n \"\"\"\n # Argument handling\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.var.columns:\n lookup_keys.append(key)\n elif key in adata.obs_names:\n lookup_keys.append(key)\n else:\n not_found.append(key)\n if len(not_found) > 0:\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.var` or\"\n \" in `adata.obs_names`.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.var_names)\n for k, l in zip(keys, lookup_keys):\n df[k] = adata.var_vector(l, layer=layer)\n for k, idx in varm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.varm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n", "path": "scanpy/get.py"}], "after_files": [{"content": "\"\"\"This module contains helper functions for accessing data.\"\"\"\nfrom typing import Optional, Iterable, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import spmatrix\n\nfrom anndata import AnnData\n# --------------------------------------------------------------------------------\n# Plotting data helpers\n# --------------------------------------------------------------------------------\n\n\n# TODO: implement diffxpy method, make singledispatch\ndef rank_genes_groups_df(\n adata: AnnData,\n group: str, # Can this be something other than a str?\n *,\n key: str = \"rank_genes_groups\",\n pval_cutoff: Optional[float] = None,\n log2fc_min: Optional[float] = None,\n log2fc_max: Optional[float] = None,\n gene_symbols: Optional[str] = None\n) -> pd.DataFrame:\n \"\"\"\n :func:`scanpy.tl.rank_genes_groups` results in the form of a :class:`pd.DataFrame`.\n\n Params\n ------\n adata\n Object to get results from.\n group\n Which group (as in :func:`scanpy.tl.rank_genes_groups`'s `groupby`\n argument) to return results from.\n key\n Key differential expression groups were stored under.\n pval_cutoff\n Minimum adjusted pval to return.\n log2fc_min\n Minumum logfc to return.\n log2fc_max\n Maximum logfc to return.\n gene_symbols\n Column name in `.var` DataFrame that stores gene symbols. 
Specifying\n this will add that column to the returned dataframe.\n\n Example\n -------\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(pbmc, groupby=\"louvain\", use_raw=True, n_genes=pbmc.shape[1])\n >>> dedf = sc.get.rank_genes_groups_df(pbmc, group=\"0\")\n \"\"\"\n d = pd.DataFrame()\n for k in ['scores', 'names', 'logfoldchanges', 'pvals', 'pvals_adj']:\n d[k] = adata.uns[key][k][group]\n if pval_cutoff is not None:\n d = d[d[\"pvals_adj\"] < pval_cutoff]\n if log2fc_min is not None:\n d = d[d[\"logfoldchanges\"] > log2fc_min]\n if log2fc_max is not None:\n d = d[d[\"logfoldchanges\"] < log2fc_max]\n if gene_symbols is not None:\n d = d.join(adata.var[gene_symbols], on=\"names\")\n return d\n\n\ndef obs_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n obsm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n gene_symbols: str = None,\n use_raw: bool = False\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.\n obsm_keys\n Tuple of `(key from obsm, column index of obsm[key])`.\n layer\n Layer of `adata` to use as expression values.\n gene_symbols\n Column of `adata.var` to search for `keys` in.\n use_raw\n Whether to get expression values from `adata.raw`.\n\n Returns\n -------\n A dataframe with `adata.obs_names` as index, and values specified by `keys`\n and `obsm_keys`.\n\n Examples\n --------\n Getting value for plotting:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> plotdf = sc.get.obs_df(\n pbmc,\n keys=[\"CD8B\", \"n_genes\"],\n obsm_keys=[(\"X_umap\", 0), (\"X_umap\", 1)]\n )\n >>> plotdf.plot.scatter(\"X_umap0\", \"X_umap1\", c=\"CD8B\")\n\n Calculating mean expression for marker genes by cluster:\n\n >>> pbmc = sc.datasets.pbmc68k_reduced()\n >>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']\n >>> genedf = sc.get.obs_df(\n pbmc,\n keys=[\"louvain\", *marker_genes]\n )\n >>> grouped = genedf.groupby(\"louvain\")\n >>> mean, var = grouped.mean(), grouped.var()\n \"\"\"\n if use_raw:\n assert layer is None, \"Cannot specify use_raw=True and a layer at the same time.\"\n if gene_symbols is not None:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.raw.var_names, index=adata.raw.var_names)\n else:\n if gene_symbols is not None:\n gene_names = pd.Series(adata.var_names, index=adata.var[gene_symbols])\n else:\n gene_names = pd.Series(adata.var_names, index=adata.var_names)\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.obs.columns:\n lookup_keys.append(key)\n elif key in gene_names.index:\n lookup_keys.append(gene_names[key])\n else:\n not_found.append(key)\n if len(not_found) > 0:\n if use_raw:\n if gene_symbols is None:\n gene_error = \"`adata.raw.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.raw.var[{}].values`\".format(gene_symbols)\n else:\n if gene_symbols is None:\n gene_error = \"`adata.var_names`\"\n else:\n gene_error = \"gene_symbols column `adata.var[{}].values`\".format(gene_symbols)\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.obs` or in\"\n f\" {gene_error}.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.obs_names)\n for k, l in zip(keys, lookup_keys):\n if not use_raw or k in adata.obs.columns:\n df[k] = adata.obs_vector(l, layer=layer)\n else:\n df[k] = adata.raw.obs_vector(l)\n for k, idx in 
obsm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.obsm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n\n\ndef var_df(\n adata: AnnData,\n keys: Iterable[str] = (),\n varm_keys: Iterable[Tuple[str, int]] = (),\n *,\n layer: str = None,\n) -> pd.DataFrame:\n \"\"\"\\\n Return values for observations in adata.\n\n Params\n ------\n adata\n AnnData object to get values from.\n keys\n Keys from either `.obs_names`, or `.var.columns`.\n varm_keys\n Tuple of `(key from varm, column index of varm[key])`.\n layer\n Layer of `adata` to use as expression values.\n\n Returns\n -------\n A dataframe with `adata.var_names` as index, and values specified by `keys`\n and `varm_keys`.\n \"\"\"\n # Argument handling\n lookup_keys = []\n not_found = []\n for key in keys:\n if key in adata.var.columns:\n lookup_keys.append(key)\n elif key in adata.obs_names:\n lookup_keys.append(key)\n else:\n not_found.append(key)\n if len(not_found) > 0:\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.var` or\"\n \" in `adata.obs_names`.\"\n )\n\n # Make df\n df = pd.DataFrame(index=adata.var_names)\n for k, l in zip(keys, lookup_keys):\n df[k] = adata.var_vector(l, layer=layer)\n for k, idx in varm_keys:\n added_k = f\"{k}-{idx}\"\n val = adata.varm[k]\n if isinstance(val, np.ndarray):\n df[added_k] = np.ravel(val[:, idx])\n elif isinstance(val, spmatrix):\n df[added_k] = np.ravel(val[:, idx].toarray())\n elif isinstance(val, pd.DataFrame):\n df[added_k] = val.loc[:, idx]\n return df\n", "path": "scanpy/get.py"}]}
| 2,864 | 145 |
gh_patches_debug_23301
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-2266
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
matplotlib.use('Agg') conflicts with user code
In this line in `chainer/chainer/training/extensions/plot_report.py` the `matplotlib` backend is changed [Source](https://github.com/pfnet/chainer/blob/master/chainer/training/extensions/plot_report.py#L16):
matplotlib.use('Agg')
Unfortunately, this can interfere with user code. For example, when users set the backend themselves anywhere, it is not clear whether their setting or Chainer's setting wins (whichever is imported first).
The `plot_report` module gets imported when `extensions` is imported. For now, I just removed it from the corresponding `__init__` file locally, which is definitely not a clean solution.
--- END ISSUE ---
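As a hedged illustration of the conflict described in the issue (this sketch is not part of the original report and assumes Chainer and matplotlib are installed): importing `chainer.training.extensions` pulls in `plot_report`, whose module-level `matplotlib.use('Agg')` can override a backend the user selected earlier.
```python
# Hypothetical reproduction sketch; the exact printed backend depends on the
# installed chainer/matplotlib versions.
import matplotlib
matplotlib.use('TkAgg')              # the user explicitly picks a backend

import chainer.training.extensions   # noqa: F401  (imports plot_report, which calls matplotlib.use('Agg'))

print(matplotlib.get_backend())      # may print 'Agg' instead of 'TkAgg'
```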
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/training/extensions/plot_report.py`
Content:
```
1 import json
2 from os import path
3 import warnings
4
5 import numpy
6 import six
7
8 from chainer import reporter
9 import chainer.serializer as serializer_module
10 from chainer.training import extension
11 import chainer.training.trigger as trigger_module
12
13 try:
14 import matplotlib
15
16 matplotlib.use('Agg')
17 from matplotlib import pyplot as plot
18
19 _available = True
20
21 except ImportError:
22 _available = False
23
24
25 def _check_available():
26 if not _available:
27 warnings.warn('matplotlib is not installed on your environment, '
28 'so nothing will be plotted at this time. '
29 'Please install matplotlib to plot figures.\n\n'
30 ' $ pip install matplotlib\n')
31
32
33 class PlotReport(extension.Extension):
34
35 """Trainer extension to output plots.
36
37 This extension accumulates the observations of the trainer to
38 :class:`~chainer.DictSummary` at a regular interval specified by a supplied
39 trigger, and plot a graph with using them.
40
41 There are two triggers to handle this extension. One is the trigger to
42 invoke this extension, which is used to handle the timing of accumulating
43 the results. It is set to ``1, 'iteration'`` by default. The other is the
44 trigger to determine when to emit the result. When this trigger returns
45 True, this extension appends the summary of accumulated values to the list
46 of past summaries, and writes the list to the log file. Then, this
47 extension makes a new fresh summary object which is used until the next
48 time that the trigger fires.
49
50 It also adds ``'epoch'`` and ``'iteration'`` entries to each result
51 dictionary, which are the epoch and iteration counts at the output.
52
53 Args:
54 y_keys (iterable of strs): Keys of values regarded as y. If this is
55 None, nothing is output to the graph.
56 x_key (str): Keys of values regarded as x. The default value is
57 'iteration'.
58 trigger: Trigger that decides when to aggregate the result and output
59 the values. This is distinct from the trigger of this extension
60 itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
61 'iteration'``, it is passed to :class:`IntervalTrigger`.
62 postprocess: Callback to postprocess the result dictionaries. Figure
63 object, Axes object, and all plot data are passed to this callback
64 in this order. This callback can modify the figure.
65 file_name (str): Name of the figure file under the output directory.
66 It can be a format string.
67 marker (str): The marker used to plot the graph. Default is ``'x'``. If
68 ``None`` is given, it draws with no markers.
69 grid (bool): Set the axis grid on if True. Default is True.
70
71 """
72
73 def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
74 postprocess=None, file_name='plot.png', marker='x',
75 grid=True):
76
77 _check_available()
78
79 if not _available:
80 return
81
82 self._x_key = x_key
83 if isinstance(y_keys, str):
84 y_keys = (y_keys,)
85
86 self._y_keys = y_keys
87 self._trigger = trigger_module.get_trigger(trigger)
88 self._file_name = file_name
89 self._marker = marker
90 self._grid = grid
91 self._postprocess = postprocess
92 self._init_summary()
93 self._data = {k: [] for k in y_keys}
94
95 def __call__(self, trainer):
96 if not _available:
97 return
98
99 keys = self._y_keys
100 observation = trainer.observation
101 summary = self._summary
102
103 if keys is None:
104 summary.add(observation)
105 else:
106 summary.add({k: observation[k] for k in keys if k in observation})
107
108 if self._trigger(trainer):
109 stats = self._summary.compute_mean()
110 stats_cpu = {}
111 for name, value in six.iteritems(stats):
112 stats_cpu[name] = float(value) # copy to CPU
113
114 updater = trainer.updater
115 stats_cpu['epoch'] = updater.epoch
116 stats_cpu['iteration'] = updater.iteration
117 x = stats_cpu[self._x_key]
118 data = self._data
119
120 for k in keys:
121 if k in stats_cpu:
122 data[k].append((x, stats_cpu[k]))
123
124 f = plot.figure()
125 a = f.add_subplot(111)
126 a.set_xlabel(self._x_key)
127 if self._grid:
128 a.grid()
129
130 for k in keys:
131 xy = data[k]
132 if len(xy) == 0:
133 continue
134
135 xy = numpy.array(xy)
136 a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)
137
138 if a.has_data():
139 if self._postprocess is not None:
140 self._postprocess(f, a, summary)
141 l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
142 f.savefig(path.join(trainer.out, self._file_name),
143 bbox_extra_artists=(l,), bbox_inches='tight')
144
145 plot.close()
146 self._init_summary()
147
148 def serialize(self, serializer):
149 if isinstance(serializer, serializer_module.Serializer):
150 serializer('_plot_{}'.format(self._file_name),
151 json.dumps(self._data))
152
153 else:
154 self._data = json.loads(
155 serializer('_plot_{}'.format(self._file_name), ''))
156
157 def _init_summary(self):
158 self._summary = reporter.DictSummary()
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/training/extensions/plot_report.py b/chainer/training/extensions/plot_report.py
--- a/chainer/training/extensions/plot_report.py
+++ b/chainer/training/extensions/plot_report.py
@@ -11,9 +11,6 @@
import chainer.training.trigger as trigger_module
try:
- import matplotlib
-
- matplotlib.use('Agg')
from matplotlib import pyplot as plot
_available = True
@@ -50,6 +47,25 @@
It also adds ``'epoch'`` and ``'iteration'`` entries to each result
dictionary, which are the epoch and iteration counts at the output.
+ .. warning::
+
+ If your environment needs to specify a backend of matplotlib
+ explicitly, please call ``matplotlib.use`` before importing Chainer.
+ For example:
+
+ .. code-block:: python
+
+ import matplotlib
+ matplotlib.use('Agg')
+
+ import chainer
+
+ Then, once ``chainer.training.extensions`` is imported,
+ ``matplotlib.use`` will have no effect.
+
+ For the details, please see here:
+ http://matplotlib.org/faq/usage_faq.html#what-is-a-backend
+
Args:
y_keys (iterable of strs): Keys of values regarded as y. If this is
None, nothing is output to the graph.
|
{"golden_diff": "diff --git a/chainer/training/extensions/plot_report.py b/chainer/training/extensions/plot_report.py\n--- a/chainer/training/extensions/plot_report.py\n+++ b/chainer/training/extensions/plot_report.py\n@@ -11,9 +11,6 @@\n import chainer.training.trigger as trigger_module\n \n try:\n- import matplotlib\n-\n- matplotlib.use('Agg')\n from matplotlib import pyplot as plot\n \n _available = True\n@@ -50,6 +47,25 @@\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n \n+ .. warning::\n+\n+ If your environment needs to specify a backend of matplotlib\n+ explicitly, please call ``matplotlib.use`` before importing Chainer.\n+ For example:\n+\n+ .. code-block:: python\n+\n+ import matplotlib\n+ matplotlib.use('Agg')\n+\n+ import chainer\n+\n+ Then, once ``chainer.training.extensions`` is imported,\n+ ``matplotlib.use`` will have no effect.\n+\n+ For the details, please see here:\n+ http://matplotlib.org/faq/usage_faq.html#what-is-a-backend\n+\n Args:\n y_keys (iterable of strs): Keys of values regarded as y. If this is\n None, nothing is output to the graph.\n", "issue": "matplotlib.use('Agg') conflicts with user code\nIn this line in `chainer/chainer/training/extensions/plot_report.py` the `matplotlib` backend is changed [Source](https://github.com/pfnet/chainer/blob/master/chainer/training/extensions/plot_report.py#L16):\r\n\r\n matplotlib.use('Agg')\r\n\r\nUnfortunately, this can interfere with users code. For example, when the user sets the backend himself anywhere, it is not known, whether his setting or the Chainer settings wins (is imported first).\r\n\r\nThe `plot_report` gets imported, when `extensions` is imported. For now, I just removed from the corresponding `__init__` file locally, which is definitely not a clean solution.\n", "before_files": [{"content": "import json\nfrom os import path\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import reporter\nimport chainer.serializer as serializer_module\nfrom chainer.training import extension\nimport chainer.training.trigger as trigger_module\n\ntry:\n import matplotlib\n\n matplotlib.use('Agg')\n from matplotlib import pyplot as plot\n\n _available = True\n\nexcept ImportError:\n _available = False\n\n\ndef _check_available():\n if not _available:\n warnings.warn('matplotlib is not installed on your environment, '\n 'so nothing will be plotted at this time. '\n 'Please install matplotlib to plot figures.\\n\\n'\n ' $ pip install matplotlib\\n')\n\n\nclass PlotReport(extension.Extension):\n\n \"\"\"Trainer extension to output plots.\n\n This extension accumulates the observations of the trainer to\n :class:`~chainer.DictSummary` at a regular interval specified by a supplied\n trigger, and plot a graph with using them.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. 
Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n\n Args:\n y_keys (iterable of strs): Keys of values regarded as y. If this is\n None, nothing is output to the graph.\n x_key (str): Keys of values regarded as x. The default value is\n 'iteration'.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,\n 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. Figure\n object, Axes object, and all plot data are passed to this callback\n in this order. This callback can modify the figure.\n file_name (str): Name of the figure file under the output directory.\n It can be a format string.\n marker (str): The marker used to plot the graph. Default is ``'x'``. If\n ``None`` is given, it draws with no markers.\n grid (bool): Set the axis grid on if True. Default is True.\n\n \"\"\"\n\n def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),\n postprocess=None, file_name='plot.png', marker='x',\n grid=True):\n\n _check_available()\n\n if not _available:\n return\n\n self._x_key = x_key\n if isinstance(y_keys, str):\n y_keys = (y_keys,)\n\n self._y_keys = y_keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._file_name = file_name\n self._marker = marker\n self._grid = grid\n self._postprocess = postprocess\n self._init_summary()\n self._data = {k: [] for k in y_keys}\n\n def __call__(self, trainer):\n if not _available:\n return\n\n keys = self._y_keys\n observation = trainer.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n if self._trigger(trainer):\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in six.iteritems(stats):\n stats_cpu[name] = float(value) # copy to CPU\n\n updater = trainer.updater\n stats_cpu['epoch'] = updater.epoch\n stats_cpu['iteration'] = updater.iteration\n x = stats_cpu[self._x_key]\n data = self._data\n\n for k in keys:\n if k in stats_cpu:\n data[k].append((x, stats_cpu[k]))\n\n f = plot.figure()\n a = f.add_subplot(111)\n a.set_xlabel(self._x_key)\n if self._grid:\n a.grid()\n\n for k in keys:\n xy = data[k]\n if len(xy) == 0:\n continue\n\n xy = numpy.array(xy)\n a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)\n\n if a.has_data():\n if self._postprocess is not None:\n self._postprocess(f, a, summary)\n l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n f.savefig(path.join(trainer.out, self._file_name),\n bbox_extra_artists=(l,), bbox_inches='tight')\n\n plot.close()\n self._init_summary()\n\n def serialize(self, serializer):\n if isinstance(serializer, serializer_module.Serializer):\n serializer('_plot_{}'.format(self._file_name),\n json.dumps(self._data))\n\n else:\n self._data = json.loads(\n serializer('_plot_{}'.format(self._file_name), ''))\n\n def _init_summary(self):\n self._summary = reporter.DictSummary()\n", "path": "chainer/training/extensions/plot_report.py"}], "after_files": [{"content": "import json\nfrom os import path\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import reporter\nimport chainer.serializer as 
serializer_module\nfrom chainer.training import extension\nimport chainer.training.trigger as trigger_module\n\ntry:\n from matplotlib import pyplot as plot\n\n _available = True\n\nexcept ImportError:\n _available = False\n\n\ndef _check_available():\n if not _available:\n warnings.warn('matplotlib is not installed on your environment, '\n 'so nothing will be plotted at this time. '\n 'Please install matplotlib to plot figures.\\n\\n'\n ' $ pip install matplotlib\\n')\n\n\nclass PlotReport(extension.Extension):\n\n \"\"\"Trainer extension to output plots.\n\n This extension accumulates the observations of the trainer to\n :class:`~chainer.DictSummary` at a regular interval specified by a supplied\n trigger, and plot a graph with using them.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n\n .. warning::\n\n If your environment needs to specify a backend of matplotlib\n explicitly, please call ``matplotlib.use`` before importing Chainer.\n For example:\n\n .. code-block:: python\n\n import matplotlib\n matplotlib.use('Agg')\n\n import chainer\n\n Then, once ``chainer.training.extensions`` is imported,\n ``matplotlib.use`` will have no effect.\n\n For the details, please see here:\n http://matplotlib.org/faq/usage_faq.html#what-is-a-backend\n\n Args:\n y_keys (iterable of strs): Keys of values regarded as y. If this is\n None, nothing is output to the graph.\n x_key (str): Keys of values regarded as x. The default value is\n 'iteration'.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,\n 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. Figure\n object, Axes object, and all plot data are passed to this callback\n in this order. This callback can modify the figure.\n file_name (str): Name of the figure file under the output directory.\n It can be a format string.\n marker (str): The marker used to plot the graph. Default is ``'x'``. If\n ``None`` is given, it draws with no markers.\n grid (bool): Set the axis grid on if True. 
Default is True.\n\n \"\"\"\n\n def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),\n postprocess=None, file_name='plot.png', marker='x',\n grid=True):\n\n _check_available()\n\n if not _available:\n return\n\n self._x_key = x_key\n if isinstance(y_keys, str):\n y_keys = (y_keys,)\n\n self._y_keys = y_keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._file_name = file_name\n self._marker = marker\n self._grid = grid\n self._postprocess = postprocess\n self._init_summary()\n self._data = {k: [] for k in y_keys}\n\n def __call__(self, trainer):\n if not _available:\n return\n\n keys = self._y_keys\n observation = trainer.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n if self._trigger(trainer):\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in six.iteritems(stats):\n stats_cpu[name] = float(value) # copy to CPU\n\n updater = trainer.updater\n stats_cpu['epoch'] = updater.epoch\n stats_cpu['iteration'] = updater.iteration\n x = stats_cpu[self._x_key]\n data = self._data\n\n for k in keys:\n if k in stats_cpu:\n data[k].append((x, stats_cpu[k]))\n\n f = plot.figure()\n a = f.add_subplot(111)\n a.set_xlabel(self._x_key)\n if self._grid:\n a.grid()\n\n for k in keys:\n xy = data[k]\n if len(xy) == 0:\n continue\n\n xy = numpy.array(xy)\n a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)\n\n if a.has_data():\n if self._postprocess is not None:\n self._postprocess(f, a, summary)\n l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n f.savefig(path.join(trainer.out, self._file_name),\n bbox_extra_artists=(l,), bbox_inches='tight')\n\n plot.close()\n self._init_summary()\n\n def serialize(self, serializer):\n if isinstance(serializer, serializer_module.Serializer):\n serializer('_plot_{}'.format(self._file_name),\n json.dumps(self._data))\n\n else:\n self._data = json.loads(\n serializer('_plot_{}'.format(self._file_name), ''))\n\n def _init_summary(self):\n self._summary = reporter.DictSummary()\n", "path": "chainer/training/extensions/plot_report.py"}]}
| 2,014 | 306 |
gh_patches_debug_37064
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-2111
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`qtile --log-level=INFO` no longer works
My `.xsession` runs qtile using `qtile --log-level=INFO`.
This no longer works.
```
qtile: error: unrecognized arguments: --log-level=INFO
```
I'm guessing due to 908b910d00087ece13bb576f672c94bcf9e6fc43?
No big deal, but the changelog says
```
Running `qtile` without arguments will continue to work for the
foreseeable future, but will be eventually deprecated.
```
--- END ISSUE ---
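A minimal argparse sketch (hypothetical code, not Qtile's actual implementation) of why the invocation breaks: the backward-compatibility shim only inserts `start` when no arguments are given at all, and after the refactor `--log-level` is defined on the `start` subparser only, so the top-level parser rejects it.
```python
import argparse

parser = argparse.ArgumentParser(prog="qtile")
subparsers = parser.add_subparsers()
start = subparsers.add_parser("start")
start.add_argument("-l", "--log-level", default="WARNING")

# Works: the flag is handled by the 'start' subparser.
print(parser.parse_args(["start", "--log-level=INFO"]))

# Expected to exit with "unrecognized arguments: --log-level=INFO", mirroring
# the report, because the top-level parser never defines the option:
# parser.parse_args(["--log-level=INFO"])
```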
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/scripts/main.py`
Content:
```
1 import argparse
2 import sys
3
4 from libqtile.scripts import cmd_obj, run_cmd, shell, start, top
5
6 try:
7 import pkg_resources
8 VERSION = pkg_resources.require("qtile")[0].version
9 except (pkg_resources.DistributionNotFound, ImportError):
10 VERSION = 'dev'
11
12
13 def main():
14 # backward compat hack: `qtile` with no args (or non-subcommand args)
15 # should default to `qtile start`. it seems impolite for commands to do
16 # nothing when run with no args, so let's warn about this being deprecated.
17 if len(sys.argv) == 1:
18 print("please move to `qtile start` as your qtile invocation, "
19 "instead of just `qtile`; this default will be removed Soon(TM)")
20 sys.argv.insert(1, "start")
21
22 parser = argparse.ArgumentParser(
23 prog='qtile',
24 description='A full-featured, pure-Python tiling window manager.',
25 )
26 parser.add_argument(
27 '--version',
28 action='version',
29 version=VERSION,
30 )
31
32 subparsers = parser.add_subparsers()
33 start.add_subcommand(subparsers)
34 shell.add_subcommand(subparsers)
35 top.add_subcommand(subparsers)
36 run_cmd.add_subcommand(subparsers)
37 cmd_obj.add_subcommand(subparsers)
38
39 # `qtile help` should print help
40 def print_help(options):
41 parser.print_help()
42 help_ = subparsers.add_parser("help", help="Print help information and exit")
43 help_.set_defaults(func=print_help)
44
45 options = parser.parse_args()
46 options.func(options)
47
```
Path: `libqtile/scripts/start.py`
Content:
```
1 # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
2 # Copyright (c) 2011, Florian Mounier
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a copy
5 # of this software and associated documentation files (the "Software"), to deal
6 # in the Software without restriction, including without limitation the rights
7 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 # copies of the Software, and to permit persons to whom the Software is
9 # furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 # SOFTWARE.
21
22 # Set the locale before any widgets or anything are imported, so any widget
23 # whose defaults depend on a reasonable locale sees something reasonable.
24 import locale
25 import logging
26 from os import getenv, makedirs, path
27 from sys import exit, stdout
28
29 import libqtile.backend
30 from libqtile import confreader
31 from libqtile.log_utils import init_log, logger
32
33
34 def rename_process():
35 """
36 Try to rename the qtile process if py-setproctitle is installed:
37
38 http://code.google.com/p/py-setproctitle/
39
40 Will fail silently if it's not installed. Setting the title lets you do
41 stuff like "killall qtile".
42 """
43 try:
44 import setproctitle
45 setproctitle.setproctitle("qtile")
46 except ImportError:
47 pass
48
49
50 def make_qtile(options):
51 log_level = getattr(logging, options.log_level)
52 init_log(log_level=log_level, log_color=stdout.isatty())
53 kore = libqtile.backend.get_core(options.backend)
54
55 if not path.isfile(options.configfile):
56 try:
57 makedirs(path.dirname(options.configfile), exist_ok=True)
58 from shutil import copyfile
59 default_config_path = path.join(path.dirname(__file__),
60 "..",
61 "resources",
62 "default_config.py")
63 copyfile(default_config_path, options.configfile)
64 logger.info('Copied default_config.py to %s', options.configfile)
65 except Exception as e:
66 logger.exception('Failed to copy default_config.py to %s: (%s)',
67 options.configfile, e)
68
69 config = confreader.Config(options.configfile, kore=kore)
70
71 # XXX: the import is here because we need to call init_log
72 # before start importing stuff
73 from libqtile.core.manager import Qtile
74 return Qtile(
75 kore,
76 config,
77 no_spawn=options.no_spawn,
78 state=options.state,
79 socket_path=options.socket,
80 )
81
82
83 def start(options):
84 try:
85 locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore
86 except locale.Error:
87 pass
88
89 rename_process()
90 q = make_qtile(options)
91 try:
92 q.loop()
93 except Exception:
94 logger.exception('Qtile crashed')
95 exit(1)
96 logger.info('Exiting...')
97
98
99 def add_subcommand(subparsers):
100 parser = subparsers.add_parser("start", help="Start the window manager")
101 parser.add_argument(
102 "-c", "--config",
103 action="store",
104 default=path.expanduser(path.join(
105 getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),
106 dest="configfile",
107 help='Use the specified configuration file',
108 )
109 parser.add_argument(
110 "-s", "--socket",
111 action="store",
112 default=None,
113 dest="socket",
114 help='Path of the Qtile IPC socket.'
115 )
116 parser.add_argument(
117 "-n", "--no-spawn",
118 action="store_true",
119 default=False,
120 dest="no_spawn",
121 help='Avoid spawning apps. (Used for restart)'
122 )
123 parser.add_argument(
124 '-l', '--log-level',
125 default='WARNING',
126 dest='log_level',
127 choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
128 help='Set qtile log level'
129 )
130 parser.add_argument(
131 '--with-state',
132 default=None,
133 dest='state',
134 help='Pickled QtileState object (typically used only internally)',
135 )
136 parser.add_argument(
137 '-b', '--backend',
138 default='x11',
139 dest='backend',
140 choices=libqtile.backend.CORES,
141 help='Use specified backend. Currently only x11 is implemented.',
142 )
143 parser.set_defaults(func=start)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py
--- a/libqtile/scripts/main.py
+++ b/libqtile/scripts/main.py
@@ -1,6 +1,8 @@
import argparse
+import logging
import sys
+from libqtile.log_utils import init_log
from libqtile.scripts import cmd_obj, run_cmd, shell, start, top
try:
@@ -28,6 +30,14 @@
action='version',
version=VERSION,
)
+ parser.add_argument(
+ '-l', '--log-level',
+ default='WARNING',
+ dest='log_level',
+ type=str.upper,
+ choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
+ help='Set qtile log level'
+ )
subparsers = parser.add_subparsers()
start.add_subcommand(subparsers)
@@ -43,4 +53,6 @@
help_.set_defaults(func=print_help)
options = parser.parse_args()
+ log_level = getattr(logging, options.log_level)
+ init_log(log_level=log_level, log_color=sys.stdout.isatty())
options.func(options)
diff --git a/libqtile/scripts/start.py b/libqtile/scripts/start.py
--- a/libqtile/scripts/start.py
+++ b/libqtile/scripts/start.py
@@ -22,13 +22,12 @@
# Set the locale before any widgets or anything are imported, so any widget
# whose defaults depend on a reasonable locale sees something reasonable.
import locale
-import logging
from os import getenv, makedirs, path
-from sys import exit, stdout
+from sys import exit
import libqtile.backend
from libqtile import confreader
-from libqtile.log_utils import init_log, logger
+from libqtile.log_utils import logger
def rename_process():
@@ -48,8 +47,6 @@
def make_qtile(options):
- log_level = getattr(logging, options.log_level)
- init_log(log_level=log_level, log_color=stdout.isatty())
kore = libqtile.backend.get_core(options.backend)
if not path.isfile(options.configfile):
@@ -120,13 +117,6 @@
dest="no_spawn",
help='Avoid spawning apps. (Used for restart)'
)
- parser.add_argument(
- '-l', '--log-level',
- default='WARNING',
- dest='log_level',
- choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
- help='Set qtile log level'
- )
parser.add_argument(
'--with-state',
default=None,
|
{"golden_diff": "diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py\n--- a/libqtile/scripts/main.py\n+++ b/libqtile/scripts/main.py\n@@ -1,6 +1,8 @@\n import argparse\n+import logging\n import sys\n \n+from libqtile.log_utils import init_log\n from libqtile.scripts import cmd_obj, run_cmd, shell, start, top\n \n try:\n@@ -28,6 +30,14 @@\n action='version',\n version=VERSION,\n )\n+ parser.add_argument(\n+ '-l', '--log-level',\n+ default='WARNING',\n+ dest='log_level',\n+ type=str.upper,\n+ choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n+ help='Set qtile log level'\n+ )\n \n subparsers = parser.add_subparsers()\n start.add_subcommand(subparsers)\n@@ -43,4 +53,6 @@\n help_.set_defaults(func=print_help)\n \n options = parser.parse_args()\n+ log_level = getattr(logging, options.log_level)\n+ init_log(log_level=log_level, log_color=sys.stdout.isatty())\n options.func(options)\ndiff --git a/libqtile/scripts/start.py b/libqtile/scripts/start.py\n--- a/libqtile/scripts/start.py\n+++ b/libqtile/scripts/start.py\n@@ -22,13 +22,12 @@\n # Set the locale before any widgets or anything are imported, so any widget\n # whose defaults depend on a reasonable locale sees something reasonable.\n import locale\n-import logging\n from os import getenv, makedirs, path\n-from sys import exit, stdout\n+from sys import exit\n \n import libqtile.backend\n from libqtile import confreader\n-from libqtile.log_utils import init_log, logger\n+from libqtile.log_utils import logger\n \n \n def rename_process():\n@@ -48,8 +47,6 @@\n \n \n def make_qtile(options):\n- log_level = getattr(logging, options.log_level)\n- init_log(log_level=log_level, log_color=stdout.isatty())\n kore = libqtile.backend.get_core(options.backend)\n \n if not path.isfile(options.configfile):\n@@ -120,13 +117,6 @@\n dest=\"no_spawn\",\n help='Avoid spawning apps. (Used for restart)'\n )\n- parser.add_argument(\n- '-l', '--log-level',\n- default='WARNING',\n- dest='log_level',\n- choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n- help='Set qtile log level'\n- )\n parser.add_argument(\n '--with-state',\n default=None,\n", "issue": "`qtile --log-level=INFO` no longer works\nMy `.xsession` runs qtile using `qtile --log-level=INFO`.\r\n\r\nThis no longer works.\r\n\r\n```\r\nqtile: error: unrecognized arguments: --log-level=INFO\r\n```\r\n\r\nI'm guessing due to 908b910d00087ece13bb576f672c94bcf9e6fc43?\r\n\r\nNo big deal, but the changelog says\r\n\r\n```\r\n Running `qtile` without arguments will continue to work for the\r\n forseeable future, but will be eventually deprecated.\r\n```\n", "before_files": [{"content": "import argparse\nimport sys\n\nfrom libqtile.scripts import cmd_obj, run_cmd, shell, start, top\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef main():\n # backward compat hack: `qtile` with no args (or non-subcommand args)\n # should default to `qtile start`. 
it seems impolite for commands to do\n # nothing when run with no args, so let's warn about this being deprecated.\n if len(sys.argv) == 1:\n print(\"please move to `qtile start` as your qtile invocation, \"\n \"instead of just `qtile`; this default will be removed Soon(TM)\")\n sys.argv.insert(1, \"start\")\n\n parser = argparse.ArgumentParser(\n prog='qtile',\n description='A full-featured, pure-Python tiling window manager.',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n\n subparsers = parser.add_subparsers()\n start.add_subcommand(subparsers)\n shell.add_subcommand(subparsers)\n top.add_subcommand(subparsers)\n run_cmd.add_subcommand(subparsers)\n cmd_obj.add_subcommand(subparsers)\n\n # `qtile help` should print help\n def print_help(options):\n parser.print_help()\n help_ = subparsers.add_parser(\"help\", help=\"Print help information and exit\")\n help_.set_defaults(func=print_help)\n\n options = parser.parse_args()\n options.func(options)\n", "path": "libqtile/scripts/main.py"}, {"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nimport logging\nfrom os import getenv, makedirs, path\nfrom sys import exit, stdout\n\nimport libqtile.backend\nfrom libqtile import confreader\nfrom libqtile.log_utils import init_log, logger\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. 
Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile(options):\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level, log_color=stdout.isatty())\n kore = libqtile.backend.get_core(options.backend)\n\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config(options.configfile, kore=kore)\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core.manager import Qtile\n return Qtile(\n kore,\n config,\n no_spawn=options.no_spawn,\n state=options.state,\n socket_path=options.socket,\n )\n\n\ndef start(options):\n try:\n locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n except locale.Error:\n pass\n\n rename_process()\n q = make_qtile(options)\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n exit(1)\n logger.info('Exiting...')\n\n\ndef add_subcommand(subparsers):\n parser = subparsers.add_parser(\"start\", help=\"Start the window manager\")\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. (Used for restart)'\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n parser.add_argument(\n '-b', '--backend',\n default='x11',\n dest='backend',\n choices=libqtile.backend.CORES,\n help='Use specified backend. Currently only x11 is implemented.',\n )\n parser.set_defaults(func=start)\n", "path": "libqtile/scripts/start.py"}], "after_files": [{"content": "import argparse\nimport logging\nimport sys\n\nfrom libqtile.log_utils import init_log\nfrom libqtile.scripts import cmd_obj, run_cmd, shell, start, top\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef main():\n # backward compat hack: `qtile` with no args (or non-subcommand args)\n # should default to `qtile start`. 
it seems impolite for commands to do\n # nothing when run with no args, so let's warn about this being deprecated.\n if len(sys.argv) == 1:\n print(\"please move to `qtile start` as your qtile invocation, \"\n \"instead of just `qtile`; this default will be removed Soon(TM)\")\n sys.argv.insert(1, \"start\")\n\n parser = argparse.ArgumentParser(\n prog='qtile',\n description='A full-featured, pure-Python tiling window manager.',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n type=str.upper,\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n\n subparsers = parser.add_subparsers()\n start.add_subcommand(subparsers)\n shell.add_subcommand(subparsers)\n top.add_subcommand(subparsers)\n run_cmd.add_subcommand(subparsers)\n cmd_obj.add_subcommand(subparsers)\n\n # `qtile help` should print help\n def print_help(options):\n parser.print_help()\n help_ = subparsers.add_parser(\"help\", help=\"Print help information and exit\")\n help_.set_defaults(func=print_help)\n\n options = parser.parse_args()\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level, log_color=sys.stdout.isatty())\n options.func(options)\n", "path": "libqtile/scripts/main.py"}, {"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nfrom os import getenv, makedirs, path\nfrom sys import exit\n\nimport libqtile.backend\nfrom libqtile import confreader\nfrom libqtile.log_utils import logger\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. 
Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile(options):\n kore = libqtile.backend.get_core(options.backend)\n\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config(options.configfile, kore=kore)\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core.manager import Qtile\n return Qtile(\n kore,\n config,\n no_spawn=options.no_spawn,\n state=options.state,\n socket_path=options.socket,\n )\n\n\ndef start(options):\n try:\n locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n except locale.Error:\n pass\n\n rename_process()\n q = make_qtile(options)\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n exit(1)\n logger.info('Exiting...')\n\n\ndef add_subcommand(subparsers):\n parser = subparsers.add_parser(\"start\", help=\"Start the window manager\")\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. (Used for restart)'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n parser.add_argument(\n '-b', '--backend',\n default='x11',\n dest='backend',\n choices=libqtile.backend.CORES,\n help='Use specified backend. Currently only x11 is implemented.',\n )\n parser.set_defaults(func=start)\n", "path": "libqtile/scripts/start.py"}]}
| 2,255 | 587 |
gh_patches_debug_14122
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-5800
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bipartite Closeness centrality algorithm does not report nodes with zero value.
### Current Behavior
The function always saves closeness values of '0.0' at closeness[len(nodes)], which is closeness[n]. This results in a returned dictionary with fewer elements than the number of nodes in the graph.
### Expected Behavior
The values of '0.0' should instead be saved at closeness[node] within the for-loops iterating over all nodes.
### Steps to Reproduce
/
### Environment
Python version: 3.9.12
NetworkX version: 2.8
### Additional context
In both cases within the source code below
`else:
closeness[n] = 0.
`
needs to be changed to:
`else:
closeness[node] = 0.
`
See sourcecode:
https://networkx.org/documentation/stable/_modules/networkx/algorithms/bipartite/centrality.html#closeness_centrality
--- END ISSUE ---
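A hypothetical illustration of the reported effect (assuming a bipartite graph with an isolated node in the `nodes` set, which is what triggers the `else` branch): with the unpatched function, the returned dictionary can miss that node and instead contain the integer key `len(nodes)`.
```python
import networkx as nx
from networkx.algorithms import bipartite

G = nx.Graph()
G.add_nodes_from(["u0", "u1"], bipartite=0)   # "u1" stays isolated
G.add_nodes_from(["v0"], bipartite=1)
G.add_edge("u0", "v0")

c = bipartite.closeness_centrality(G, nodes=["u0", "u1"])
# With the buggy branch this may lack "u1" and instead contain the integer
# key 2 (== len(nodes)) with value 0.0; with the fix, "u1" maps to 0.0.
print(c)
```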
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/bipartite/centrality.py`
Content:
```
1 import networkx as nx
2
3 __all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"]
4
5
6 def degree_centrality(G, nodes):
7 r"""Compute the degree centrality for nodes in a bipartite network.
8
9 The degree centrality for a node `v` is the fraction of nodes
10 connected to it.
11
12 Parameters
13 ----------
14 G : graph
15 A bipartite network
16
17 nodes : list or container
18 Container with all nodes in one bipartite node set.
19
20 Returns
21 -------
22 centrality : dictionary
23 Dictionary keyed by node with bipartite degree centrality as the value.
24
25 See Also
26 --------
27 betweenness_centrality
28 closeness_centrality
29 :func:`~networkx.algorithms.bipartite.basic.sets`
30 :func:`~networkx.algorithms.bipartite.basic.is_bipartite`
31
32 Notes
33 -----
34 The nodes input parameter must contain all nodes in one bipartite node set,
35 but the dictionary returned contains all nodes from both bipartite node
36 sets. See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
37 for further details on how bipartite graphs are handled in NetworkX.
38
39 For unipartite networks, the degree centrality values are
40 normalized by dividing by the maximum possible degree (which is
41 `n-1` where `n` is the number of nodes in G).
42
43 In the bipartite case, the maximum possible degree of a node in a
44 bipartite node set is the number of nodes in the opposite node set
45 [1]_. The degree centrality for a node `v` in the bipartite
46 sets `U` with `n` nodes and `V` with `m` nodes is
47
48 .. math::
49
50 d_{v} = \frac{deg(v)}{m}, \mbox{for} v \in U ,
51
52 d_{v} = \frac{deg(v)}{n}, \mbox{for} v \in V ,
53
54
55 where `deg(v)` is the degree of node `v`.
56
57 References
58 ----------
59 .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
60 Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
61 of Social Network Analysis. Sage Publications.
62 https://dx.doi.org/10.4135/9781446294413.n28
63 """
64 top = set(nodes)
65 bottom = set(G) - top
66 s = 1.0 / len(bottom)
67 centrality = {n: d * s for n, d in G.degree(top)}
68 s = 1.0 / len(top)
69 centrality.update({n: d * s for n, d in G.degree(bottom)})
70 return centrality
71
72
73 def betweenness_centrality(G, nodes):
74 r"""Compute betweenness centrality for nodes in a bipartite network.
75
76 Betweenness centrality of a node `v` is the sum of the
77 fraction of all-pairs shortest paths that pass through `v`.
78
79 Values of betweenness are normalized by the maximum possible
80 value which for bipartite graphs is limited by the relative size
81 of the two node sets [1]_.
82
83 Let `n` be the number of nodes in the node set `U` and
84 `m` be the number of nodes in the node set `V`, then
85 nodes in `U` are normalized by dividing by
86
87 .. math::
88
89 \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,
90
91 where
92
93 .. math::
94
95 s = (n - 1) \div m , t = (n - 1) \mod m ,
96
97 and nodes in `V` are normalized by dividing by
98
99 .. math::
100
101 \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,
102
103 where,
104
105 .. math::
106
107 p = (m - 1) \div n , r = (m - 1) \mod n .
108
109 Parameters
110 ----------
111 G : graph
112 A bipartite graph
113
114 nodes : list or container
115 Container with all nodes in one bipartite node set.
116
117 Returns
118 -------
119 betweenness : dictionary
120 Dictionary keyed by node with bipartite betweenness centrality
121 as the value.
122
123 See Also
124 --------
125 degree_centrality
126 closeness_centrality
127 :func:`~networkx.algorithms.bipartite.basic.sets`
128 :func:`~networkx.algorithms.bipartite.basic.is_bipartite`
129
130 Notes
131 -----
132 The nodes input parameter must contain all nodes in one bipartite node set,
133 but the dictionary returned contains all nodes from both node sets.
134 See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
135 for further details on how bipartite graphs are handled in NetworkX.
136
137
138 References
139 ----------
140 .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
141 Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
142 of Social Network Analysis. Sage Publications.
143 https://dx.doi.org/10.4135/9781446294413.n28
144 """
145 top = set(nodes)
146 bottom = set(G) - top
147 n = len(top)
148 m = len(bottom)
149 s, t = divmod(n - 1, m)
150 bet_max_top = (
151 ((m**2) * ((s + 1) ** 2))
152 + (m * (s + 1) * (2 * t - s - 1))
153 - (t * ((2 * s) - t + 3))
154 ) / 2.0
155 p, r = divmod(m - 1, n)
156 bet_max_bot = (
157 ((n**2) * ((p + 1) ** 2))
158 + (n * (p + 1) * (2 * r - p - 1))
159 - (r * ((2 * p) - r + 3))
160 ) / 2.0
161 betweenness = nx.betweenness_centrality(G, normalized=False, weight=None)
162 for node in top:
163 betweenness[node] /= bet_max_top
164 for node in bottom:
165 betweenness[node] /= bet_max_bot
166 return betweenness
167
168
169 def closeness_centrality(G, nodes, normalized=True):
170 r"""Compute the closeness centrality for nodes in a bipartite network.
171
172 The closeness of a node is the distance to all other nodes in the
173 graph or in the case that the graph is not connected to all other nodes
174 in the connected component containing that node.
175
176 Parameters
177 ----------
178 G : graph
179 A bipartite network
180
181 nodes : list or container
182 Container with all nodes in one bipartite node set.
183
184 normalized : bool, optional
185 If True (default) normalize by connected component size.
186
187 Returns
188 -------
189 closeness : dictionary
190 Dictionary keyed by node with bipartite closeness centrality
191 as the value.
192
193 See Also
194 --------
195 betweenness_centrality
196 degree_centrality
197 :func:`~networkx.algorithms.bipartite.basic.sets`
198 :func:`~networkx.algorithms.bipartite.basic.is_bipartite`
199
200 Notes
201 -----
202 The nodes input parameter must contain all nodes in one bipartite node set,
203 but the dictionary returned contains all nodes from both node sets.
204 See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
205 for further details on how bipartite graphs are handled in NetworkX.
206
207
208 Closeness centrality is normalized by the minimum distance possible.
209 In the bipartite case the minimum distance for a node in one bipartite
210 node set is 1 from all nodes in the other node set and 2 from all
211 other nodes in its own set [1]_. Thus the closeness centrality
212 for node `v` in the two bipartite sets `U` with
213 `n` nodes and `V` with `m` nodes is
214
215 .. math::
216
217 c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U,
218
219 c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V,
220
221 where `d` is the sum of the distances from `v` to all
222 other nodes.
223
224 Higher values of closeness indicate higher centrality.
225
226 As in the unipartite case, setting normalized=True causes the
227 values to normalized further to n-1 / size(G)-1 where n is the
228 number of nodes in the connected part of graph containing the
229 node. If the graph is not completely connected, this algorithm
230 computes the closeness centrality for each connected part
231 separately.
232
233 References
234 ----------
235 .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
236 Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
237 of Social Network Analysis. Sage Publications.
238 https://dx.doi.org/10.4135/9781446294413.n28
239 """
240 closeness = {}
241 path_length = nx.single_source_shortest_path_length
242 top = set(nodes)
243 bottom = set(G) - top
244 n = len(top)
245 m = len(bottom)
246 for node in top:
247 sp = dict(path_length(G, node))
248 totsp = sum(sp.values())
249 if totsp > 0.0 and len(G) > 1:
250 closeness[node] = (m + 2 * (n - 1)) / totsp
251 if normalized:
252 s = (len(sp) - 1) / (len(G) - 1)
253 closeness[node] *= s
254 else:
255 closeness[n] = 0.0
256 for node in bottom:
257 sp = dict(path_length(G, node))
258 totsp = sum(sp.values())
259 if totsp > 0.0 and len(G) > 1:
260 closeness[node] = (n + 2 * (m - 1)) / totsp
261 if normalized:
262 s = (len(sp) - 1) / (len(G) - 1)
263 closeness[node] *= s
264 else:
265 closeness[n] = 0.0
266 return closeness
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/networkx/algorithms/bipartite/centrality.py b/networkx/algorithms/bipartite/centrality.py
--- a/networkx/algorithms/bipartite/centrality.py
+++ b/networkx/algorithms/bipartite/centrality.py
@@ -252,7 +252,7 @@
s = (len(sp) - 1) / (len(G) - 1)
closeness[node] *= s
else:
- closeness[n] = 0.0
+ closeness[node] = 0.0
for node in bottom:
sp = dict(path_length(G, node))
totsp = sum(sp.values())
@@ -262,5 +262,5 @@
s = (len(sp) - 1) / (len(G) - 1)
closeness[node] *= s
else:
- closeness[n] = 0.0
+ closeness[node] = 0.0
return closeness
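A minimal reproduction sketch of the behaviour this diff corrects; the tiny graph and its node labels are illustrative only:

```python
import networkx as nx
from networkx.algorithms import bipartite

# Top set {"a", "b"} and bottom set {1}; "b" is isolated, so its total
# shortest-path distance is 0 and the pre-patch branch writes
# closeness[n] (here n == 2) instead of closeness["b"].
G = nx.Graph()
G.add_nodes_from(["a", "b"], bipartite=0)
G.add_node(1, bipartite=1)
G.add_edge("a", 1)

result = bipartite.closeness_centrality(G, ["a", "b"])
# Pre-patch: keys are "a", 1 and a spurious integer 2, with "b" missing.
# Post-patch: "b" is present and maps to 0.0.
print(sorted(result, key=str))
```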
|
{"golden_diff": "diff --git a/networkx/algorithms/bipartite/centrality.py b/networkx/algorithms/bipartite/centrality.py\n--- a/networkx/algorithms/bipartite/centrality.py\n+++ b/networkx/algorithms/bipartite/centrality.py\n@@ -252,7 +252,7 @@\n s = (len(sp) - 1) / (len(G) - 1)\n closeness[node] *= s\n else:\n- closeness[n] = 0.0\n+ closeness[node] = 0.0\n for node in bottom:\n sp = dict(path_length(G, node))\n totsp = sum(sp.values())\n@@ -262,5 +262,5 @@\n s = (len(sp) - 1) / (len(G) - 1)\n closeness[node] *= s\n else:\n- closeness[n] = 0.0\n+ closeness[node] = 0.0\n return closeness\n", "issue": "Bipartite Closeness centrality algorithm does not report nodes with zero value.\n### Current Behavior\r\nThe function saves closeness values of '0.0' always at closeness[len(nodes)] which is closeness[n]. This results a returned dictionary with less elements than number of nodes in the graph.\r\n\r\n### Expected Behavior\r\nThe values of '0.0' should instead be saved at closeness[node] within the for-loops iterating over all nodes.\r\n\r\n### Steps to Reproduce\r\n/\r\n\r\n### Environment\r\nPython version: 3.9.12\r\nNetworkX version: 2.8\r\n\r\n\r\n### Additional context\r\nIn both cases within the source code below\r\n`else:\r\n closeness[n] = 0.\r\n`\r\nneeds to be changed to:\r\n`else:\r\n closeness[node] = 0.\r\n`\r\nSee sourcecode:\r\nhttps://networkx.org/documentation/stable/_modules/networkx/algorithms/bipartite/centrality.html#closeness_centrality\nBipartite Closeness centrality algorithm does not report nodes with zero value.\n### Current Behavior\r\nThe function saves closeness values of '0.0' always at closeness[len(nodes)] which is closeness[n]. This results a returned dictionary with less elements than number of nodes in the graph.\r\n\r\n### Expected Behavior\r\nThe values of '0.0' should instead be saved at closeness[node] within the for-loops iterating over all nodes.\r\n\r\n### Steps to Reproduce\r\n/\r\n\r\n### Environment\r\nPython version: 3.9.12\r\nNetworkX version: 2.8\r\n\r\n\r\n### Additional context\r\nIn both cases within the source code below\r\n`else:\r\n closeness[n] = 0.\r\n`\r\nneeds to be changed to:\r\n`else:\r\n closeness[node] = 0.\r\n`\r\nSee sourcecode:\r\nhttps://networkx.org/documentation/stable/_modules/networkx/algorithms/bipartite/centrality.html#closeness_centrality\n", "before_files": [{"content": "import networkx as nx\n\n__all__ = [\"degree_centrality\", \"betweenness_centrality\", \"closeness_centrality\"]\n\n\ndef degree_centrality(G, nodes):\n r\"\"\"Compute the degree centrality for nodes in a bipartite network.\n\n The degree centrality for a node `v` is the fraction of nodes\n connected to it.\n\n Parameters\n ----------\n G : graph\n A bipartite network\n\n nodes : list or container\n Container with all nodes in one bipartite node set.\n\n Returns\n -------\n centrality : dictionary\n Dictionary keyed by node with bipartite degree centrality as the value.\n\n See Also\n --------\n betweenness_centrality\n closeness_centrality\n :func:`~networkx.algorithms.bipartite.basic.sets`\n :func:`~networkx.algorithms.bipartite.basic.is_bipartite`\n\n Notes\n -----\n The nodes input parameter must contain all nodes in one bipartite node set,\n but the dictionary returned contains all nodes from both bipartite node\n sets. 
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n For unipartite networks, the degree centrality values are\n normalized by dividing by the maximum possible degree (which is\n `n-1` where `n` is the number of nodes in G).\n\n In the bipartite case, the maximum possible degree of a node in a\n bipartite node set is the number of nodes in the opposite node set\n [1]_. The degree centrality for a node `v` in the bipartite\n sets `U` with `n` nodes and `V` with `m` nodes is\n\n .. math::\n\n d_{v} = \\frac{deg(v)}{m}, \\mbox{for} v \\in U ,\n\n d_{v} = \\frac{deg(v)}{n}, \\mbox{for} v \\in V ,\n\n\n where `deg(v)` is the degree of node `v`.\n\n References\n ----------\n .. [1] Borgatti, S.P. and Halgin, D. In press. \"Analyzing Affiliation\n Networks\". In Carrington, P. and Scott, J. (eds) The Sage Handbook\n of Social Network Analysis. Sage Publications.\n https://dx.doi.org/10.4135/9781446294413.n28\n \"\"\"\n top = set(nodes)\n bottom = set(G) - top\n s = 1.0 / len(bottom)\n centrality = {n: d * s for n, d in G.degree(top)}\n s = 1.0 / len(top)\n centrality.update({n: d * s for n, d in G.degree(bottom)})\n return centrality\n\n\ndef betweenness_centrality(G, nodes):\n r\"\"\"Compute betweenness centrality for nodes in a bipartite network.\n\n Betweenness centrality of a node `v` is the sum of the\n fraction of all-pairs shortest paths that pass through `v`.\n\n Values of betweenness are normalized by the maximum possible\n value which for bipartite graphs is limited by the relative size\n of the two node sets [1]_.\n\n Let `n` be the number of nodes in the node set `U` and\n `m` be the number of nodes in the node set `V`, then\n nodes in `U` are normalized by dividing by\n\n .. math::\n\n \\frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,\n\n where\n\n .. math::\n\n s = (n - 1) \\div m , t = (n - 1) \\mod m ,\n\n and nodes in `V` are normalized by dividing by\n\n .. math::\n\n \\frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,\n\n where,\n\n .. math::\n\n p = (m - 1) \\div n , r = (m - 1) \\mod n .\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or container\n Container with all nodes in one bipartite node set.\n\n Returns\n -------\n betweenness : dictionary\n Dictionary keyed by node with bipartite betweenness centrality\n as the value.\n\n See Also\n --------\n degree_centrality\n closeness_centrality\n :func:`~networkx.algorithms.bipartite.basic.sets`\n :func:`~networkx.algorithms.bipartite.basic.is_bipartite`\n\n Notes\n -----\n The nodes input parameter must contain all nodes in one bipartite node set,\n but the dictionary returned contains all nodes from both node sets.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Borgatti, S.P. and Halgin, D. In press. \"Analyzing Affiliation\n Networks\". In Carrington, P. and Scott, J. (eds) The Sage Handbook\n of Social Network Analysis. 
Sage Publications.\n https://dx.doi.org/10.4135/9781446294413.n28\n \"\"\"\n top = set(nodes)\n bottom = set(G) - top\n n = len(top)\n m = len(bottom)\n s, t = divmod(n - 1, m)\n bet_max_top = (\n ((m**2) * ((s + 1) ** 2))\n + (m * (s + 1) * (2 * t - s - 1))\n - (t * ((2 * s) - t + 3))\n ) / 2.0\n p, r = divmod(m - 1, n)\n bet_max_bot = (\n ((n**2) * ((p + 1) ** 2))\n + (n * (p + 1) * (2 * r - p - 1))\n - (r * ((2 * p) - r + 3))\n ) / 2.0\n betweenness = nx.betweenness_centrality(G, normalized=False, weight=None)\n for node in top:\n betweenness[node] /= bet_max_top\n for node in bottom:\n betweenness[node] /= bet_max_bot\n return betweenness\n\n\ndef closeness_centrality(G, nodes, normalized=True):\n r\"\"\"Compute the closeness centrality for nodes in a bipartite network.\n\n The closeness of a node is the distance to all other nodes in the\n graph or in the case that the graph is not connected to all other nodes\n in the connected component containing that node.\n\n Parameters\n ----------\n G : graph\n A bipartite network\n\n nodes : list or container\n Container with all nodes in one bipartite node set.\n\n normalized : bool, optional\n If True (default) normalize by connected component size.\n\n Returns\n -------\n closeness : dictionary\n Dictionary keyed by node with bipartite closeness centrality\n as the value.\n\n See Also\n --------\n betweenness_centrality\n degree_centrality\n :func:`~networkx.algorithms.bipartite.basic.sets`\n :func:`~networkx.algorithms.bipartite.basic.is_bipartite`\n\n Notes\n -----\n The nodes input parameter must contain all nodes in one bipartite node set,\n but the dictionary returned contains all nodes from both node sets.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n Closeness centrality is normalized by the minimum distance possible.\n In the bipartite case the minimum distance for a node in one bipartite\n node set is 1 from all nodes in the other node set and 2 from all\n other nodes in its own set [1]_. Thus the closeness centrality\n for node `v` in the two bipartite sets `U` with\n `n` nodes and `V` with `m` nodes is\n\n .. math::\n\n c_{v} = \\frac{m + 2(n - 1)}{d}, \\mbox{for} v \\in U,\n\n c_{v} = \\frac{n + 2(m - 1)}{d}, \\mbox{for} v \\in V,\n\n where `d` is the sum of the distances from `v` to all\n other nodes.\n\n Higher values of closeness indicate higher centrality.\n\n As in the unipartite case, setting normalized=True causes the\n values to normalized further to n-1 / size(G)-1 where n is the\n number of nodes in the connected part of graph containing the\n node. If the graph is not completely connected, this algorithm\n computes the closeness centrality for each connected part\n separately.\n\n References\n ----------\n .. [1] Borgatti, S.P. and Halgin, D. In press. \"Analyzing Affiliation\n Networks\". In Carrington, P. and Scott, J. (eds) The Sage Handbook\n of Social Network Analysis. 
Sage Publications.\n https://dx.doi.org/10.4135/9781446294413.n28\n \"\"\"\n closeness = {}\n path_length = nx.single_source_shortest_path_length\n top = set(nodes)\n bottom = set(G) - top\n n = len(top)\n m = len(bottom)\n for node in top:\n sp = dict(path_length(G, node))\n totsp = sum(sp.values())\n if totsp > 0.0 and len(G) > 1:\n closeness[node] = (m + 2 * (n - 1)) / totsp\n if normalized:\n s = (len(sp) - 1) / (len(G) - 1)\n closeness[node] *= s\n else:\n closeness[n] = 0.0\n for node in bottom:\n sp = dict(path_length(G, node))\n totsp = sum(sp.values())\n if totsp > 0.0 and len(G) > 1:\n closeness[node] = (n + 2 * (m - 1)) / totsp\n if normalized:\n s = (len(sp) - 1) / (len(G) - 1)\n closeness[node] *= s\n else:\n closeness[n] = 0.0\n return closeness\n", "path": "networkx/algorithms/bipartite/centrality.py"}], "after_files": [{"content": "import networkx as nx\n\n__all__ = [\"degree_centrality\", \"betweenness_centrality\", \"closeness_centrality\"]\n\n\ndef degree_centrality(G, nodes):\n r\"\"\"Compute the degree centrality for nodes in a bipartite network.\n\n The degree centrality for a node `v` is the fraction of nodes\n connected to it.\n\n Parameters\n ----------\n G : graph\n A bipartite network\n\n nodes : list or container\n Container with all nodes in one bipartite node set.\n\n Returns\n -------\n centrality : dictionary\n Dictionary keyed by node with bipartite degree centrality as the value.\n\n See Also\n --------\n betweenness_centrality\n closeness_centrality\n :func:`~networkx.algorithms.bipartite.basic.sets`\n :func:`~networkx.algorithms.bipartite.basic.is_bipartite`\n\n Notes\n -----\n The nodes input parameter must contain all nodes in one bipartite node set,\n but the dictionary returned contains all nodes from both bipartite node\n sets. See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n For unipartite networks, the degree centrality values are\n normalized by dividing by the maximum possible degree (which is\n `n-1` where `n` is the number of nodes in G).\n\n In the bipartite case, the maximum possible degree of a node in a\n bipartite node set is the number of nodes in the opposite node set\n [1]_. The degree centrality for a node `v` in the bipartite\n sets `U` with `n` nodes and `V` with `m` nodes is\n\n .. math::\n\n d_{v} = \\frac{deg(v)}{m}, \\mbox{for} v \\in U ,\n\n d_{v} = \\frac{deg(v)}{n}, \\mbox{for} v \\in V ,\n\n\n where `deg(v)` is the degree of node `v`.\n\n References\n ----------\n .. [1] Borgatti, S.P. and Halgin, D. In press. \"Analyzing Affiliation\n Networks\". In Carrington, P. and Scott, J. (eds) The Sage Handbook\n of Social Network Analysis. 
Sage Publications.\n https://dx.doi.org/10.4135/9781446294413.n28\n \"\"\"\n top = set(nodes)\n bottom = set(G) - top\n s = 1.0 / len(bottom)\n centrality = {n: d * s for n, d in G.degree(top)}\n s = 1.0 / len(top)\n centrality.update({n: d * s for n, d in G.degree(bottom)})\n return centrality\n\n\ndef betweenness_centrality(G, nodes):\n r\"\"\"Compute betweenness centrality for nodes in a bipartite network.\n\n Betweenness centrality of a node `v` is the sum of the\n fraction of all-pairs shortest paths that pass through `v`.\n\n Values of betweenness are normalized by the maximum possible\n value which for bipartite graphs is limited by the relative size\n of the two node sets [1]_.\n\n Let `n` be the number of nodes in the node set `U` and\n `m` be the number of nodes in the node set `V`, then\n nodes in `U` are normalized by dividing by\n\n .. math::\n\n \\frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,\n\n where\n\n .. math::\n\n s = (n - 1) \\div m , t = (n - 1) \\mod m ,\n\n and nodes in `V` are normalized by dividing by\n\n .. math::\n\n \\frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,\n\n where,\n\n .. math::\n\n p = (m - 1) \\div n , r = (m - 1) \\mod n .\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or container\n Container with all nodes in one bipartite node set.\n\n Returns\n -------\n betweenness : dictionary\n Dictionary keyed by node with bipartite betweenness centrality\n as the value.\n\n See Also\n --------\n degree_centrality\n closeness_centrality\n :func:`~networkx.algorithms.bipartite.basic.sets`\n :func:`~networkx.algorithms.bipartite.basic.is_bipartite`\n\n Notes\n -----\n The nodes input parameter must contain all nodes in one bipartite node set,\n but the dictionary returned contains all nodes from both node sets.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Borgatti, S.P. and Halgin, D. In press. \"Analyzing Affiliation\n Networks\". In Carrington, P. and Scott, J. (eds) The Sage Handbook\n of Social Network Analysis. 
Sage Publications.\n https://dx.doi.org/10.4135/9781446294413.n28\n \"\"\"\n top = set(nodes)\n bottom = set(G) - top\n n = len(top)\n m = len(bottom)\n s, t = divmod(n - 1, m)\n bet_max_top = (\n ((m**2) * ((s + 1) ** 2))\n + (m * (s + 1) * (2 * t - s - 1))\n - (t * ((2 * s) - t + 3))\n ) / 2.0\n p, r = divmod(m - 1, n)\n bet_max_bot = (\n ((n**2) * ((p + 1) ** 2))\n + (n * (p + 1) * (2 * r - p - 1))\n - (r * ((2 * p) - r + 3))\n ) / 2.0\n betweenness = nx.betweenness_centrality(G, normalized=False, weight=None)\n for node in top:\n betweenness[node] /= bet_max_top\n for node in bottom:\n betweenness[node] /= bet_max_bot\n return betweenness\n\n\ndef closeness_centrality(G, nodes, normalized=True):\n r\"\"\"Compute the closeness centrality for nodes in a bipartite network.\n\n The closeness of a node is the distance to all other nodes in the\n graph or in the case that the graph is not connected to all other nodes\n in the connected component containing that node.\n\n Parameters\n ----------\n G : graph\n A bipartite network\n\n nodes : list or container\n Container with all nodes in one bipartite node set.\n\n normalized : bool, optional\n If True (default) normalize by connected component size.\n\n Returns\n -------\n closeness : dictionary\n Dictionary keyed by node with bipartite closeness centrality\n as the value.\n\n See Also\n --------\n betweenness_centrality\n degree_centrality\n :func:`~networkx.algorithms.bipartite.basic.sets`\n :func:`~networkx.algorithms.bipartite.basic.is_bipartite`\n\n Notes\n -----\n The nodes input parameter must contain all nodes in one bipartite node set,\n but the dictionary returned contains all nodes from both node sets.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n Closeness centrality is normalized by the minimum distance possible.\n In the bipartite case the minimum distance for a node in one bipartite\n node set is 1 from all nodes in the other node set and 2 from all\n other nodes in its own set [1]_. Thus the closeness centrality\n for node `v` in the two bipartite sets `U` with\n `n` nodes and `V` with `m` nodes is\n\n .. math::\n\n c_{v} = \\frac{m + 2(n - 1)}{d}, \\mbox{for} v \\in U,\n\n c_{v} = \\frac{n + 2(m - 1)}{d}, \\mbox{for} v \\in V,\n\n where `d` is the sum of the distances from `v` to all\n other nodes.\n\n Higher values of closeness indicate higher centrality.\n\n As in the unipartite case, setting normalized=True causes the\n values to normalized further to n-1 / size(G)-1 where n is the\n number of nodes in the connected part of graph containing the\n node. If the graph is not completely connected, this algorithm\n computes the closeness centrality for each connected part\n separately.\n\n References\n ----------\n .. [1] Borgatti, S.P. and Halgin, D. In press. \"Analyzing Affiliation\n Networks\". In Carrington, P. and Scott, J. (eds) The Sage Handbook\n of Social Network Analysis. 
Sage Publications.\n https://dx.doi.org/10.4135/9781446294413.n28\n \"\"\"\n closeness = {}\n path_length = nx.single_source_shortest_path_length\n top = set(nodes)\n bottom = set(G) - top\n n = len(top)\n m = len(bottom)\n for node in top:\n sp = dict(path_length(G, node))\n totsp = sum(sp.values())\n if totsp > 0.0 and len(G) > 1:\n closeness[node] = (m + 2 * (n - 1)) / totsp\n if normalized:\n s = (len(sp) - 1) / (len(G) - 1)\n closeness[node] *= s\n else:\n closeness[node] = 0.0\n for node in bottom:\n sp = dict(path_length(G, node))\n totsp = sum(sp.values())\n if totsp > 0.0 and len(G) > 1:\n closeness[node] = (n + 2 * (m - 1)) / totsp\n if normalized:\n s = (len(sp) - 1) / (len(G) - 1)\n closeness[node] *= s\n else:\n closeness[node] = 0.0\n return closeness\n", "path": "networkx/algorithms/bipartite/centrality.py"}]}
| 3,860 | 231 |
gh_patches_debug_4932
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-7011
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OCB mode (AESOCB3) should support nonce sizes up to 15 bytes
[`cryptography.hazmat.primitives.ciphers.aead.AESOCB3._check_params`](https://github.com/pyca/cryptography/blob/d98e0bd355544a28cc36aba3a05d6e44604ac289/src/cryptography/hazmat/primitives/ciphers/aead.py#L281) requires that the nonce size be fixed at 12 bytes, or 96 bits.
However, [RFC 7253 §4.2](https://datatracker.ietf.org/doc/html/rfc7253#section-4.2) suggests that the nonce can go up to 120 bits.
The OpenSSL backend to cryptography doesn't appear to be the source of the limitation: [OpenSSL's OCB interface](https://github.com/openssl/openssl/blob/master/doc/man3/EVP_EncryptInit.pod#gcm-and-ocb-modes) suggests that `EVP_CTRL_AEAD_SET_IVLEN` permits setting a nonce up to 15 bytes (120 bits), in line with RFC 7253. (This has to be done between the first `EVP_CipherInit_ex` (where the `iv` parameter should be `NULL`) and the subsequent `EVP_CipherInit_ex` invocation that sets the `iv`, if i'm understanding the OpenSSL interface correctly.)
The [next revision of the OpenPGP standard](https://datatracker.ietf.org/doc/draft-ietf-openpgp-crypto-refresh/) looks likely to encourage the use of OCB, and is in discussion about [how many bits to use for the nonce](https://gitlab.com/openpgp-wg/rfc4880bis/-/issues/83). If it goes with anything other than 96, any python implementation of OpenPGP (e.g. [pgpy](https://github.com/SecurityInnovation/PGPy/)) would need to write their own OCB wrapper around even lower-level `hazmat` primitives, or use some python module other than `cryptography`.
It would be great if cryptography would be able to accept nonces up to 120 bits (15 bytes) to accommodate this use case.
--- END ISSUE ---
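A minimal sketch of the usage the issue asks for, based on the `AESOCB3` API shown in the file below; under the current 12-byte-only check the 15-byte nonce is rejected with a `ValueError`:

```python
import os

from cryptography.hazmat.primitives.ciphers.aead import AESOCB3

key = AESOCB3.generate_key(bit_length=128)
aesocb3 = AESOCB3(key)

nonce = os.urandom(15)  # 120-bit nonce, the maximum RFC 7253 permits
data = b"a secret message"
aad = b"authenticated but unencrypted"

# Today _check_params rejects this with
# ValueError("Nonce must be 96-bits (12 bytes)"); the request is for
# nonces of 13-15 bytes to be accepted as well.
ct = aesocb3.encrypt(nonce, data, aad)
pt = aesocb3.decrypt(nonce, ct, aad)
```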
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/ciphers/aead.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import os
7 import typing
8
9 from cryptography import exceptions, utils
10 from cryptography.hazmat.backends.openssl import aead
11 from cryptography.hazmat.backends.openssl.backend import backend
12
13
14 class ChaCha20Poly1305:
15 _MAX_SIZE = 2**32
16
17 def __init__(self, key: bytes):
18 if not backend.aead_cipher_supported(self):
19 raise exceptions.UnsupportedAlgorithm(
20 "ChaCha20Poly1305 is not supported by this version of OpenSSL",
21 exceptions._Reasons.UNSUPPORTED_CIPHER,
22 )
23 utils._check_byteslike("key", key)
24
25 if len(key) != 32:
26 raise ValueError("ChaCha20Poly1305 key must be 32 bytes.")
27
28 self._key = key
29
30 @classmethod
31 def generate_key(cls) -> bytes:
32 return os.urandom(32)
33
34 def encrypt(
35 self,
36 nonce: bytes,
37 data: bytes,
38 associated_data: typing.Optional[bytes],
39 ) -> bytes:
40 if associated_data is None:
41 associated_data = b""
42
43 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
44 # This is OverflowError to match what cffi would raise
45 raise OverflowError(
46 "Data or associated data too long. Max 2**32 bytes"
47 )
48
49 self._check_params(nonce, data, associated_data)
50 return aead._encrypt(backend, self, nonce, data, associated_data, 16)
51
52 def decrypt(
53 self,
54 nonce: bytes,
55 data: bytes,
56 associated_data: typing.Optional[bytes],
57 ) -> bytes:
58 if associated_data is None:
59 associated_data = b""
60
61 self._check_params(nonce, data, associated_data)
62 return aead._decrypt(backend, self, nonce, data, associated_data, 16)
63
64 def _check_params(
65 self,
66 nonce: bytes,
67 data: bytes,
68 associated_data: bytes,
69 ) -> None:
70 utils._check_byteslike("nonce", nonce)
71 utils._check_bytes("data", data)
72 utils._check_bytes("associated_data", associated_data)
73 if len(nonce) != 12:
74 raise ValueError("Nonce must be 12 bytes")
75
76
77 class AESCCM:
78 _MAX_SIZE = 2**32
79
80 def __init__(self, key: bytes, tag_length: int = 16):
81 utils._check_byteslike("key", key)
82 if len(key) not in (16, 24, 32):
83 raise ValueError("AESCCM key must be 128, 192, or 256 bits.")
84
85 self._key = key
86 if not isinstance(tag_length, int):
87 raise TypeError("tag_length must be an integer")
88
89 if tag_length not in (4, 6, 8, 10, 12, 14, 16):
90 raise ValueError("Invalid tag_length")
91
92 self._tag_length = tag_length
93
94 if not backend.aead_cipher_supported(self):
95 raise exceptions.UnsupportedAlgorithm(
96 "AESCCM is not supported by this version of OpenSSL",
97 exceptions._Reasons.UNSUPPORTED_CIPHER,
98 )
99
100 @classmethod
101 def generate_key(cls, bit_length: int) -> bytes:
102 if not isinstance(bit_length, int):
103 raise TypeError("bit_length must be an integer")
104
105 if bit_length not in (128, 192, 256):
106 raise ValueError("bit_length must be 128, 192, or 256")
107
108 return os.urandom(bit_length // 8)
109
110 def encrypt(
111 self,
112 nonce: bytes,
113 data: bytes,
114 associated_data: typing.Optional[bytes],
115 ) -> bytes:
116 if associated_data is None:
117 associated_data = b""
118
119 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
120 # This is OverflowError to match what cffi would raise
121 raise OverflowError(
122 "Data or associated data too long. Max 2**32 bytes"
123 )
124
125 self._check_params(nonce, data, associated_data)
126 self._validate_lengths(nonce, len(data))
127 return aead._encrypt(
128 backend, self, nonce, data, associated_data, self._tag_length
129 )
130
131 def decrypt(
132 self,
133 nonce: bytes,
134 data: bytes,
135 associated_data: typing.Optional[bytes],
136 ) -> bytes:
137 if associated_data is None:
138 associated_data = b""
139
140 self._check_params(nonce, data, associated_data)
141 return aead._decrypt(
142 backend, self, nonce, data, associated_data, self._tag_length
143 )
144
145 def _validate_lengths(self, nonce: bytes, data_len: int) -> None:
146 # For information about computing this, see
147 # https://tools.ietf.org/html/rfc3610#section-2.1
148 l_val = 15 - len(nonce)
149 if 2 ** (8 * l_val) < data_len:
150 raise ValueError("Data too long for nonce")
151
152 def _check_params(
153 self, nonce: bytes, data: bytes, associated_data: bytes
154 ) -> None:
155 utils._check_byteslike("nonce", nonce)
156 utils._check_bytes("data", data)
157 utils._check_bytes("associated_data", associated_data)
158 if not 7 <= len(nonce) <= 13:
159 raise ValueError("Nonce must be between 7 and 13 bytes")
160
161
162 class AESGCM:
163 _MAX_SIZE = 2**32
164
165 def __init__(self, key: bytes):
166 utils._check_byteslike("key", key)
167 if len(key) not in (16, 24, 32):
168 raise ValueError("AESGCM key must be 128, 192, or 256 bits.")
169
170 self._key = key
171
172 @classmethod
173 def generate_key(cls, bit_length: int) -> bytes:
174 if not isinstance(bit_length, int):
175 raise TypeError("bit_length must be an integer")
176
177 if bit_length not in (128, 192, 256):
178 raise ValueError("bit_length must be 128, 192, or 256")
179
180 return os.urandom(bit_length // 8)
181
182 def encrypt(
183 self,
184 nonce: bytes,
185 data: bytes,
186 associated_data: typing.Optional[bytes],
187 ) -> bytes:
188 if associated_data is None:
189 associated_data = b""
190
191 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
192 # This is OverflowError to match what cffi would raise
193 raise OverflowError(
194 "Data or associated data too long. Max 2**32 bytes"
195 )
196
197 self._check_params(nonce, data, associated_data)
198 return aead._encrypt(backend, self, nonce, data, associated_data, 16)
199
200 def decrypt(
201 self,
202 nonce: bytes,
203 data: bytes,
204 associated_data: typing.Optional[bytes],
205 ) -> bytes:
206 if associated_data is None:
207 associated_data = b""
208
209 self._check_params(nonce, data, associated_data)
210 return aead._decrypt(backend, self, nonce, data, associated_data, 16)
211
212 def _check_params(
213 self,
214 nonce: bytes,
215 data: bytes,
216 associated_data: bytes,
217 ) -> None:
218 utils._check_byteslike("nonce", nonce)
219 utils._check_bytes("data", data)
220 utils._check_bytes("associated_data", associated_data)
221 if len(nonce) < 8 or len(nonce) > 128:
222 raise ValueError("Nonce must be between 8 and 128 bytes")
223
224
225 class AESOCB3:
226 _MAX_SIZE = 2**32
227
228 def __init__(self, key: bytes):
229 utils._check_byteslike("key", key)
230 if len(key) not in (16, 24, 32):
231 raise ValueError("AESOCB3 key must be 128, 192, or 256 bits.")
232
233 self._key = key
234
235 if not backend.aead_cipher_supported(self):
236 raise exceptions.UnsupportedAlgorithm(
237 "OCB3 is not supported by this version of OpenSSL",
238 exceptions._Reasons.UNSUPPORTED_CIPHER,
239 )
240
241 @classmethod
242 def generate_key(cls, bit_length: int) -> bytes:
243 if not isinstance(bit_length, int):
244 raise TypeError("bit_length must be an integer")
245
246 if bit_length not in (128, 192, 256):
247 raise ValueError("bit_length must be 128, 192, or 256")
248
249 return os.urandom(bit_length // 8)
250
251 def encrypt(
252 self,
253 nonce: bytes,
254 data: bytes,
255 associated_data: typing.Optional[bytes],
256 ) -> bytes:
257 if associated_data is None:
258 associated_data = b""
259
260 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
261 # This is OverflowError to match what cffi would raise
262 raise OverflowError(
263 "Data or associated data too long. Max 2**32 bytes"
264 )
265
266 self._check_params(nonce, data, associated_data)
267 return aead._encrypt(backend, self, nonce, data, associated_data, 16)
268
269 def decrypt(
270 self,
271 nonce: bytes,
272 data: bytes,
273 associated_data: typing.Optional[bytes],
274 ) -> bytes:
275 if associated_data is None:
276 associated_data = b""
277
278 self._check_params(nonce, data, associated_data)
279 return aead._decrypt(backend, self, nonce, data, associated_data, 16)
280
281 def _check_params(
282 self,
283 nonce: bytes,
284 data: bytes,
285 associated_data: bytes,
286 ) -> None:
287 utils._check_byteslike("nonce", nonce)
288 utils._check_bytes("data", data)
289 utils._check_bytes("associated_data", associated_data)
290 if len(nonce) != 12:
291 raise ValueError("Nonce must be 96-bits (12 bytes)")
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/hazmat/primitives/ciphers/aead.py b/src/cryptography/hazmat/primitives/ciphers/aead.py
--- a/src/cryptography/hazmat/primitives/ciphers/aead.py
+++ b/src/cryptography/hazmat/primitives/ciphers/aead.py
@@ -287,5 +287,5 @@
utils._check_byteslike("nonce", nonce)
utils._check_bytes("data", data)
utils._check_bytes("associated_data", associated_data)
- if len(nonce) != 12:
- raise ValueError("Nonce must be 96-bits (12 bytes)")
+ if len(nonce) < 12 or len(nonce) > 15:
+ raise ValueError("Nonce must be between 12 and 15 bytes")
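A hypothetical check of the new bounds (illustrative test code under the patched check, not taken from the project's test suite):

```python
import os

import pytest
from cryptography.hazmat.primitives.ciphers.aead import AESOCB3

aesocb3 = AESOCB3(AESOCB3.generate_key(128))

# Nonces of 12 through 15 bytes are accepted after the patch...
for size in (12, 13, 14, 15):
    aesocb3.encrypt(os.urandom(size), b"data", None)

# ...while sizes outside that window still raise ValueError.
for size in (11, 16):
    with pytest.raises(ValueError):
        aesocb3.encrypt(os.urandom(size), b"data", None)
```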
|
{"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/ciphers/aead.py b/src/cryptography/hazmat/primitives/ciphers/aead.py\n--- a/src/cryptography/hazmat/primitives/ciphers/aead.py\n+++ b/src/cryptography/hazmat/primitives/ciphers/aead.py\n@@ -287,5 +287,5 @@\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n- if len(nonce) != 12:\n- raise ValueError(\"Nonce must be 96-bits (12 bytes)\")\n+ if len(nonce) < 12 or len(nonce) > 15:\n+ raise ValueError(\"Nonce must be between 12 and 15 bytes\")\n", "issue": "OCB mode (AESOCB3) should support nonce sizes up to 15 bytes\n[`cryptography.hazmat.primitives.ciphers.aead.AESOCB3._check_params`](https://github.com/pyca/cryptography/blob/d98e0bd355544a28cc36aba3a05d6e44604ac289/src/cryptography/hazmat/primitives/ciphers/aead.py#L281) requires that the nonce size be fixed at 12 bytes, or 96 bits.\r\n\r\nHowever, [RFC 7253 \u00a74.2](https://datatracker.ietf.org/doc/html/rfc7253#section-4.2) suggests that the nonce can go up to 120 bits.\r\n\r\nThe OpenSSL backend to cryptography doesn't appear to be the source of the limitation: [OpenSSL's OCB interface](https://github.com/openssl/openssl/blob/master/doc/man3/EVP_EncryptInit.pod#gcm-and-ocb-modes) suggests that `EVP_CTRL_AEAD_SET_IVLEN` permits setting a nonce up to 15 bytes (120 bits), in line with RFC 7253. (This has to be done between the first `EVP_CipherInit_ex` (where the `iv` parameter should be `NULL`) and the subsequent `EVP_CipherInit_ex` invocation that sets the `iv`, if i'm understanding the OpenSSL interface correctly.)\r\n\r\nThe [next revision of the OpenPGP standard](https://datatracker.ietf.org/doc/draft-ietf-openpgp-crypto-refresh/) looks likely to encourage the use of OCB, and is in discussion about [how many bits to use for the nonce](https://gitlab.com/openpgp-wg/rfc4880bis/-/issues/83). If it goes with anything other than 96, any python implementation of OpenPGP (e.g. [pgpy](https://github.com/SecurityInnovation/PGPy/)) would need to write their own OCB wrapper around even lower-level `hazmat` primitives, or use some python module other than `cryptography`.\r\n\r\nIt would be great if cryptography would be able to accept nonces up to 120 bits (15 bytes) to accomodate this use case.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport typing\n\nfrom cryptography import exceptions, utils\nfrom cryptography.hazmat.backends.openssl import aead\nfrom cryptography.hazmat.backends.openssl.backend import backend\n\n\nclass ChaCha20Poly1305:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes):\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"ChaCha20Poly1305 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n utils._check_byteslike(\"key\", key)\n\n if len(key) != 32:\n raise ValueError(\"ChaCha20Poly1305 key must be 32 bytes.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls) -> bytes:\n return os.urandom(32)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, associated_data, 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, associated_data, 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) != 12:\n raise ValueError(\"Nonce must be 12 bytes\")\n\n\nclass AESCCM:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes, tag_length: int = 16):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESCCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n if not isinstance(tag_length, int):\n raise TypeError(\"tag_length must be an integer\")\n\n if tag_length not in (4, 6, 8, 10, 12, 14, 16):\n raise ValueError(\"Invalid tag_length\")\n\n self._tag_length = tag_length\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AESCCM is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n self._validate_lengths(nonce, len(data))\n return aead._encrypt(\n backend, self, nonce, data, associated_data, self._tag_length\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(\n backend, self, nonce, data, associated_data, self._tag_length\n )\n\n def _validate_lengths(self, nonce: bytes, data_len: int) -> None:\n # For information about computing this, see\n # https://tools.ietf.org/html/rfc3610#section-2.1\n l_val = 15 - len(nonce)\n if 2 ** (8 * l_val) < data_len:\n raise ValueError(\"Data too long for nonce\")\n\n def _check_params(\n self, nonce: bytes, data: bytes, associated_data: bytes\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if not 7 <= len(nonce) <= 13:\n raise ValueError(\"Nonce must be between 7 and 13 bytes\")\n\n\nclass AESGCM:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESGCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, associated_data, 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, associated_data, 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 8 or len(nonce) > 128:\n raise ValueError(\"Nonce must be between 8 and 128 bytes\")\n\n\nclass AESOCB3:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESOCB3 key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"OCB3 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, associated_data, 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, associated_data, 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) != 12:\n raise ValueError(\"Nonce must be 96-bits (12 bytes)\")\n", "path": "src/cryptography/hazmat/primitives/ciphers/aead.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport typing\n\nfrom cryptography import exceptions, utils\nfrom cryptography.hazmat.backends.openssl import aead\nfrom cryptography.hazmat.backends.openssl.backend import backend\n\n\nclass ChaCha20Poly1305:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes):\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"ChaCha20Poly1305 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n utils._check_byteslike(\"key\", key)\n\n if len(key) != 32:\n raise ValueError(\"ChaCha20Poly1305 key must be 32 bytes.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls) -> bytes:\n return os.urandom(32)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, associated_data, 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, associated_data, 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) != 12:\n raise ValueError(\"Nonce must be 12 bytes\")\n\n\nclass AESCCM:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes, tag_length: int = 16):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESCCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n if not isinstance(tag_length, int):\n raise TypeError(\"tag_length must be an integer\")\n\n if tag_length not in (4, 6, 8, 10, 12, 14, 16):\n raise ValueError(\"Invalid tag_length\")\n\n self._tag_length = tag_length\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AESCCM is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n self._validate_lengths(nonce, len(data))\n return aead._encrypt(\n backend, self, nonce, data, associated_data, self._tag_length\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(\n backend, self, nonce, data, associated_data, self._tag_length\n )\n\n def _validate_lengths(self, nonce: bytes, data_len: int) -> None:\n # For information about computing this, see\n # https://tools.ietf.org/html/rfc3610#section-2.1\n l_val = 15 - len(nonce)\n if 2 ** (8 * l_val) < data_len:\n raise ValueError(\"Data too long for nonce\")\n\n def _check_params(\n self, nonce: bytes, data: bytes, associated_data: bytes\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if not 7 <= len(nonce) <= 13:\n raise ValueError(\"Nonce must be between 7 and 13 bytes\")\n\n\nclass AESGCM:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESGCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, associated_data, 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, associated_data, 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 8 or len(nonce) > 128:\n raise ValueError(\"Nonce must be between 8 and 128 bytes\")\n\n\nclass AESOCB3:\n _MAX_SIZE = 2**32\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESOCB3 key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"OCB3 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**32 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, associated_data, 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, associated_data, 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 12 or len(nonce) > 15:\n raise ValueError(\"Nonce must be between 12 and 15 bytes\")\n", "path": "src/cryptography/hazmat/primitives/ciphers/aead.py"}]}
| 3,896 | 180 |
gh_patches_debug_12420
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-6173
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Failed to pass storage_options parameter to the to_csv function of PandasOnUnidistIO class with fsspec
Similar to #6097.
--- END ISSUE ---
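A minimal sketch of the user-facing call that hits this path; the bucket name and fsspec options below are placeholders. The chunk-level in-memory `to_csv` call inside the unidist writer previously also received `storage_options`, which is what the accompanying diff pops before that call and restores afterwards.

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# storage_options is forwarded to fsspec when the target is a URL such as
# s3://; on the PandasOnUnidist engine this used to fail because each
# partition's intermediate in-memory to_csv call also received it.
df.to_csv(
    "s3://my-bucket/out.csv",         # placeholder bucket
    storage_options={"anon": False},  # placeholder fsspec options
)
```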
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """The module holds the factory which performs I/O using pandas on unidist."""
15
16 import io
17
18 import pandas
19
20 from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
21 from modin.core.execution.unidist.generic.io import UnidistIO
22 from modin.core.io import (
23 CSVDispatcher,
24 FWFDispatcher,
25 JSONDispatcher,
26 ParquetDispatcher,
27 FeatherDispatcher,
28 SQLDispatcher,
29 ExcelDispatcher,
30 )
31 from modin.core.storage_formats.pandas.parsers import (
32 PandasCSVParser,
33 PandasFWFParser,
34 PandasJSONParser,
35 PandasParquetParser,
36 PandasFeatherParser,
37 PandasSQLParser,
38 PandasExcelParser,
39 )
40 from modin.core.execution.unidist.common import UnidistWrapper, SignalActor
41 from ..dataframe import PandasOnUnidistDataframe
42 from ..partitioning import PandasOnUnidistDataframePartition
43
44
45 class PandasOnUnidistIO(UnidistIO):
46 """Factory providing methods for performing I/O operations using pandas as storage format on unidist as engine."""
47
48 frame_cls = PandasOnUnidistDataframe
49 query_compiler_cls = PandasQueryCompiler
50 build_args = dict(
51 frame_partition_cls=PandasOnUnidistDataframePartition,
52 query_compiler_cls=PandasQueryCompiler,
53 frame_cls=PandasOnUnidistDataframe,
54 base_io=UnidistIO,
55 )
56
57 def __make_read(*classes, build_args=build_args):
58 # used to reduce code duplication
59 return type("", (UnidistWrapper, *classes), build_args).read
60
61 def __make_write(*classes, build_args=build_args):
62 # used to reduce code duplication
63 return type("", (UnidistWrapper, *classes), build_args).write
64
65 read_csv = __make_read(PandasCSVParser, CSVDispatcher)
66 read_fwf = __make_read(PandasFWFParser, FWFDispatcher)
67 read_json = __make_read(PandasJSONParser, JSONDispatcher)
68 read_parquet = __make_read(PandasParquetParser, ParquetDispatcher)
69 to_parquet = __make_write(ParquetDispatcher)
70 # Blocked on pandas-dev/pandas#12236. It is faster to default to pandas.
71 # read_hdf = __make_read(PandasHDFParser, HDFReader)
72 read_feather = __make_read(PandasFeatherParser, FeatherDispatcher)
73 read_sql = __make_read(PandasSQLParser, SQLDispatcher)
74 to_sql = __make_write(SQLDispatcher)
75 read_excel = __make_read(PandasExcelParser, ExcelDispatcher)
76
77 del __make_read # to not pollute class namespace
78 del __make_write # to not pollute class namespace
79
80 @staticmethod
81 def _to_csv_check_support(kwargs):
82 """
83 Check if parallel version of ``to_csv`` could be used.
84
85 Parameters
86 ----------
87 kwargs : dict
88 Keyword arguments passed to ``.to_csv()``.
89
90 Returns
91 -------
92 bool
93 Whether parallel version of ``to_csv`` is applicable.
94 """
95 path_or_buf = kwargs["path_or_buf"]
96 compression = kwargs["compression"]
97 if not isinstance(path_or_buf, str):
98 return False
99 # case when the pointer is placed at the beginning of the file.
100 if "r" in kwargs["mode"] and "+" in kwargs["mode"]:
101 return False
102 # encodings with BOM don't support;
103 # instead of one mark in result bytes we will have them by the number of partitions
104 # so we should fallback in pandas for `utf-16`, `utf-32` with all aliases, in instance
105 # (`utf_32_be`, `utf_16_le` and so on)
106 if kwargs["encoding"] is not None:
107 encoding = kwargs["encoding"].lower()
108 if "u" in encoding or "utf" in encoding:
109 if "16" in encoding or "32" in encoding:
110 return False
111 if compression is None or not compression == "infer":
112 return False
113 if any((path_or_buf.endswith(ext) for ext in [".gz", ".bz2", ".zip", ".xz"])):
114 return False
115 return True
116
117 @classmethod
118 def to_csv(cls, qc, **kwargs):
119 """
120 Write records stored in the `qc` to a CSV file.
121
122 Parameters
123 ----------
124 qc : BaseQueryCompiler
125 The query compiler of the Modin dataframe that we want to run ``to_csv`` on.
126 **kwargs : dict
127 Parameters for ``pandas.to_csv(**kwargs)``.
128 """
129 if not cls._to_csv_check_support(kwargs):
130 return UnidistIO.to_csv(qc, **kwargs)
131
132 signals = SignalActor.remote(len(qc._modin_frame._partitions) + 1)
133
134 def func(df, **kw): # pragma: no cover
135 """
136 Dump a chunk of rows as csv, then save them to target maintaining order.
137
138 Parameters
139 ----------
140 df : pandas.DataFrame
141 A chunk of rows to write to a CSV file.
142 **kw : dict
143 Arguments to pass to ``pandas.to_csv(**kw)`` plus an extra argument
144 `partition_idx` serving as chunk index to maintain rows order.
145 """
146 partition_idx = kw["partition_idx"]
147 # the copy is made to not implicitly change the input parameters;
148 # to write to an intermediate buffer, we need to change `path_or_buf` in kwargs
149 csv_kwargs = kwargs.copy()
150 if partition_idx != 0:
151 # we need to create a new file only for first recording
152 # all the rest should be recorded in appending mode
153 if "w" in csv_kwargs["mode"]:
154 csv_kwargs["mode"] = csv_kwargs["mode"].replace("w", "a")
155 # It is enough to write the header for the first partition
156 csv_kwargs["header"] = False
157
158 # for parallelization purposes, each partition is written to an intermediate buffer
159 path_or_buf = csv_kwargs["path_or_buf"]
160 is_binary = "b" in csv_kwargs["mode"]
161 csv_kwargs["path_or_buf"] = io.BytesIO() if is_binary else io.StringIO()
162 df.to_csv(**csv_kwargs)
163 content = csv_kwargs["path_or_buf"].getvalue()
164 csv_kwargs["path_or_buf"].close()
165
166 # each process waits for its turn to write to a file
167 UnidistWrapper.materialize(signals.wait.remote(partition_idx))
168
169 # preparing to write data from the buffer to a file
170 with pandas.io.common.get_handle(
171 path_or_buf,
172 # in case when using URL in implicit text mode
173 # pandas try to open `path_or_buf` in binary mode
174 csv_kwargs["mode"] if is_binary else csv_kwargs["mode"] + "t",
175 encoding=kwargs["encoding"],
176 errors=kwargs["errors"],
177 compression=kwargs["compression"],
178 storage_options=kwargs.get("storage_options", None),
179 is_text=not is_binary,
180 ) as handles:
181 handles.handle.write(content)
182
183 # signal that the next process can start writing to the file
184 UnidistWrapper.materialize(signals.send.remote(partition_idx + 1))
185 # used for synchronization purposes
186 return pandas.DataFrame()
187
188 # signaling that the partition with id==0 can be written to the file
189 UnidistWrapper.materialize(signals.send.remote(0))
190 # Ensure that the metadata is syncrhonized
191 qc._modin_frame._propagate_index_objs(axis=None)
192 result = qc._modin_frame._partition_mgr_cls.map_axis_partitions(
193 axis=1,
194 partitions=qc._modin_frame._partitions,
195 map_func=func,
196 keep_partitioning=True,
197 lengths=None,
198 enumerate_partitions=True,
199 max_retries=0,
200 )
201 # pending completion
202 UnidistWrapper.materialize(
203 [part.list_of_blocks[0] for row in result for part in row]
204 )
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py b/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py
--- a/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py
+++ b/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py
@@ -159,7 +159,9 @@
path_or_buf = csv_kwargs["path_or_buf"]
is_binary = "b" in csv_kwargs["mode"]
csv_kwargs["path_or_buf"] = io.BytesIO() if is_binary else io.StringIO()
+ storage_options = csv_kwargs.pop("storage_options", None)
df.to_csv(**csv_kwargs)
+ csv_kwargs.update({"storage_options": storage_options})
content = csv_kwargs["path_or_buf"].getvalue()
csv_kwargs["path_or_buf"].close()
|
{"golden_diff": "diff --git a/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py b/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py\n--- a/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py\n+++ b/modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py\n@@ -159,7 +159,9 @@\n path_or_buf = csv_kwargs[\"path_or_buf\"]\n is_binary = \"b\" in csv_kwargs[\"mode\"]\n csv_kwargs[\"path_or_buf\"] = io.BytesIO() if is_binary else io.StringIO()\n+ storage_options = csv_kwargs.pop(\"storage_options\", None)\n df.to_csv(**csv_kwargs)\n+ csv_kwargs.update({\"storage_options\": storage_options})\n content = csv_kwargs[\"path_or_buf\"].getvalue()\n csv_kwargs[\"path_or_buf\"].close()\n", "issue": "BUG: Failed to pass storage_options parameter to the to_csv function of PandasOnUnidistIO class with fsspec\nSimilar to #6097.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"The module holds the factory which performs I/O using pandas on unidist.\"\"\"\n\nimport io\n\nimport pandas\n\nfrom modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler\nfrom modin.core.execution.unidist.generic.io import UnidistIO\nfrom modin.core.io import (\n CSVDispatcher,\n FWFDispatcher,\n JSONDispatcher,\n ParquetDispatcher,\n FeatherDispatcher,\n SQLDispatcher,\n ExcelDispatcher,\n)\nfrom modin.core.storage_formats.pandas.parsers import (\n PandasCSVParser,\n PandasFWFParser,\n PandasJSONParser,\n PandasParquetParser,\n PandasFeatherParser,\n PandasSQLParser,\n PandasExcelParser,\n)\nfrom modin.core.execution.unidist.common import UnidistWrapper, SignalActor\nfrom ..dataframe import PandasOnUnidistDataframe\nfrom ..partitioning import PandasOnUnidistDataframePartition\n\n\nclass PandasOnUnidistIO(UnidistIO):\n \"\"\"Factory providing methods for performing I/O operations using pandas as storage format on unidist as engine.\"\"\"\n\n frame_cls = PandasOnUnidistDataframe\n query_compiler_cls = PandasQueryCompiler\n build_args = dict(\n frame_partition_cls=PandasOnUnidistDataframePartition,\n query_compiler_cls=PandasQueryCompiler,\n frame_cls=PandasOnUnidistDataframe,\n base_io=UnidistIO,\n )\n\n def __make_read(*classes, build_args=build_args):\n # used to reduce code duplication\n return type(\"\", (UnidistWrapper, *classes), build_args).read\n\n def __make_write(*classes, build_args=build_args):\n # used to reduce code duplication\n return type(\"\", (UnidistWrapper, *classes), build_args).write\n\n read_csv = __make_read(PandasCSVParser, CSVDispatcher)\n read_fwf = __make_read(PandasFWFParser, FWFDispatcher)\n read_json = __make_read(PandasJSONParser, JSONDispatcher)\n read_parquet = __make_read(PandasParquetParser, ParquetDispatcher)\n to_parquet = 
__make_write(ParquetDispatcher)\n # Blocked on pandas-dev/pandas#12236. It is faster to default to pandas.\n # read_hdf = __make_read(PandasHDFParser, HDFReader)\n read_feather = __make_read(PandasFeatherParser, FeatherDispatcher)\n read_sql = __make_read(PandasSQLParser, SQLDispatcher)\n to_sql = __make_write(SQLDispatcher)\n read_excel = __make_read(PandasExcelParser, ExcelDispatcher)\n\n del __make_read # to not pollute class namespace\n del __make_write # to not pollute class namespace\n\n @staticmethod\n def _to_csv_check_support(kwargs):\n \"\"\"\n Check if parallel version of ``to_csv`` could be used.\n\n Parameters\n ----------\n kwargs : dict\n Keyword arguments passed to ``.to_csv()``.\n\n Returns\n -------\n bool\n Whether parallel version of ``to_csv`` is applicable.\n \"\"\"\n path_or_buf = kwargs[\"path_or_buf\"]\n compression = kwargs[\"compression\"]\n if not isinstance(path_or_buf, str):\n return False\n # case when the pointer is placed at the beginning of the file.\n if \"r\" in kwargs[\"mode\"] and \"+\" in kwargs[\"mode\"]:\n return False\n # encodings with BOM don't support;\n # instead of one mark in result bytes we will have them by the number of partitions\n # so we should fallback in pandas for `utf-16`, `utf-32` with all aliases, in instance\n # (`utf_32_be`, `utf_16_le` and so on)\n if kwargs[\"encoding\"] is not None:\n encoding = kwargs[\"encoding\"].lower()\n if \"u\" in encoding or \"utf\" in encoding:\n if \"16\" in encoding or \"32\" in encoding:\n return False\n if compression is None or not compression == \"infer\":\n return False\n if any((path_or_buf.endswith(ext) for ext in [\".gz\", \".bz2\", \".zip\", \".xz\"])):\n return False\n return True\n\n @classmethod\n def to_csv(cls, qc, **kwargs):\n \"\"\"\n Write records stored in the `qc` to a CSV file.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want to run ``to_csv`` on.\n **kwargs : dict\n Parameters for ``pandas.to_csv(**kwargs)``.\n \"\"\"\n if not cls._to_csv_check_support(kwargs):\n return UnidistIO.to_csv(qc, **kwargs)\n\n signals = SignalActor.remote(len(qc._modin_frame._partitions) + 1)\n\n def func(df, **kw): # pragma: no cover\n \"\"\"\n Dump a chunk of rows as csv, then save them to target maintaining order.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A chunk of rows to write to a CSV file.\n **kw : dict\n Arguments to pass to ``pandas.to_csv(**kw)`` plus an extra argument\n `partition_idx` serving as chunk index to maintain rows order.\n \"\"\"\n partition_idx = kw[\"partition_idx\"]\n # the copy is made to not implicitly change the input parameters;\n # to write to an intermediate buffer, we need to change `path_or_buf` in kwargs\n csv_kwargs = kwargs.copy()\n if partition_idx != 0:\n # we need to create a new file only for first recording\n # all the rest should be recorded in appending mode\n if \"w\" in csv_kwargs[\"mode\"]:\n csv_kwargs[\"mode\"] = csv_kwargs[\"mode\"].replace(\"w\", \"a\")\n # It is enough to write the header for the first partition\n csv_kwargs[\"header\"] = False\n\n # for parallelization purposes, each partition is written to an intermediate buffer\n path_or_buf = csv_kwargs[\"path_or_buf\"]\n is_binary = \"b\" in csv_kwargs[\"mode\"]\n csv_kwargs[\"path_or_buf\"] = io.BytesIO() if is_binary else io.StringIO()\n df.to_csv(**csv_kwargs)\n content = csv_kwargs[\"path_or_buf\"].getvalue()\n csv_kwargs[\"path_or_buf\"].close()\n\n # each process waits for its turn to write to a file\n 
UnidistWrapper.materialize(signals.wait.remote(partition_idx))\n\n # preparing to write data from the buffer to a file\n with pandas.io.common.get_handle(\n path_or_buf,\n # in case when using URL in implicit text mode\n # pandas try to open `path_or_buf` in binary mode\n csv_kwargs[\"mode\"] if is_binary else csv_kwargs[\"mode\"] + \"t\",\n encoding=kwargs[\"encoding\"],\n errors=kwargs[\"errors\"],\n compression=kwargs[\"compression\"],\n storage_options=kwargs.get(\"storage_options\", None),\n is_text=not is_binary,\n ) as handles:\n handles.handle.write(content)\n\n # signal that the next process can start writing to the file\n UnidistWrapper.materialize(signals.send.remote(partition_idx + 1))\n # used for synchronization purposes\n return pandas.DataFrame()\n\n # signaling that the partition with id==0 can be written to the file\n UnidistWrapper.materialize(signals.send.remote(0))\n # Ensure that the metadata is syncrhonized\n qc._modin_frame._propagate_index_objs(axis=None)\n result = qc._modin_frame._partition_mgr_cls.map_axis_partitions(\n axis=1,\n partitions=qc._modin_frame._partitions,\n map_func=func,\n keep_partitioning=True,\n lengths=None,\n enumerate_partitions=True,\n max_retries=0,\n )\n # pending completion\n UnidistWrapper.materialize(\n [part.list_of_blocks[0] for row in result for part in row]\n )\n", "path": "modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"The module holds the factory which performs I/O using pandas on unidist.\"\"\"\n\nimport io\n\nimport pandas\n\nfrom modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler\nfrom modin.core.execution.unidist.generic.io import UnidistIO\nfrom modin.core.io import (\n CSVDispatcher,\n FWFDispatcher,\n JSONDispatcher,\n ParquetDispatcher,\n FeatherDispatcher,\n SQLDispatcher,\n ExcelDispatcher,\n)\nfrom modin.core.storage_formats.pandas.parsers import (\n PandasCSVParser,\n PandasFWFParser,\n PandasJSONParser,\n PandasParquetParser,\n PandasFeatherParser,\n PandasSQLParser,\n PandasExcelParser,\n)\nfrom modin.core.execution.unidist.common import UnidistWrapper, SignalActor\nfrom ..dataframe import PandasOnUnidistDataframe\nfrom ..partitioning import PandasOnUnidistDataframePartition\n\n\nclass PandasOnUnidistIO(UnidistIO):\n \"\"\"Factory providing methods for performing I/O operations using pandas as storage format on unidist as engine.\"\"\"\n\n frame_cls = PandasOnUnidistDataframe\n query_compiler_cls = PandasQueryCompiler\n build_args = dict(\n frame_partition_cls=PandasOnUnidistDataframePartition,\n query_compiler_cls=PandasQueryCompiler,\n frame_cls=PandasOnUnidistDataframe,\n base_io=UnidistIO,\n )\n\n def __make_read(*classes, build_args=build_args):\n # used to reduce code duplication\n return type(\"\", (UnidistWrapper, *classes), build_args).read\n\n def __make_write(*classes, build_args=build_args):\n # used to reduce code duplication\n return type(\"\", (UnidistWrapper, *classes), build_args).write\n\n read_csv = __make_read(PandasCSVParser, CSVDispatcher)\n read_fwf = __make_read(PandasFWFParser, FWFDispatcher)\n read_json = __make_read(PandasJSONParser, JSONDispatcher)\n read_parquet = __make_read(PandasParquetParser, ParquetDispatcher)\n to_parquet = __make_write(ParquetDispatcher)\n # Blocked on pandas-dev/pandas#12236. 
It is faster to default to pandas.\n # read_hdf = __make_read(PandasHDFParser, HDFReader)\n read_feather = __make_read(PandasFeatherParser, FeatherDispatcher)\n read_sql = __make_read(PandasSQLParser, SQLDispatcher)\n to_sql = __make_write(SQLDispatcher)\n read_excel = __make_read(PandasExcelParser, ExcelDispatcher)\n\n del __make_read # to not pollute class namespace\n del __make_write # to not pollute class namespace\n\n @staticmethod\n def _to_csv_check_support(kwargs):\n \"\"\"\n Check if parallel version of ``to_csv`` could be used.\n\n Parameters\n ----------\n kwargs : dict\n Keyword arguments passed to ``.to_csv()``.\n\n Returns\n -------\n bool\n Whether parallel version of ``to_csv`` is applicable.\n \"\"\"\n path_or_buf = kwargs[\"path_or_buf\"]\n compression = kwargs[\"compression\"]\n if not isinstance(path_or_buf, str):\n return False\n # case when the pointer is placed at the beginning of the file.\n if \"r\" in kwargs[\"mode\"] and \"+\" in kwargs[\"mode\"]:\n return False\n # encodings with BOM don't support;\n # instead of one mark in result bytes we will have them by the number of partitions\n # so we should fallback in pandas for `utf-16`, `utf-32` with all aliases, in instance\n # (`utf_32_be`, `utf_16_le` and so on)\n if kwargs[\"encoding\"] is not None:\n encoding = kwargs[\"encoding\"].lower()\n if \"u\" in encoding or \"utf\" in encoding:\n if \"16\" in encoding or \"32\" in encoding:\n return False\n if compression is None or not compression == \"infer\":\n return False\n if any((path_or_buf.endswith(ext) for ext in [\".gz\", \".bz2\", \".zip\", \".xz\"])):\n return False\n return True\n\n @classmethod\n def to_csv(cls, qc, **kwargs):\n \"\"\"\n Write records stored in the `qc` to a CSV file.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want to run ``to_csv`` on.\n **kwargs : dict\n Parameters for ``pandas.to_csv(**kwargs)``.\n \"\"\"\n if not cls._to_csv_check_support(kwargs):\n return UnidistIO.to_csv(qc, **kwargs)\n\n signals = SignalActor.remote(len(qc._modin_frame._partitions) + 1)\n\n def func(df, **kw): # pragma: no cover\n \"\"\"\n Dump a chunk of rows as csv, then save them to target maintaining order.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A chunk of rows to write to a CSV file.\n **kw : dict\n Arguments to pass to ``pandas.to_csv(**kw)`` plus an extra argument\n `partition_idx` serving as chunk index to maintain rows order.\n \"\"\"\n partition_idx = kw[\"partition_idx\"]\n # the copy is made to not implicitly change the input parameters;\n # to write to an intermediate buffer, we need to change `path_or_buf` in kwargs\n csv_kwargs = kwargs.copy()\n if partition_idx != 0:\n # we need to create a new file only for first recording\n # all the rest should be recorded in appending mode\n if \"w\" in csv_kwargs[\"mode\"]:\n csv_kwargs[\"mode\"] = csv_kwargs[\"mode\"].replace(\"w\", \"a\")\n # It is enough to write the header for the first partition\n csv_kwargs[\"header\"] = False\n\n # for parallelization purposes, each partition is written to an intermediate buffer\n path_or_buf = csv_kwargs[\"path_or_buf\"]\n is_binary = \"b\" in csv_kwargs[\"mode\"]\n csv_kwargs[\"path_or_buf\"] = io.BytesIO() if is_binary else io.StringIO()\n storage_options = csv_kwargs.pop(\"storage_options\", None)\n df.to_csv(**csv_kwargs)\n csv_kwargs.update({\"storage_options\": storage_options})\n content = csv_kwargs[\"path_or_buf\"].getvalue()\n csv_kwargs[\"path_or_buf\"].close()\n\n # each process 
waits for its turn to write to a file\n UnidistWrapper.materialize(signals.wait.remote(partition_idx))\n\n # preparing to write data from the buffer to a file\n with pandas.io.common.get_handle(\n path_or_buf,\n # in case when using URL in implicit text mode\n # pandas try to open `path_or_buf` in binary mode\n csv_kwargs[\"mode\"] if is_binary else csv_kwargs[\"mode\"] + \"t\",\n encoding=kwargs[\"encoding\"],\n errors=kwargs[\"errors\"],\n compression=kwargs[\"compression\"],\n storage_options=kwargs.get(\"storage_options\", None),\n is_text=not is_binary,\n ) as handles:\n handles.handle.write(content)\n\n # signal that the next process can start writing to the file\n UnidistWrapper.materialize(signals.send.remote(partition_idx + 1))\n # used for synchronization purposes\n return pandas.DataFrame()\n\n # signaling that the partition with id==0 can be written to the file\n UnidistWrapper.materialize(signals.send.remote(0))\n # Ensure that the metadata is syncrhonized\n qc._modin_frame._propagate_index_objs(axis=None)\n result = qc._modin_frame._partition_mgr_cls.map_axis_partitions(\n axis=1,\n partitions=qc._modin_frame._partitions,\n map_func=func,\n keep_partitioning=True,\n lengths=None,\n enumerate_partitions=True,\n max_retries=0,\n )\n # pending completion\n UnidistWrapper.materialize(\n [part.list_of_blocks[0] for row in result for part in row]\n )\n", "path": "modin/core/execution/unidist/implementations/pandas_on_unidist/io/io.py"}]}
| 2,729 | 209 |
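The golden diff for the record above reduces to one pattern: `pandas.DataFrame.to_csv` rejects `storage_options` when it is asked to write into an in-memory buffer, so the option has to be set aside for the buffer write and restored for the later `get_handle` call. The standalone sketch below illustrates that pattern outside of Modin; the helper name, the `StringIO` target, and the sample frame are illustrative assumptions, not part of the patched module.

```python
import io

import pandas as pd


def dump_chunk(df: pd.DataFrame, **csv_kwargs) -> str:
    """Write one chunk to an in-memory buffer while keeping kwargs reusable."""
    csv_kwargs = csv_kwargs.copy()
    csv_kwargs["path_or_buf"] = io.StringIO()
    # to_csv raises if storage_options is combined with a buffer target,
    # so pop it here and put it back for the later file-handle step.
    storage_options = csv_kwargs.pop("storage_options", None)
    df.to_csv(**csv_kwargs)
    csv_kwargs["storage_options"] = storage_options
    content = csv_kwargs["path_or_buf"].getvalue()
    csv_kwargs["path_or_buf"].close()
    return content


if __name__ == "__main__":
    print(dump_chunk(pd.DataFrame({"a": [1, 2]}), index=False,
                     storage_options={"anon": True}))
```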
gh_patches_debug_30451
|
rasdani/github-patches
|
git_diff
|
bids-standard__pybids-447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update prep_zenodo.py to only count commits in grabbit up to 0.2.6
With #369, we're dropping the grabbit dependency, so changes there will no longer contribute to pybids.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/prep_zenodo.py`
Content:
```
1 #!/usr/bin/env python3
2 import git
3 import json
4 from subprocess import run, PIPE, CalledProcessError
5 from pathlib import Path
6 from tempfile import TemporaryDirectory
7
8
9 def decommify(name):
10 return ' '.join(name.split(', ')[::-1])
11
12
13 # List of repositories whose commits should be counted as contributions
14 codependents = ['https://github.com/grabbles/grabbit.git']
15
16 # Last shablona commit
17 origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'
18
19 git_root = Path(git.Repo('.', search_parent_directories=True).working_dir)
20 zenodo_file = git_root / '.zenodo.json'
21
22 zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}
23
24 orig_creators = zenodo.get('creators', [])
25 creator_map = {decommify(creator['name']): creator
26 for creator in orig_creators}
27
28 shortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)
29 counts = [line.split('\t', 1)[::-1]
30 for line in shortlog.stdout.decode().split('\n') if line]
31
32 # Get additional commit counts from dependencies
33 with TemporaryDirectory() as tmpdir:
34 tmppath = Path(tmpdir)
35 for repo in codependents:
36 repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])
37 try:
38 clone = run(['git', 'clone', repo, repo_dir], check=True)
39 except CalledProcessError as err:
40 raise RuntimeError("Could not clone {}".format(repo)) from err
41 tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
42 latest_tag = tag.stdout.decode().strip().rsplit('\n', 1)[1]
43 dep_shortlog = run(
44 ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],
45 stdout=PIPE)
46 counts.extend(line.split('\t', 1)[::-1]
47 for line in dep_shortlog.stdout.decode().split('\n')
48 if line)
49
50 commit_counts = {}
51 for committer, commits in counts:
52 commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)
53
54 # Stable sort:
55 # Number of commits in reverse order
56 # Ties broken by alphabetical order of first name
57 committers = [committer
58 for committer, _ in sorted(commit_counts.items(),
59 key=lambda x: (-x[1], x[0]))]
60
61 # Tal to the top
62 first_author = 'Tal Yarkoni'
63 if committers[0] != first_author:
64 committers.remove(first_author)
65 committers.insert(0, first_author)
66
67 creators = [
68 creator_map.get(committer, {'name': committer})
69 for committer in committers
70 ]
71
72 zenodo['creators'] = creators
73 zenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True) + '\n')
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py
--- a/tools/prep_zenodo.py
+++ b/tools/prep_zenodo.py
@@ -11,7 +11,7 @@
# List of repositories whose commits should be counted as contributions
-codependents = ['https://github.com/grabbles/grabbit.git']
+codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
# Last shablona commit
origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'
@@ -33,15 +33,23 @@
with TemporaryDirectory() as tmpdir:
tmppath = Path(tmpdir)
for repo in codependents:
+ try:
+ repo, ref = repo
+ except (TypeError, ValueError):
+ ref = None
repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])
try:
- clone = run(['git', 'clone', repo, repo_dir], check=True)
+ clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)
except CalledProcessError as err:
raise RuntimeError("Could not clone {}".format(repo)) from err
- tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
- latest_tag = tag.stdout.decode().strip().rsplit('\n', 1)[1]
+
+ if ref is None:
+ tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
+ # latest tag
+ ref = tag.stdout.decode().strip().rsplit('\n', 1)[1]
+
dep_shortlog = run(
- ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],
+ ['git', '-C', repo_dir, 'shortlog', '-ns', ref],
stdout=PIPE)
counts.extend(line.split('\t', 1)[::-1]
for line in dep_shortlog.stdout.decode().split('\n')
|
{"golden_diff": "diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py\n--- a/tools/prep_zenodo.py\n+++ b/tools/prep_zenodo.py\n@@ -11,7 +11,7 @@\n \n \n # List of repositories whose commits should be counted as contributions\n-codependents = ['https://github.com/grabbles/grabbit.git']\n+codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n \n # Last shablona commit\n origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n@@ -33,15 +33,23 @@\n with TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n+ try:\n+ repo, ref = repo\n+ except (TypeError, ValueError):\n+ ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n- clone = run(['git', 'clone', repo, repo_dir], check=True)\n+ clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n- tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n- latest_tag = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n+\n+ if ref is None:\n+ tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n+ # latest tag\n+ ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n+\n dep_shortlog = run(\n- ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],\n+ ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n", "issue": "Update prep_zenodo.py to only count commits in grabbit up to 0.2.6\nWith #369, we're dropping the grabbit dependency, so changes there will no longer contribute to pybids.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = ['https://github.com/grabbles/grabbit.git']\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n latest_tag = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse 
order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True) + '\\n')\n", "path": "tools/prep_zenodo.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True) + '\\n')\n", "path": "tools/prep_zenodo.py"}]}
| 1,124 | 484 |
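The fix in the record above hinges on letting each `codependents` entry be either a bare clone URL or a `(url, ref)` pair, so grabbit contributions are only counted up to the `0.2.6` tag. A minimal sketch of that unpacking follows; the second repository URL is a hypothetical placeholder.

```python
codependents = [
    ("https://github.com/grabbles/grabbit.git", "0.2.6"),  # pinned ref
    "https://github.com/example/other-dep.git",            # hypothetical, latest tag
]

for repo in codependents:
    try:
        repo, ref = repo              # (url, ref) pair: count commits up to ref
    except (TypeError, ValueError):
        ref = None                    # bare URL: fall back to the latest tag
    print(repo, "->", ref or "latest tag")
```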
gh_patches_debug_10326
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-text-361
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Guardian: Fix failing tests on Travis
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.3.0
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.15.dev
##### Expected behavior
Tests pass.
##### Actual behavior
Guardian tests are failing.
##### Steps to reproduce the behavior
##### Additional info (worksheets, data, screenshots, ...)
Fix tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/guardian.py`
Content:
```
1 """ This module fetches data from The Guardian API.
2
3 To use first create :class:`TheGuardianCredentials`:
4
5 >>> from orangecontrib.text.guardian import TheGuardianCredentials
6 >>> credentials = TheGuardianCredentials('<your-api-key>')
7
8 Then create :class:`TheGuardianAPI` object and use it for searching:
9
10 >>> from orangecontrib.text.guardian import TheGuardianAPI
11 >>> api = TheGuardianAPI(credentials)
12 >>> corpus = api.search('Slovenia', max_documents=10)
13 >>> len(corpus)
14 10
15
16 """
17
18 import requests
19 import math
20 import json
21
22 from Orange import data
23
24 from orangecontrib.text.corpus import Corpus
25
26
27 BASE_URL = 'http://content.guardianapis.com/search'
28 ARTICLES_PER_PAGE = 10
29
30
31 class TheGuardianCredentials:
32 """ The Guardian API credentials. """
33 def __init__(self, key):
34 """
35 Args:
36 key (str): The Guardian API key. Use `test` for testing purposes.
37 """
38 self.key = key
39
40 @property
41 def valid(self):
42 """ Check if given API key is valid. """
43 response = requests.get(BASE_URL, {'api-key': self.key})
44 return response.status_code != 403 # 403 == Forbidden
45
46 def __eq__(self, other):
47 return self.key == other.key
48
49
50 class TheGuardianAPI:
51 attributes = []
52
53 class_vars = [
54 (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),
55 ]
56
57 tv = data.TimeVariable('Publication Date')
58 metas = [
59 (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),
60 (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),
61 (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),
62 (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),
63 (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),
64 (data.DiscreteVariable('Type'), lambda doc: doc['type']),
65 (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),
66 (data.StringVariable('Tags'),
67 lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),
68 (data.StringVariable('URL'), lambda doc: doc['webUrl']),
69 (data.ContinuousVariable('Word Count', number_of_decimals=0),
70 lambda doc: doc['fields']['wordcount']),
71 ]
72
73 text_features = [metas[0][0], metas[1][0]] # Headline + Content
74 title_indices = [-1] # Headline
75
76 def __init__(self, credentials, on_progress=None, should_break=None):
77 """
78 Args:
79 credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.
80 on_progress (callable): Function for progress reporting.
81 should_break (callable): Function for early stopping.
82 """
83 self.per_page = ARTICLES_PER_PAGE
84 self.pages = 0
85 self.credentials = credentials
86 self.on_progress = on_progress or (lambda x, y: None)
87 self.should_break = should_break or (lambda: False)
88
89 self.results = []
90
91 def _search(self, query, from_date, to_date, page=1):
92 data = self._build_query(query, from_date, to_date, page)
93
94 response = requests.get(BASE_URL, data)
95 parsed = json.loads(response.text)
96
97 if page == 1: # store number of pages
98 self.pages = parsed['response']['pages']
99
100 self.results.extend(parsed['response']['results'])
101
102 def _build_query(self, query, from_date=None, to_date=None, page=1):
103 data = {
104 'q': query,
105 'api-key': self.credentials.key,
106 'page': str(page),
107 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',
108 'show-tags': 'all',
109 }
110 if from_date is not None:
111 data['from-date'] = from_date
112 if to_date is not None:
113 data['to-date'] = to_date
114
115 return data
116
117 def search(self, query, from_date=None, to_date=None, max_documents=None,
118 accumulate=False):
119 """
120 Search The Guardian API for articles.
121
122 Args:
123 query (str): A query for searching the articles by
124 from_date (str): Search only articles newer than the date provided.
125 Date should be in ISO format; e.g. '2016-12-31'.
126 to_date (str): Search only articles older than the date provided.
127 Date should be in ISO format; e.g. '2016-12-31'.
128 max_documents (int): Maximum number of documents to retrieve.
129 When not given, retrieve all documents.
130 accumulate (bool): A flag indicating whether to accumulate results
131 of multiple consequent search calls.
132
133 Returns:
134 :ref:`Corpus`
135 """
136 if not accumulate:
137 self.results = []
138
139 self._search(query, from_date, to_date)
140
141 pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages
142 self.on_progress(self.per_page, pages * self.per_page)
143
144 for p in range(2, pages+1): # to one based
145 if self.should_break():
146 break
147 self._search(query, from_date, to_date, p)
148 self.on_progress(p*self.per_page, pages * self.per_page)
149
150 c = Corpus.from_documents(
151 self.results, 'The Guardian', self.attributes, self.class_vars,
152 self.metas, title_indices=self.title_indices)
153 c.text_features = self.text_features
154 return c
155
156
157 if __name__ == '__main__':
158 credentials = TheGuardianCredentials('test')
159 print(credentials.valid)
160 api = TheGuardianAPI(credentials=credentials)
161 c = api.search('refugees', max_documents=10)
162 print(c)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py
--- a/orangecontrib/text/guardian.py
+++ b/orangecontrib/text/guardian.py
@@ -18,6 +18,7 @@
import requests
import math
import json
+import os
from Orange import data
@@ -155,7 +156,8 @@
if __name__ == '__main__':
- credentials = TheGuardianCredentials('test')
+ key = os.getenv('THE_GUARDIAN_API_KEY', 'test')
+ credentials = TheGuardianCredentials(key)
print(credentials.valid)
api = TheGuardianAPI(credentials=credentials)
c = api.search('refugees', max_documents=10)
|
{"golden_diff": "diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py\n--- a/orangecontrib/text/guardian.py\n+++ b/orangecontrib/text/guardian.py\n@@ -18,6 +18,7 @@\n import requests\n import math\n import json\n+import os\n \n from Orange import data\n \n@@ -155,7 +156,8 @@\n \n \n if __name__ == '__main__':\n- credentials = TheGuardianCredentials('test')\n+ key = os.getenv('THE_GUARDIAN_API_KEY', 'test')\n+ credentials = TheGuardianCredentials(key)\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n", "issue": "Guardian: Fix failing tests on Travis\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.3.0\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.15.dev\r\n\r\n##### Expected behavior\r\nTests pass.\r\n\r\n\r\n##### Actual behavior\r\nGuardian tests is failing.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\n\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\nFix tests.\r\n\r\n\n", "before_files": [{"content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. 
\"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. 
'2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n credentials = TheGuardianCredentials('test')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py"}], "after_files": [{"content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\nimport os\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. 
\"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. 
'2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n key = os.getenv('THE_GUARDIAN_API_KEY', 'test')\n credentials = TheGuardianCredentials(key)\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py"}]}
| 2,109 | 167 |
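The Guardian change above is small: the `__main__` smoke test reads the API key from an environment variable instead of hard-coding the public `test` key, so CI can inject a real key. Below is a short sketch of that pattern, assuming the `THE_GUARDIAN_API_KEY` variable introduced by the patch and reusing the module's own validity check.

```python
import os

import requests

BASE_URL = "http://content.guardianapis.com/search"


def key_is_valid(key: str) -> bool:
    # The Guardian API answers 403 (Forbidden) for rejected keys; anything
    # else means the key was accepted (the shared 'test' key is rate-limited).
    return requests.get(BASE_URL, {"api-key": key}).status_code != 403


key = os.getenv("THE_GUARDIAN_API_KEY", "test")  # fall back to the public test key
print(key_is_valid(key))
```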
gh_patches_debug_33722
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-1225
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
redis-py-cluster new API 2.0.0
### Which version of dd-trace-py are you using?
lastest 0.34.0
### Which version of the libraries are you using?
redis-py-cluster 2.0.0
### How can we reproduce your problem?
change https://github.com/DataDog/dd-trace-py/blob/master/tox.ini redis-py-cluster version
### What is the result that you get?
ERROR. It is no longer StrictRedisCluster but just RedisCluster
### What is the result that you expected?
Moving to the new API.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/rediscluster/patch.py`
Content:
```
1 # 3p
2 import rediscluster
3 from ddtrace.vendor import wrapt
4
5 # project
6 from ddtrace import config
7 from ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY
8 from ...pin import Pin
9 from ...ext import SpanTypes, redis as redisx
10 from ...utils.wrappers import unwrap
11 from ..redis.patch import traced_execute_command, traced_pipeline
12 from ..redis.util import format_command_args
13
14
15 def patch():
16 """Patch the instrumented methods
17 """
18 if getattr(rediscluster, '_datadog_patch', False):
19 return
20 setattr(rediscluster, '_datadog_patch', True)
21
22 _w = wrapt.wrap_function_wrapper
23 _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
24 _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
25 _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
26 Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)
27
28
29 def unpatch():
30 if getattr(rediscluster, '_datadog_patch', False):
31 setattr(rediscluster, '_datadog_patch', False)
32 unwrap(rediscluster.StrictRedisCluster, 'execute_command')
33 unwrap(rediscluster.StrictRedisCluster, 'pipeline')
34 unwrap(rediscluster.StrictClusterPipeline, 'execute')
35
36
37 #
38 # tracing functions
39 #
40
41 def traced_execute_pipeline(func, instance, args, kwargs):
42 pin = Pin.get_from(instance)
43 if not pin or not pin.enabled():
44 return func(*args, **kwargs)
45
46 cmds = [format_command_args(c.args) for c in instance.command_stack]
47 resource = '\n'.join(cmds)
48 tracer = pin.tracer
49 with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:
50 s.set_tag(SPAN_MEASURED_KEY)
51 s.set_tag(redisx.RAWCMD, resource)
52 s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))
53
54 # set analytics sample rate if enabled
55 s.set_tag(
56 ANALYTICS_SAMPLE_RATE_KEY,
57 config.rediscluster.get_analytics_sample_rate()
58 )
59
60 return func(*args, **kwargs)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/rediscluster/patch.py b/ddtrace/contrib/rediscluster/patch.py
--- a/ddtrace/contrib/rediscluster/patch.py
+++ b/ddtrace/contrib/rediscluster/patch.py
@@ -12,6 +12,11 @@
from ..redis.util import format_command_args
+# DEV: In `2.0.0` `__version__` is a string and `VERSION` is a tuple,
+# but in `1.x.x` `__version__` is a tuple annd `VERSION` does not exist
+REDISCLUSTER_VERSION = getattr(rediscluster, 'VERSION', rediscluster.__version__)
+
+
def patch():
"""Patch the instrumented methods
"""
@@ -20,18 +25,30 @@
setattr(rediscluster, '_datadog_patch', True)
_w = wrapt.wrap_function_wrapper
- _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
- _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
- _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
- Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)
+ if REDISCLUSTER_VERSION >= (2, 0, 0):
+ _w('rediscluster', 'RedisCluster.execute_command', traced_execute_command)
+ _w('rediscluster', 'RedisCluster.pipeline', traced_pipeline)
+ _w('rediscluster', 'ClusterPipeline.execute', traced_execute_pipeline)
+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.RedisCluster)
+ else:
+ _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
+ _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
+ _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)
def unpatch():
if getattr(rediscluster, '_datadog_patch', False):
setattr(rediscluster, '_datadog_patch', False)
- unwrap(rediscluster.StrictRedisCluster, 'execute_command')
- unwrap(rediscluster.StrictRedisCluster, 'pipeline')
- unwrap(rediscluster.StrictClusterPipeline, 'execute')
+
+ if REDISCLUSTER_VERSION >= (2, 0, 0):
+ unwrap(rediscluster.RedisCluster, 'execute_command')
+ unwrap(rediscluster.RedisCluster, 'pipeline')
+ unwrap(rediscluster.ClusterPipeline, 'execute')
+ else:
+ unwrap(rediscluster.StrictRedisCluster, 'execute_command')
+ unwrap(rediscluster.StrictRedisCluster, 'pipeline')
+ unwrap(rediscluster.StrictClusterPipeline, 'execute')
#
|
{"golden_diff": "diff --git a/ddtrace/contrib/rediscluster/patch.py b/ddtrace/contrib/rediscluster/patch.py\n--- a/ddtrace/contrib/rediscluster/patch.py\n+++ b/ddtrace/contrib/rediscluster/patch.py\n@@ -12,6 +12,11 @@\n from ..redis.util import format_command_args\n \n \n+# DEV: In `2.0.0` `__version__` is a string and `VERSION` is a tuple,\n+# but in `1.x.x` `__version__` is a tuple annd `VERSION` does not exist\n+REDISCLUSTER_VERSION = getattr(rediscluster, 'VERSION', rediscluster.__version__)\n+\n+\n def patch():\n \"\"\"Patch the instrumented methods\n \"\"\"\n@@ -20,18 +25,30 @@\n setattr(rediscluster, '_datadog_patch', True)\n \n _w = wrapt.wrap_function_wrapper\n- _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n- _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n- _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)\n- Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n+ if REDISCLUSTER_VERSION >= (2, 0, 0):\n+ _w('rediscluster', 'RedisCluster.execute_command', traced_execute_command)\n+ _w('rediscluster', 'RedisCluster.pipeline', traced_pipeline)\n+ _w('rediscluster', 'ClusterPipeline.execute', traced_execute_pipeline)\n+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.RedisCluster)\n+ else:\n+ _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n+ _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n+ _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)\n+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n \n \n def unpatch():\n if getattr(rediscluster, '_datadog_patch', False):\n setattr(rediscluster, '_datadog_patch', False)\n- unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n- unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n- unwrap(rediscluster.StrictClusterPipeline, 'execute')\n+\n+ if REDISCLUSTER_VERSION >= (2, 0, 0):\n+ unwrap(rediscluster.RedisCluster, 'execute_command')\n+ unwrap(rediscluster.RedisCluster, 'pipeline')\n+ unwrap(rediscluster.ClusterPipeline, 'execute')\n+ else:\n+ unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n+ unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n+ unwrap(rediscluster.StrictClusterPipeline, 'execute')\n \n \n #\n", "issue": "redis-py-cluster new API 2.0.0\n### Which version of dd-trace-py are you using?\r\n lastest 0.34.0\r\n\r\n### Which version of the libraries are you using?\r\n\r\nredis-py-cluster 2.0.0\r\n\r\n### How can we reproduce your problem?\r\n\r\nchange https://github.com/DataDog/dd-trace-py/blob/master/tox.ini redis-py-cluster version\r\n\r\n### What is the result that you get?\r\n\r\nERROR. 
It's no more StrictRedisCluster but just RedisCluster\r\n\r\n### What is result that you expected?\r\n\r\nmoving to new api\r\n\n", "before_files": [{"content": "# 3p\nimport rediscluster\nfrom ddtrace.vendor import wrapt\n\n# project\nfrom ddtrace import config\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY\nfrom ...pin import Pin\nfrom ...ext import SpanTypes, redis as redisx\nfrom ...utils.wrappers import unwrap\nfrom ..redis.patch import traced_execute_command, traced_pipeline\nfrom ..redis.util import format_command_args\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n \"\"\"\n if getattr(rediscluster, '_datadog_patch', False):\n return\n setattr(rediscluster, '_datadog_patch', True)\n\n _w = wrapt.wrap_function_wrapper\n _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)\n Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n\n\ndef unpatch():\n if getattr(rediscluster, '_datadog_patch', False):\n setattr(rediscluster, '_datadog_patch', False)\n unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n unwrap(rediscluster.StrictClusterPipeline, 'execute')\n\n\n#\n# tracing functions\n#\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [format_command_args(c.args) for c in instance.command_stack]\n resource = '\\n'.join(cmds)\n tracer = pin.tracer\n with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:\n s.set_tag(SPAN_MEASURED_KEY)\n s.set_tag(redisx.RAWCMD, resource)\n s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))\n\n # set analytics sample rate if enabled\n s.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n config.rediscluster.get_analytics_sample_rate()\n )\n\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/rediscluster/patch.py"}], "after_files": [{"content": "# 3p\nimport rediscluster\nfrom ddtrace.vendor import wrapt\n\n# project\nfrom ddtrace import config\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY\nfrom ...pin import Pin\nfrom ...ext import SpanTypes, redis as redisx\nfrom ...utils.wrappers import unwrap\nfrom ..redis.patch import traced_execute_command, traced_pipeline\nfrom ..redis.util import format_command_args\n\n\n# DEV: In `2.0.0` `__version__` is a string and `VERSION` is a tuple,\n# but in `1.x.x` `__version__` is a tuple annd `VERSION` does not exist\nREDISCLUSTER_VERSION = getattr(rediscluster, 'VERSION', rediscluster.__version__)\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n \"\"\"\n if getattr(rediscluster, '_datadog_patch', False):\n return\n setattr(rediscluster, '_datadog_patch', True)\n\n _w = wrapt.wrap_function_wrapper\n if REDISCLUSTER_VERSION >= (2, 0, 0):\n _w('rediscluster', 'RedisCluster.execute_command', traced_execute_command)\n _w('rediscluster', 'RedisCluster.pipeline', traced_pipeline)\n _w('rediscluster', 'ClusterPipeline.execute', traced_execute_pipeline)\n Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.RedisCluster)\n else:\n _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n _w('rediscluster', 
'StrictClusterPipeline.execute', traced_execute_pipeline)\n Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n\n\ndef unpatch():\n if getattr(rediscluster, '_datadog_patch', False):\n setattr(rediscluster, '_datadog_patch', False)\n\n if REDISCLUSTER_VERSION >= (2, 0, 0):\n unwrap(rediscluster.RedisCluster, 'execute_command')\n unwrap(rediscluster.RedisCluster, 'pipeline')\n unwrap(rediscluster.ClusterPipeline, 'execute')\n else:\n unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n unwrap(rediscluster.StrictClusterPipeline, 'execute')\n\n\n#\n# tracing functions\n#\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [format_command_args(c.args) for c in instance.command_stack]\n resource = '\\n'.join(cmds)\n tracer = pin.tracer\n with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:\n s.set_tag(SPAN_MEASURED_KEY)\n s.set_tag(redisx.RAWCMD, resource)\n s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))\n\n # set analytics sample rate if enabled\n s.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n config.rediscluster.get_analytics_sample_rate()\n )\n\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/rediscluster/patch.py"}]}
| 990 | 613 |
gh_patches_debug_7855
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: FieldMeta unexpected keyword argument 'constant'
### Description
After going from `polyfactory==2.0.0alpha1` => `2.0.0` I end up with `FieldMeta.__init__() got an unexpected keyword argument 'constant'`
Looks like the example generation for the OpenAPI docs is broken because the `constant` boolean field was removed in 2.0.0:
https://github.com/litestar-org/polyfactory/blob/v2.0.0/polyfactory/field_meta.py#L39-L48 (2.0.0)
vs
https://github.com/litestar-org/polyfactory/blob/v2.0.0alpha1/polyfactory/field_meta.py#L12-L21 (2.0.0a1)
And is set by https://github.com/litestar-org/litestar/blob/v2.0.0alpha4/litestar/_openapi/schema_generation/examples.py#L44 (2.0.0a4)
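For illustration, the constructor call changes roughly like this between the two polyfactory versions (a trimmed sketch, not the full signatures):

```python
from polyfactory.field_meta import FieldMeta, Null

# 2.0.0alpha1 accepted a `constant` keyword (raises TypeError on 2.0.0):
#   FieldMeta(name="x", annotation=int, constant=False, default=Null)

# 2.0.0 moved it into `constraints`:
field_meta = FieldMeta(name="x", annotation=int, constraints={"constant": False}, default=Null)
```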
Running on docker `python:3.11-alpine`
### URL to code causing the issue
_No response_
### MCVE
```python
class TestController(Controller):
path = "/test"
@post(
path="/route",
summary="Test Route",
tags=["Test"],
responses={503: ResponseSpec(data_container=ServiceUnavailableModel, description="Device or service unavailable")},
)
async def test_route(self, data: SomeDataModel) -> SomeResponseModel:
return {"test": data}
```
The `responses=` line causes this error.
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
```bash
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 149, in __call__
await self.app(scope, receive, send)
File "/usr/local/lib/python3.11/site-packages/litestar/routes/http.py", line 77, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/routes/http.py", line 129, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/routes/http.py", line 158, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/routes/http.py", line 210, in _get_response_data
data = route_handler.fn.value(**parsed_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/openapi/controller.py", line 221, in root
return Response(content=render_method(request), media_type=MediaType.HTML)
^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/openapi/controller.py", line 397, in render_redoc
schema = self.get_schema_from_request(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/openapi/controller.py", line 105, in get_schema_from_request
return request.app.openapi_schema
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/app.py", line 510, in openapi_schema
self.update_openapi_schema()
File "/usr/local/lib/python3.11/site-packages/litestar/app.py", line 825, in update_openapi_schema
path_item, created_operation_ids = create_path_item(
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/path_item.py", line 125, in create_path_item
responses=create_responses(
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/responses.py", line 259, in create_responses
for status_code, response in create_additional_responses(
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/responses.py", line 226, in create_additional_responses
schema = create_schema(
^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py", line 724, in create_schema
result = create_schema_for_pydantic_model(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py", line 541, in create_schema_for_pydantic_model
properties={
^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py", line 542, in <dictcomp>
(f.alias or f.name): create_schema(
^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py", line 769, in create_schema
return _process_schema_result(field=field, schema=result, generate_examples=generate_examples, schemas=schemas)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py", line 680, in _process_schema_result
schema.examples = create_examples_for_field(field=field)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/examples.py", line 60, in create_examples_for_field
field_meta = _create_field_meta(field)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/examples.py", line 41, in _create_field_meta
return FieldMeta(
^^^^^^^^^^
TypeError: FieldMeta.__init__() got an unexpected keyword argument 'constant'
```
### Litestar Version
Litestar 2.0.0a4
polyfactory 2.0.0alpha1 (no error)
polyfactory 2.0.0 (error)
### Platform
- [ ] Linux
- [ ] Mac
- [ ] Windows
- [X] Other (Please specify in the description above)
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
That assumption does not hold in general, especially for any kind of virtual filesystem (e.g. a zipped package). I think this check should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
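A rough sketch of the proposed relaxation (illustrative pydantic models, not the actual Starlite config class):

```python
from pathlib import Path
from typing import List, Union

from pydantic import BaseModel, DirectoryPath


class StaticFilesConfigToday(BaseModel):
    directories: List[DirectoryPath]  # pydantic requires these to exist on the local filesystem


class StaticFilesConfigRelaxed(BaseModel):
    directories: List[Union[Path, str]]  # no existence check, so zipped/virtual filesystems can work
```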
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/_openapi/schema_generation/examples.py`
Content:
```
1 from __future__ import annotations
2
3 from enum import Enum
4 from typing import TYPE_CHECKING, Any
5
6 from _decimal import Decimal
7 from polyfactory.exceptions import ParameterException
8 from polyfactory.field_meta import FieldMeta, Null
9
10 from litestar.openapi.spec import Example
11 from litestar.types import Empty
12 from litestar.utils import is_pydantic_model_instance
13
14 try:
15 from polyfactory.factories.pydantic_factory import ModelFactory as Factory
16 except ImportError:
17 from polyfactory.factories import DataclassFactory as Factory # type: ignore[assignment]
18
19
20 if TYPE_CHECKING:
21 from litestar._signature.field import SignatureField
22
23
24 def _normalize_example_value(value: Any) -> Any:
25 """Normalize the example value to make it look a bit prettier."""
26 if isinstance(value, (Decimal, float)):
27 value = round(float(value), 2)
28 if isinstance(value, Enum):
29 value = value.value
30 if is_pydantic_model_instance(value):
31 value = value.dict()
32 if isinstance(value, (list, set)):
33 value = [_normalize_example_value(v) for v in value]
34 if isinstance(value, dict):
35 for k, v in value.items():
36 value[k] = _normalize_example_value(v)
37 return value
38
39
40 def _create_field_meta(field: "SignatureField") -> FieldMeta:
41 return FieldMeta(
42 name=field.name,
43 annotation=field.field_type,
44 constant=field.is_const,
45 default=field.default_value if field.default_value is not Empty else Null,
46 children=[_create_field_meta(child) for child in field.children] if field.children else None,
47 )
48
49
50 def create_examples_for_field(field: "SignatureField") -> list["Example"]:
51 """Create an OpenAPI Example instance.
52
53 Args:
54 field: A signature field.
55
56 Returns:
57 A list including a single example.
58 """
59 try:
60 field_meta = _create_field_meta(field)
61 value = _normalize_example_value(Factory.get_field_value(field_meta))
62 return [Example(description=f"Example {field.name} value", value=value)]
63 except ParameterException: # pragma: no cover
64 return []
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/litestar/_openapi/schema_generation/examples.py b/litestar/_openapi/schema_generation/examples.py
--- a/litestar/_openapi/schema_generation/examples.py
+++ b/litestar/_openapi/schema_generation/examples.py
@@ -41,7 +41,7 @@
return FieldMeta(
name=field.name,
annotation=field.field_type,
- constant=field.is_const,
+ constraints={"constant": field.is_const},
default=field.default_value if field.default_value is not Empty else Null,
children=[_create_field_meta(child) for child in field.children] if field.children else None,
)
|
{"golden_diff": "diff --git a/litestar/_openapi/schema_generation/examples.py b/litestar/_openapi/schema_generation/examples.py\n--- a/litestar/_openapi/schema_generation/examples.py\n+++ b/litestar/_openapi/schema_generation/examples.py\n@@ -41,7 +41,7 @@\n return FieldMeta(\n name=field.name,\n annotation=field.field_type,\n- constant=field.is_const,\n+ constraints={\"constant\": field.is_const},\n default=field.default_value if field.default_value is not Empty else Null,\n children=[_create_field_meta(child) for child in field.children] if field.children else None,\n )\n", "issue": "Bug: FieldMeta unexpected keyword argument 'constant'\n### Description\r\n\r\nAfter going from `polyfactory==2.0.0alpha1` => `2.0.0` I end up with `FieldMeta.__init__() got an unexpected keyword argument 'constant'`\r\n\r\nLooks like the example generation for the openapi docs is broken because the `constant` boolean field is removed from 2.0.0\r\n\r\nhttps://github.com/litestar-org/polyfactory/blob/v2.0.0/polyfactory/field_meta.py#L39-L48 (2.0.0)\r\n\r\nvs\r\n\r\nhttps://github.com/litestar-org/polyfactory/blob/v2.0.0alpha1/polyfactory/field_meta.py#L12-L21 (2.0.0a1)\r\n\r\nAnd is set by https://github.com/litestar-org/litestar/blob/v2.0.0alpha4/litestar/_openapi/schema_generation/examples.py#L44 (2.0.0a4)\r\n\r\nRunning on docker `python:3.11-alpine`\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\nclass TestController(Controller):\r\n path = \"/test\"\r\n\r\n @post(\r\n path=\"/route\",\r\n summary=\"Test Route\",\r\n tags=[\"Test\"],\r\n responses={503: ResponseSpec(data_container=ServiceUnavailableModel, description=\"Device or service unavailable\")},\r\n )\r\n async def test_route(self, data: SomeDataModel) -> SomeResponseModel:\r\n return {\"test\": data}\r\n```\r\n\r\nThe `responses=` line causes this error. 
\r\n\r\n\r\n### Steps to reproduce\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n```bash\r\nTraceback (most recent call last):\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py\", line 149, in __call__\r\n await self.app(scope, receive, send)\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/routes/http.py\", line 77, in handle\r\n response = await self._get_response_for_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/routes/http.py\", line 129, in _get_response_for_request\r\n response = await self._call_handler_function(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/routes/http.py\", line 158, in _call_handler_function\r\n response_data, cleanup_group = await self._get_response_data(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/routes/http.py\", line 210, in _get_response_data\r\n data = route_handler.fn.value(**parsed_kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/openapi/controller.py\", line 221, in root\r\n return Response(content=render_method(request), media_type=MediaType.HTML)\r\n ^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/openapi/controller.py\", line 397, in render_redoc\r\n schema = self.get_schema_from_request(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/openapi/controller.py\", line 105, in get_schema_from_request\r\n return request.app.openapi_schema\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/app.py\", line 510, in openapi_schema\r\n self.update_openapi_schema()\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/app.py\", line 825, in update_openapi_schema\r\n path_item, created_operation_ids = create_path_item(\r\n ^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/path_item.py\", line 125, in create_path_item\r\n responses=create_responses(\r\n ^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/responses.py\", line 259, in create_responses\r\n for status_code, response in create_additional_responses(\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/responses.py\", line 226, in create_additional_responses\r\n schema = create_schema(\r\n ^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py\", line 724, in create_schema\r\n result = create_schema_for_pydantic_model(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py\", line 541, in create_schema_for_pydantic_model\r\n properties={\r\n ^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py\", line 542, in <dictcomp>\r\n (f.alias or f.name): create_schema(\r\n ^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py\", line 769, in create_schema\r\n return _process_schema_result(field=field, schema=result, generate_examples=generate_examples, schemas=schemas)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile 
\"/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py\", line 680, in _process_schema_result\r\n schema.examples = create_examples_for_field(field=field)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/examples.py\", line 60, in create_examples_for_field\r\n field_meta = _create_field_meta(field)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFile \"/usr/local/lib/python3.11/site-packages/litestar/_openapi/schema_generation/examples.py\", line 41, in _create_field_meta\r\n return FieldMeta(\r\n ^^^^^^^^^^\r\nTypeError: FieldMeta.__init__() got an unexpected keyword argument 'constant'\r\n```\r\n\r\n\r\n### Litestar Version\r\n\r\nLitestar 2.0.0a4\r\npolyfactory 2.0.0alpha1 (no error)\r\npolyfactory 2.0.0 (error)\r\n\r\n### Platform\r\n\r\n- [ ] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [X] Other (Please specify in the description above)\nStaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any\n\nfrom _decimal import Decimal\nfrom polyfactory.exceptions import ParameterException\nfrom polyfactory.field_meta import FieldMeta, Null\n\nfrom litestar.openapi.spec import Example\nfrom litestar.types import Empty\nfrom litestar.utils import is_pydantic_model_instance\n\ntry:\n from polyfactory.factories.pydantic_factory import ModelFactory as Factory\nexcept ImportError:\n from polyfactory.factories import DataclassFactory as Factory # type: ignore[assignment]\n\n\nif TYPE_CHECKING:\n from litestar._signature.field import SignatureField\n\n\ndef _normalize_example_value(value: Any) -> Any:\n \"\"\"Normalize the example value to make it look a bit prettier.\"\"\"\n if isinstance(value, (Decimal, float)):\n value = round(float(value), 2)\n if isinstance(value, Enum):\n value = value.value\n if is_pydantic_model_instance(value):\n value = value.dict()\n if isinstance(value, (list, set)):\n value = [_normalize_example_value(v) for v in value]\n if isinstance(value, dict):\n for k, v in value.items():\n value[k] = _normalize_example_value(v)\n return value\n\n\ndef _create_field_meta(field: \"SignatureField\") -> FieldMeta:\n return FieldMeta(\n name=field.name,\n annotation=field.field_type,\n constant=field.is_const,\n default=field.default_value if field.default_value is not Empty else Null,\n children=[_create_field_meta(child) for child in field.children] if field.children else None,\n )\n\n\ndef create_examples_for_field(field: \"SignatureField\") -> list[\"Example\"]:\n \"\"\"Create an OpenAPI Example instance.\n\n Args:\n field: A signature field.\n\n Returns:\n A list including a single example.\n \"\"\"\n try:\n field_meta = _create_field_meta(field)\n value = _normalize_example_value(Factory.get_field_value(field_meta))\n return [Example(description=f\"Example 
{field.name} value\", value=value)]\n except ParameterException: # pragma: no cover\n return []\n", "path": "litestar/_openapi/schema_generation/examples.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any\n\nfrom _decimal import Decimal\nfrom polyfactory.exceptions import ParameterException\nfrom polyfactory.field_meta import FieldMeta, Null\n\nfrom litestar.openapi.spec import Example\nfrom litestar.types import Empty\nfrom litestar.utils import is_pydantic_model_instance\n\ntry:\n from polyfactory.factories.pydantic_factory import ModelFactory as Factory\nexcept ImportError:\n from polyfactory.factories import DataclassFactory as Factory # type: ignore[assignment]\n\n\nif TYPE_CHECKING:\n from litestar._signature.field import SignatureField\n\n\ndef _normalize_example_value(value: Any) -> Any:\n \"\"\"Normalize the example value to make it look a bit prettier.\"\"\"\n if isinstance(value, (Decimal, float)):\n value = round(float(value), 2)\n if isinstance(value, Enum):\n value = value.value\n if is_pydantic_model_instance(value):\n value = value.dict()\n if isinstance(value, (list, set)):\n value = [_normalize_example_value(v) for v in value]\n if isinstance(value, dict):\n for k, v in value.items():\n value[k] = _normalize_example_value(v)\n return value\n\n\ndef _create_field_meta(field: \"SignatureField\") -> FieldMeta:\n return FieldMeta(\n name=field.name,\n annotation=field.field_type,\n constraints={\"constant\": field.is_const},\n default=field.default_value if field.default_value is not Empty else Null,\n children=[_create_field_meta(child) for child in field.children] if field.children else None,\n )\n\n\ndef create_examples_for_field(field: \"SignatureField\") -> list[\"Example\"]:\n \"\"\"Create an OpenAPI Example instance.\n\n Args:\n field: A signature field.\n\n Returns:\n A list including a single example.\n \"\"\"\n try:\n field_meta = _create_field_meta(field)\n value = _normalize_example_value(Factory.get_field_value(field_meta))\n return [Example(description=f\"Example {field.name} value\", value=value)]\n except ParameterException: # pragma: no cover\n return []\n", "path": "litestar/_openapi/schema_generation/examples.py"}]}
| 2,502 | 139 |
gh_patches_debug_17728
|
rasdani/github-patches
|
git_diff
|
conda__conda-build-1641
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Encoding issues on Windows build in the fix_staged_scripts step
Hello.
I'm trying to build the [waf package on windows](https://github.com/conda-forge/waf-feedstock/pull/4), but I'm having some trouble with ```conda_build``` in the ```fix_staged_scripts``` step. I couldn't find any similar issue. I'll follow with some details:
While building, there is a file named ```waf``` that is meant to be moved to the ```%SCRIPTS%``` folder. The problem seems to be that ```conda-build``` tries to read the file to do some checking, but fails to do so because of the file encoding (https://github.com/conda/conda-build/blob/master/conda_build/windows.py#L45)
I believe the waf file should be opened with encoding='iso-8859-1'. Making a local change to use the code below builds the package successfully, but that is not a real solution:
```
with open(join(scripts_dir, fn), encoding='iso-8859-1') as f:
```
Any suggestions here? I'm unsure how this case should be handled.
I'll leave some suggestions that occurred to me:
1- Fix this at the package level by somehow avoiding the ```fix_staged_scripts``` function. (I don't like this solution because it wouldn't really fix anything beyond this specific package.)
2- Do a try/except over a few candidate encodings near the code shown above (a rough sketch follows after this list)
3- Have a metadata field somewhere (maybe in the recipe?) to declare the file encoding, falling back to the current behaviour if none is given.
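For suggestion 2, a minimal sketch of what that fallback could look like (the helper name and encoding list are made up; the patch shown further down takes a different route via bs4.UnicodeDammit):

```python
from os.path import join


def read_first_line(scripts_dir, fn, encodings=("utf-8", "iso-8859-1")):
    # Try each candidate encoding until one decodes the first line cleanly.
    for encoding in encodings:
        try:
            with open(join(scripts_dir, fn), encoding=encoding) as f:
                return f.readline().lower()
        except UnicodeDecodeError:
            continue
    return ""
```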
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/windows.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import sys
5 from os.path import isdir, join, dirname, isfile
6
7 # importing setuptools patches distutils so that it knows how to find VC for python 2.7
8 import setuptools # noqa
9 # Leverage the hard work done by setuptools/distutils to find vcvarsall using
10 # either the registry or the VS**COMNTOOLS environment variable
11 from distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall
12 from distutils.msvc9compiler import Reg, WINSDK_BASE
13
14 from .conda_interface import bits
15
16 from conda_build import environ
17 from conda_build.utils import _check_call, root_script_dir, path_prepended, copy_into
18
19
20 assert sys.platform == 'win32'
21
22
23 VS_VERSION_STRING = {
24 '8.0': 'Visual Studio 8 2005',
25 '9.0': 'Visual Studio 9 2008',
26 '10.0': 'Visual Studio 10 2010',
27 '11.0': 'Visual Studio 11 2012',
28 '12.0': 'Visual Studio 12 2013',
29 '14.0': 'Visual Studio 14 2015'
30 }
31
32
33 def fix_staged_scripts(scripts_dir):
34 """
35 Fixes scripts which have been installed unix-style to have a .bat
36 helper
37 """
38 if not isdir(scripts_dir):
39 return
40 for fn in os.listdir(scripts_dir):
41 # process all the extensionless files
42 if not isfile(join(scripts_dir, fn)) or '.' in fn:
43 continue
44
45 with open(join(scripts_dir, fn)) as f:
46 line = f.readline().lower()
47 # If it's a #!python script
48 if not (line.startswith('#!') and 'python' in line.lower()):
49 continue
50 print('Adjusting unix-style #! script %s, '
51 'and adding a .bat file for it' % fn)
52 # copy it with a .py extension (skipping that first #! line)
53 with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:
54 fo.write(f.read())
55 # now create the .exe file
56 copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),
57 join(scripts_dir, fn + '.exe'))
58
59 # remove the original script
60 os.remove(join(scripts_dir, fn))
61
62
63 def build_vcvarsall_vs_path(version):
64 """
65 Given the Visual Studio version, returns the default path to the
66 Microsoft Visual Studio vcvarsall.bat file.
67 Expected versions are of the form {9.0, 10.0, 12.0, 14.0}
68 """
69 # Set up a load of paths that can be imported from the tests
70 if 'ProgramFiles(x86)' in os.environ:
71 PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']
72 else:
73 PROGRAM_FILES_PATH = os.environ['ProgramFiles']
74
75 flatversion = str(version).replace('.', '')
76 vstools = "VS{0}COMNTOOLS".format(flatversion)
77
78 if vstools in os.environ:
79 return os.path.join(os.environ[vstools], '..\\..\\VC\\vcvarsall.bat')
80 else:
81 # prefer looking at env var; fall back to program files defaults
82 return os.path.join(PROGRAM_FILES_PATH,
83 'Microsoft Visual Studio {}'.format(version), 'VC',
84 'vcvarsall.bat')
85
86
87 def msvc_env_cmd(bits, config, override=None):
88 arch_selector = 'x86' if bits == 32 else 'amd64'
89
90 msvc_env_lines = []
91
92 version = None
93 if override is not None:
94 version = override
95
96 # The DISTUTILS_USE_SDK variable tells distutils to not try and validate
97 # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.
98 # For > 3.5 it literally just skips the validation logic.
99 # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py
100 # for more information.
101 msvc_env_lines.append('set DISTUTILS_USE_SDK=1')
102 # This is also required to hit the 'don't validate' logic on < 3.5.
103 # For > 3.5 this is ignored.
104 msvc_env_lines.append('set MSSdk=1')
105
106 if not version:
107 if config.PY3K and config.use_MSVC2015:
108 version = '14.0'
109 elif config.PY3K:
110 version = '10.0'
111 else:
112 version = '9.0'
113
114 if float(version) >= 14.0:
115 # For Python 3.5+, ensure that we link with the dynamic runtime. See
116 # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info
117 msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\vcruntime{0}.dll'.format(
118 version.replace('.', '')))
119
120 vcvarsall_vs_path = build_vcvarsall_vs_path(version)
121
122 def build_vcvarsall_cmd(cmd, arch=arch_selector):
123 # Default argument `arch_selector` is defined above
124 return 'call "{cmd}" {arch}'.format(cmd=cmd, arch=arch)
125
126 msvc_env_lines.append('set "VS_VERSION={}"'.format(version))
127 msvc_env_lines.append('set "VS_MAJOR={}"'.format(version.split('.')[0]))
128 msvc_env_lines.append('set "VS_YEAR={}"'.format(VS_VERSION_STRING[version][-4:]))
129 msvc_env_lines.append('set "CMAKE_GENERATOR={}"'.format(VS_VERSION_STRING[version] +
130 {64: ' Win64', 32: ''}[bits]))
131 # tell msys2 to ignore path conversions for issue-causing windows-style flags in build
132 # See https://github.com/conda-forge/icu-feedstock/pull/5
133 msvc_env_lines.append('set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%"')
134 msvc_env_lines.append('set "MSYS2_ENV_CONV_EXCL=CL"')
135 if version == '10.0':
136 try:
137 WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),
138 'installationfolder')
139 WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')
140
141 win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'
142 win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)
143
144 # There are two methods of building Python 3.3 and 3.4 extensions (both
145 # of which required Visual Studio 2010 - as explained in the Python wiki
146 # https://wiki.python.org/moin/WindowsCompilers)
147 # 1) Use the Windows SDK 7.1
148 # 2) Use Visual Studio 2010 (any edition)
149 # However, VS2010 never shipped with a 64-bit compiler, so in this case
150 # **only** option (1) applies. For this reason, we always try and
151 # activate the Windows SDK first. Unfortunately, unsuccessfully setting
152 # up the environment does **not EXIT 1** and therefore we must fall
153 # back to attempting to set up VS2010.
154 # DelayedExpansion is required for the SetEnv.cmd
155 msvc_env_lines.append('Setlocal EnableDelayedExpansion')
156 msvc_env_lines.append(win_sdk_cmd)
157 # If the WindowsSDKDir environment variable has not been successfully
158 # set then try activating VS2010
159 msvc_env_lines.append('if not "%WindowsSDKDir%" == "{}" ( {} )'.format(
160 WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))
161 # sdk is not installed. Fall back to only trying VS 2010
162 except KeyError:
163 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
164 elif version == '9.0':
165 # Get the Visual Studio 2008 path (not the Visual C++ for Python path)
166 # and get the 'vcvars64.bat' from inside the bin (in the directory above
167 # that returned by distutils_find_vcvarsall)
168 try:
169 VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),
170 'bin', 'vcvars64.bat')
171 # there's an exception if VS or the VC compiler for python are not actually installed.
172 except (KeyError, TypeError):
173 VCVARS64_VS9_BAT_PATH = None
174
175 error1 = 'if errorlevel 1 {}'
176
177 # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7
178 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
179 # The Visual Studio 2008 Express edition does not properly contain
180 # the amd64 build files, so we call the vcvars64.bat manually,
181 # rather than using the vcvarsall.bat which would try and call the
182 # missing bat file.
183 if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:
184 msvc_env_lines.append(error1.format(
185 build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))
186 # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+
187 # by using the logic provided by setuptools
188 msvc_env_lines.append(error1.format(
189 build_vcvarsall_cmd(distutils_find_vcvarsall(9))))
190 else:
191 # Visual Studio 14 or otherwise
192 msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))
193
194 return '\n'.join(msvc_env_lines) + '\n'
195
196
197 def build(m, bld_bat, config):
198 with path_prepended(config.build_prefix):
199 env = environ.get_dict(config=config, m=m)
200 env["CONDA_BUILD_STATE"] = "BUILD"
201
202 for name in 'BIN', 'INC', 'LIB':
203 path = env['LIBRARY_' + name]
204 if not isdir(path):
205 os.makedirs(path)
206
207 src_dir = config.work_dir
208 if os.path.isfile(bld_bat):
209 with open(bld_bat) as fi:
210 data = fi.read()
211 with open(join(src_dir, 'bld.bat'), 'w') as fo:
212 # more debuggable with echo on
213 fo.write('@echo on\n')
214 for key, value in env.items():
215 fo.write('set "{key}={value}"\n'.format(key=key, value=value))
216 fo.write(msvc_env_cmd(bits=bits, config=config,
217 override=m.get_value('build/msvc_compiler', None)))
218 # Reset echo on, because MSVC scripts might have turned it off
219 fo.write('@echo on\n')
220 fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
221 fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
222 if config.activate:
223 fo.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
224 conda_root=root_script_dir,
225 prefix=config.build_prefix))
226 fo.write("REM ===== end generated header =====\n")
227 fo.write(data)
228
229 cmd = ['cmd.exe', '/c', 'bld.bat']
230 _check_call(cmd, cwd=src_dir)
231
232 fix_staged_scripts(join(config.build_prefix, 'Scripts'))
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda_build/windows.py b/conda_build/windows.py
--- a/conda_build/windows.py
+++ b/conda_build/windows.py
@@ -4,6 +4,7 @@
import sys
from os.path import isdir, join, dirname, isfile
+import bs4
# importing setuptools patches distutils so that it knows how to find VC for python 2.7
import setuptools # noqa
# Leverage the hard work done by setuptools/distutils to find vcvarsall using
@@ -43,7 +44,7 @@
continue
with open(join(scripts_dir, fn)) as f:
- line = f.readline().lower()
+ line = bs4.UnicodeDammit(f.readline()).unicode_markup.lower()
# If it's a #!python script
if not (line.startswith('#!') and 'python' in line.lower()):
continue
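For context on the approach taken: `bs4.UnicodeDammit` guesses the encoding of whatever it is handed and exposes the decoded text as `unicode_markup`, e.g. (illustrative):

```python
import bs4

first_line = bs4.UnicodeDammit(b"#!/usr/bin/env python\r\n").unicode_markup.lower()
print(first_line.startswith("#!") and "python" in first_line)  # True
```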
|
{"golden_diff": "diff --git a/conda_build/windows.py b/conda_build/windows.py\n--- a/conda_build/windows.py\n+++ b/conda_build/windows.py\n@@ -4,6 +4,7 @@\n import sys\n from os.path import isdir, join, dirname, isfile\n \n+import bs4\n # importing setuptools patches distutils so that it knows how to find VC for python 2.7\n import setuptools # noqa\n # Leverage the hard work done by setuptools/distutils to find vcvarsall using\n@@ -43,7 +44,7 @@\n continue\n \n with open(join(scripts_dir, fn)) as f:\n- line = f.readline().lower()\n+ line = bs4.UnicodeDammit(f.readline()).unicode_markup.lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n", "issue": "Encoding issues on Windows build in the fix_staged_scripts step\nHello.\r\n\r\nI'm trying to build the [waf package on windows](https://github.com/conda-forge/waf-feedstock/pull/4), but I'm having some trouble with ```conda_build``` in the ```fix_staged_scripts``` step. I couldn't find any similar issue. I'll follow with some details:\r\n\r\nWhile building, there is a file named ```waf``` that is meant to be moved to the ```%SCRIPTS%``` folder. The problem seems to be that ```conda-build``` tries to read the file to do some checking, but fails to do so because of the file encoding (https://github.com/conda/conda-build/blob/master/conda_build/windows.py#L45)\r\n\r\nI believe the waf file should be opened with encoding='iso-8859-1'. Making a local change replacing with the code bellow builds the package successfully, but is not a solution:\r\n\r\n```\r\nwith open(join(scripts_dir, fn), encoding='iso-8859-1') as f:\r\n```\r\n\r\nAny suggestions here? I'm unsure how this case should be handled.\r\nI'll leave some suggestions that occurred me:\r\n\r\n1- Fix this at package level, by avoiding running into the ```fix_staged_scripts``` function, somehow. (I don't like this solution because it'll not really fix anything besides this specific package)\r\n\r\n2- Do some try...catch with several encodings near the presented code above\r\n\r\n3- Have a metadata somewhere (maybe in the recipe?) to handle the file encoding. Fallback to the current case if none.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import isdir, join, dirname, isfile\n\n# importing setuptools patches distutils so that it knows how to find VC for python 2.7\nimport setuptools # noqa\n# Leverage the hard work done by setuptools/distutils to find vcvarsall using\n# either the registry or the VS**COMNTOOLS environment variable\nfrom distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall\nfrom distutils.msvc9compiler import Reg, WINSDK_BASE\n\nfrom .conda_interface import bits\n\nfrom conda_build import environ\nfrom conda_build.utils import _check_call, root_script_dir, path_prepended, copy_into\n\n\nassert sys.platform == 'win32'\n\n\nVS_VERSION_STRING = {\n '8.0': 'Visual Studio 8 2005',\n '9.0': 'Visual Studio 9 2008',\n '10.0': 'Visual Studio 10 2010',\n '11.0': 'Visual Studio 11 2012',\n '12.0': 'Visual Studio 12 2013',\n '14.0': 'Visual Studio 14 2015'\n}\n\n\ndef fix_staged_scripts(scripts_dir):\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' 
in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = f.readline().lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef build_vcvarsall_vs_path(version):\n \"\"\"\n Given the Visual Studio version, returns the default path to the\n Microsoft Visual Studio vcvarsall.bat file.\n Expected versions are of the form {9.0, 10.0, 12.0, 14.0}\n \"\"\"\n # Set up a load of paths that can be imported from the tests\n if 'ProgramFiles(x86)' in os.environ:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']\n else:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles']\n\n flatversion = str(version).replace('.', '')\n vstools = \"VS{0}COMNTOOLS\".format(flatversion)\n\n if vstools in os.environ:\n return os.path.join(os.environ[vstools], '..\\\\..\\\\VC\\\\vcvarsall.bat')\n else:\n # prefer looking at env var; fall back to program files defaults\n return os.path.join(PROGRAM_FILES_PATH,\n 'Microsoft Visual Studio {}'.format(version), 'VC',\n 'vcvarsall.bat')\n\n\ndef msvc_env_cmd(bits, config, override=None):\n arch_selector = 'x86' if bits == 32 else 'amd64'\n\n msvc_env_lines = []\n\n version = None\n if override is not None:\n version = override\n\n # The DISTUTILS_USE_SDK variable tells distutils to not try and validate\n # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.\n # For > 3.5 it literally just skips the validation logic.\n # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py\n # for more information.\n msvc_env_lines.append('set DISTUTILS_USE_SDK=1')\n # This is also required to hit the 'don't validate' logic on < 3.5.\n # For > 3.5 this is ignored.\n msvc_env_lines.append('set MSSdk=1')\n\n if not version:\n if config.PY3K and config.use_MSVC2015:\n version = '14.0'\n elif config.PY3K:\n version = '10.0'\n else:\n version = '9.0'\n\n if float(version) >= 14.0:\n # For Python 3.5+, ensure that we link with the dynamic runtime. 
See\n # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info\n msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\\\vcruntime{0}.dll'.format(\n version.replace('.', '')))\n\n vcvarsall_vs_path = build_vcvarsall_vs_path(version)\n\n def build_vcvarsall_cmd(cmd, arch=arch_selector):\n # Default argument `arch_selector` is defined above\n return 'call \"{cmd}\" {arch}'.format(cmd=cmd, arch=arch)\n\n msvc_env_lines.append('set \"VS_VERSION={}\"'.format(version))\n msvc_env_lines.append('set \"VS_MAJOR={}\"'.format(version.split('.')[0]))\n msvc_env_lines.append('set \"VS_YEAR={}\"'.format(VS_VERSION_STRING[version][-4:]))\n msvc_env_lines.append('set \"CMAKE_GENERATOR={}\"'.format(VS_VERSION_STRING[version] +\n {64: ' Win64', 32: ''}[bits]))\n # tell msys2 to ignore path conversions for issue-causing windows-style flags in build\n # See https://github.com/conda-forge/icu-feedstock/pull/5\n msvc_env_lines.append('set \"MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%\"')\n msvc_env_lines.append('set \"MSYS2_ENV_CONV_EXCL=CL\"')\n if version == '10.0':\n try:\n WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),\n 'installationfolder')\n WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')\n\n win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'\n win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)\n\n # There are two methods of building Python 3.3 and 3.4 extensions (both\n # of which required Visual Studio 2010 - as explained in the Python wiki\n # https://wiki.python.org/moin/WindowsCompilers)\n # 1) Use the Windows SDK 7.1\n # 2) Use Visual Studio 2010 (any edition)\n # However, VS2010 never shipped with a 64-bit compiler, so in this case\n # **only** option (1) applies. For this reason, we always try and\n # activate the Windows SDK first. Unfortunately, unsuccessfully setting\n # up the environment does **not EXIT 1** and therefore we must fall\n # back to attempting to set up VS2010.\n # DelayedExpansion is required for the SetEnv.cmd\n msvc_env_lines.append('Setlocal EnableDelayedExpansion')\n msvc_env_lines.append(win_sdk_cmd)\n # If the WindowsSDKDir environment variable has not been successfully\n # set then try activating VS2010\n msvc_env_lines.append('if not \"%WindowsSDKDir%\" == \"{}\" ( {} )'.format(\n WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))\n # sdk is not installed. 
Fall back to only trying VS 2010\n except KeyError:\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n elif version == '9.0':\n # Get the Visual Studio 2008 path (not the Visual C++ for Python path)\n # and get the 'vcvars64.bat' from inside the bin (in the directory above\n # that returned by distutils_find_vcvarsall)\n try:\n VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),\n 'bin', 'vcvars64.bat')\n # there's an exception if VS or the VC compiler for python are not actually installed.\n except (KeyError, TypeError):\n VCVARS64_VS9_BAT_PATH = None\n\n error1 = 'if errorlevel 1 {}'\n\n # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n # The Visual Studio 2008 Express edition does not properly contain\n # the amd64 build files, so we call the vcvars64.bat manually,\n # rather than using the vcvarsall.bat which would try and call the\n # missing bat file.\n if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))\n # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+\n # by using the logic provided by setuptools\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(distutils_find_vcvarsall(9))))\n else:\n # Visual Studio 14 or otherwise\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n\n return '\\n'.join(msvc_env_lines) + '\\n'\n\n\ndef build(m, bld_bat, config):\n with path_prepended(config.build_prefix):\n env = environ.get_dict(config=config, m=m)\n env[\"CONDA_BUILD_STATE\"] = \"BUILD\"\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = config.work_dir\n if os.path.isfile(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n # more debuggable with echo on\n fo.write('@echo on\\n')\n for key, value in env.items():\n fo.write('set \"{key}={value}\"\\n'.format(key=key, value=value))\n fo.write(msvc_env_cmd(bits=bits, config=config,\n override=m.get_value('build/msvc_compiler', None)))\n # Reset echo on, because MSVC scripts might have turned it off\n fo.write('@echo on\\n')\n fo.write('set \"INCLUDE={};%INCLUDE%\"\\n'.format(env[\"LIBRARY_INC\"]))\n fo.write('set \"LIB={};%LIB%\"\\n'.format(env[\"LIBRARY_LIB\"]))\n if config.activate:\n fo.write('call \"{conda_root}\\\\activate.bat\" \"{prefix}\"\\n'.format(\n conda_root=root_script_dir,\n prefix=config.build_prefix))\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = ['cmd.exe', '/c', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n\n fix_staged_scripts(join(config.build_prefix, 'Scripts'))\n", "path": "conda_build/windows.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import isdir, join, dirname, isfile\n\nimport bs4\n# importing setuptools patches distutils so that it knows how to find VC for python 2.7\nimport setuptools # noqa\n# Leverage the hard work done by setuptools/distutils to find vcvarsall using\n# either the registry or the VS**COMNTOOLS environment variable\nfrom distutils.msvc9compiler import find_vcvarsall as distutils_find_vcvarsall\nfrom distutils.msvc9compiler import Reg, WINSDK_BASE\n\nfrom .conda_interface import bits\n\nfrom conda_build import environ\nfrom conda_build.utils import _check_call, root_script_dir, 
path_prepended, copy_into\n\n\nassert sys.platform == 'win32'\n\n\nVS_VERSION_STRING = {\n '8.0': 'Visual Studio 8 2005',\n '9.0': 'Visual Studio 9 2008',\n '10.0': 'Visual Studio 10 2010',\n '11.0': 'Visual Studio 11 2012',\n '12.0': 'Visual Studio 12 2013',\n '14.0': 'Visual Studio 14 2015'\n}\n\n\ndef fix_staged_scripts(scripts_dir):\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = bs4.UnicodeDammit(f.readline()).unicode_markup.lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n copy_into(join(dirname(__file__), 'cli-%d.exe' % bits),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef build_vcvarsall_vs_path(version):\n \"\"\"\n Given the Visual Studio version, returns the default path to the\n Microsoft Visual Studio vcvarsall.bat file.\n Expected versions are of the form {9.0, 10.0, 12.0, 14.0}\n \"\"\"\n # Set up a load of paths that can be imported from the tests\n if 'ProgramFiles(x86)' in os.environ:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles(x86)']\n else:\n PROGRAM_FILES_PATH = os.environ['ProgramFiles']\n\n flatversion = str(version).replace('.', '')\n vstools = \"VS{0}COMNTOOLS\".format(flatversion)\n\n if vstools in os.environ:\n return os.path.join(os.environ[vstools], '..\\\\..\\\\VC\\\\vcvarsall.bat')\n else:\n # prefer looking at env var; fall back to program files defaults\n return os.path.join(PROGRAM_FILES_PATH,\n 'Microsoft Visual Studio {}'.format(version), 'VC',\n 'vcvarsall.bat')\n\n\ndef msvc_env_cmd(bits, config, override=None):\n arch_selector = 'x86' if bits == 32 else 'amd64'\n\n msvc_env_lines = []\n\n version = None\n if override is not None:\n version = override\n\n # The DISTUTILS_USE_SDK variable tells distutils to not try and validate\n # the MSVC compiler. For < 3.5 this still forcibly looks for 'cl.exe'.\n # For > 3.5 it literally just skips the validation logic.\n # See distutils _msvccompiler.py and msvc9compiler.py / msvccompiler.py\n # for more information.\n msvc_env_lines.append('set DISTUTILS_USE_SDK=1')\n # This is also required to hit the 'don't validate' logic on < 3.5.\n # For > 3.5 this is ignored.\n msvc_env_lines.append('set MSSdk=1')\n\n if not version:\n if config.PY3K and config.use_MSVC2015:\n version = '14.0'\n elif config.PY3K:\n version = '10.0'\n else:\n version = '9.0'\n\n if float(version) >= 14.0:\n # For Python 3.5+, ensure that we link with the dynamic runtime. 
See\n # http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info\n msvc_env_lines.append('set PY_VCRUNTIME_REDIST=%LIBRARY_BIN%\\\\vcruntime{0}.dll'.format(\n version.replace('.', '')))\n\n vcvarsall_vs_path = build_vcvarsall_vs_path(version)\n\n def build_vcvarsall_cmd(cmd, arch=arch_selector):\n # Default argument `arch_selector` is defined above\n return 'call \"{cmd}\" {arch}'.format(cmd=cmd, arch=arch)\n\n msvc_env_lines.append('set \"VS_VERSION={}\"'.format(version))\n msvc_env_lines.append('set \"VS_MAJOR={}\"'.format(version.split('.')[0]))\n msvc_env_lines.append('set \"VS_YEAR={}\"'.format(VS_VERSION_STRING[version][-4:]))\n msvc_env_lines.append('set \"CMAKE_GENERATOR={}\"'.format(VS_VERSION_STRING[version] +\n {64: ' Win64', 32: ''}[bits]))\n # tell msys2 to ignore path conversions for issue-causing windows-style flags in build\n # See https://github.com/conda-forge/icu-feedstock/pull/5\n msvc_env_lines.append('set \"MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out;%MSYS2_ARG_CONV_EXCL%\"')\n msvc_env_lines.append('set \"MSYS2_ENV_CONV_EXCL=CL\"')\n if version == '10.0':\n try:\n WIN_SDK_71_PATH = Reg.get_value(os.path.join(WINSDK_BASE, 'v7.1'),\n 'installationfolder')\n WIN_SDK_71_BAT_PATH = os.path.join(WIN_SDK_71_PATH, 'Bin', 'SetEnv.cmd')\n\n win_sdk_arch = '/Release /x86' if bits == 32 else '/Release /x64'\n win_sdk_cmd = build_vcvarsall_cmd(WIN_SDK_71_BAT_PATH, arch=win_sdk_arch)\n\n # There are two methods of building Python 3.3 and 3.4 extensions (both\n # of which required Visual Studio 2010 - as explained in the Python wiki\n # https://wiki.python.org/moin/WindowsCompilers)\n # 1) Use the Windows SDK 7.1\n # 2) Use Visual Studio 2010 (any edition)\n # However, VS2010 never shipped with a 64-bit compiler, so in this case\n # **only** option (1) applies. For this reason, we always try and\n # activate the Windows SDK first. Unfortunately, unsuccessfully setting\n # up the environment does **not EXIT 1** and therefore we must fall\n # back to attempting to set up VS2010.\n # DelayedExpansion is required for the SetEnv.cmd\n msvc_env_lines.append('Setlocal EnableDelayedExpansion')\n msvc_env_lines.append(win_sdk_cmd)\n # If the WindowsSDKDir environment variable has not been successfully\n # set then try activating VS2010\n msvc_env_lines.append('if not \"%WindowsSDKDir%\" == \"{}\" ( {} )'.format(\n WIN_SDK_71_PATH, build_vcvarsall_cmd(vcvarsall_vs_path)))\n # sdk is not installed. 
Fall back to only trying VS 2010\n except KeyError:\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n elif version == '9.0':\n # Get the Visual Studio 2008 path (not the Visual C++ for Python path)\n # and get the 'vcvars64.bat' from inside the bin (in the directory above\n # that returned by distutils_find_vcvarsall)\n try:\n VCVARS64_VS9_BAT_PATH = os.path.join(os.path.dirname(distutils_find_vcvarsall(9)),\n 'bin', 'vcvars64.bat')\n # there's an exception if VS or the VC compiler for python are not actually installed.\n except (KeyError, TypeError):\n VCVARS64_VS9_BAT_PATH = None\n\n error1 = 'if errorlevel 1 {}'\n\n # Prefer VS9 proper over Microsoft Visual C++ Compiler for Python 2.7\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n # The Visual Studio 2008 Express edition does not properly contain\n # the amd64 build files, so we call the vcvars64.bat manually,\n # rather than using the vcvarsall.bat which would try and call the\n # missing bat file.\n if arch_selector == 'amd64' and VCVARS64_VS9_BAT_PATH:\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(VCVARS64_VS9_BAT_PATH)))\n # Otherwise, fall back to icrosoft Visual C++ Compiler for Python 2.7+\n # by using the logic provided by setuptools\n msvc_env_lines.append(error1.format(\n build_vcvarsall_cmd(distutils_find_vcvarsall(9))))\n else:\n # Visual Studio 14 or otherwise\n msvc_env_lines.append(build_vcvarsall_cmd(vcvarsall_vs_path))\n\n return '\\n'.join(msvc_env_lines) + '\\n'\n\n\ndef build(m, bld_bat, config):\n with path_prepended(config.build_prefix):\n env = environ.get_dict(config=config, m=m)\n env[\"CONDA_BUILD_STATE\"] = \"BUILD\"\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = config.work_dir\n if os.path.isfile(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n # more debuggable with echo on\n fo.write('@echo on\\n')\n for key, value in env.items():\n fo.write('set \"{key}={value}\"\\n'.format(key=key, value=value))\n fo.write(msvc_env_cmd(bits=bits, config=config,\n override=m.get_value('build/msvc_compiler', None)))\n # Reset echo on, because MSVC scripts might have turned it off\n fo.write('@echo on\\n')\n fo.write('set \"INCLUDE={};%INCLUDE%\"\\n'.format(env[\"LIBRARY_INC\"]))\n fo.write('set \"LIB={};%LIB%\"\\n'.format(env[\"LIBRARY_LIB\"]))\n if config.activate:\n fo.write('call \"{conda_root}\\\\activate.bat\" \"{prefix}\"\\n'.format(\n conda_root=root_script_dir,\n prefix=config.build_prefix))\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = ['cmd.exe', '/c', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n\n fix_staged_scripts(join(config.build_prefix, 'Scripts'))\n", "path": "conda_build/windows.py"}]}
| 3,845 | 193 |
gh_patches_debug_34609
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-285
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for Message Reactions to ActivityHandler
ActivityHandler should be extended to include Message Reactions. This has already been added to the C# and JavaScript SDKs.
Here is a pointer to the JavaScript implementation:
https://github.com/microsoft/botbuilder-js/pull/1038
--- END ISSUE ---
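For orientation, here is a minimal sketch of how a bot subclass could consume the new handlers once `ActivityHandler` dispatches message reaction activities. The handler names below mirror the JavaScript implementation and are an assumption at this point, not the shipped Python API:
```python
# Illustrative sketch only: on_reactions_added / on_reactions_removed are assumed
# to exist after this issue is implemented; they are not yet part of the SDK.
from typing import List

from botbuilder.core import ActivityHandler, TurnContext
from botbuilder.schema import MessageReaction


class ReactionBot(ActivityHandler):
    async def on_reactions_added(
        self, message_reactions: List[MessageReaction], turn_context: TurnContext
    ):
        # Echo each added reaction (e.g. "like") back to the conversation.
        for reaction in message_reactions:
            await turn_context.send_activity(f"Thanks for the '{reaction.type}' reaction!")

    async def on_reactions_removed(
        self, message_reactions: List[MessageReaction], turn_context: TurnContext
    ):
        for reaction in message_reactions:
            await turn_context.send_activity(f"The '{reaction.type}' reaction was removed.")
```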
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-core/botbuilder/core/activity_handler.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from botbuilder.schema import ActivityTypes, ChannelAccount
5 from .turn_context import TurnContext
6
7
8 class ActivityHandler:
9 async def on_turn(self, turn_context: TurnContext):
10 if turn_context is None:
11 raise TypeError("ActivityHandler.on_turn(): turn_context cannot be None.")
12
13 if hasattr(turn_context, "activity") and turn_context.activity is None:
14 raise TypeError(
15 "ActivityHandler.on_turn(): turn_context must have a non-None activity."
16 )
17
18 if (
19 hasattr(turn_context.activity, "type")
20 and turn_context.activity.type is None
21 ):
22 raise TypeError(
23 "ActivityHandler.on_turn(): turn_context activity must have a non-None type."
24 )
25
26 if turn_context.activity.type == ActivityTypes.message:
27 await self.on_message_activity(turn_context)
28 elif turn_context.activity.type == ActivityTypes.conversation_update:
29 await self.on_conversation_update_activity(turn_context)
30 elif turn_context.activity.type == ActivityTypes.event:
31 await self.on_event_activity(turn_context)
32 else:
33 await self.on_unrecognized_activity_type(turn_context)
34
35 async def on_message_activity( # pylint: disable=unused-argument
36 self, turn_context: TurnContext
37 ):
38 return
39
40 async def on_conversation_update_activity(self, turn_context: TurnContext):
41 if (
42 turn_context.activity.members_added is not None
43 and turn_context.activity.members_added
44 ):
45 return await self.on_members_added_activity(
46 turn_context.activity.members_added, turn_context
47 )
48 if (
49 turn_context.activity.members_removed is not None
50 and turn_context.activity.members_removed
51 ):
52 return await self.on_members_removed_activity(
53 turn_context.activity.members_removed, turn_context
54 )
55 return
56
57 async def on_members_added_activity(
58 self, members_added: ChannelAccount, turn_context: TurnContext
59 ): # pylint: disable=unused-argument
60 return
61
62 async def on_members_removed_activity(
63 self, members_removed: ChannelAccount, turn_context: TurnContext
64 ): # pylint: disable=unused-argument
65 return
66
67 async def on_event_activity(self, turn_context: TurnContext):
68 if turn_context.activity.name == "tokens/response":
69 return await self.on_token_response_event(turn_context)
70
71 return await self.on_event(turn_context)
72
73 async def on_token_response_event( # pylint: disable=unused-argument
74 self, turn_context: TurnContext
75 ):
76 return
77
78 async def on_event( # pylint: disable=unused-argument
79 self, turn_context: TurnContext
80 ):
81 return
82
83 async def on_unrecognized_activity_type( # pylint: disable=unused-argument
84 self, turn_context: TurnContext
85 ):
86 return
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libraries/botbuilder-core/botbuilder/core/activity_handler.py b/libraries/botbuilder-core/botbuilder/core/activity_handler.py
--- a/libraries/botbuilder-core/botbuilder/core/activity_handler.py
+++ b/libraries/botbuilder-core/botbuilder/core/activity_handler.py
@@ -1,7 +1,8 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
+from typing import List
-from botbuilder.schema import ActivityTypes, ChannelAccount
+from botbuilder.schema import ActivityTypes, ChannelAccount, MessageReaction
from .turn_context import TurnContext
@@ -27,6 +28,8 @@
await self.on_message_activity(turn_context)
elif turn_context.activity.type == ActivityTypes.conversation_update:
await self.on_conversation_update_activity(turn_context)
+ elif turn_context.activity.type == ActivityTypes.message_reaction:
+ await self.on_message_reaction_activity(turn_context)
elif turn_context.activity.type == ActivityTypes.event:
await self.on_event_activity(turn_context)
else:
@@ -64,6 +67,27 @@
): # pylint: disable=unused-argument
return
+ async def on_message_reaction_activity(self, turn_context: TurnContext):
+ if turn_context.activity.reactions_added is not None:
+ await self.on_reactions_added(
+ turn_context.activity.reactions_added, turn_context
+ )
+
+ if turn_context.activity.reactions_removed is not None:
+ await self.on_reactions_removed(
+ turn_context.activity.reactions_removed, turn_context
+ )
+
+ async def on_reactions_added( # pylint: disable=unused-argument
+ self, message_reactions: List[MessageReaction], turn_context: TurnContext
+ ):
+ return
+
+ async def on_reactions_removed( # pylint: disable=unused-argument
+ self, message_reactions: List[MessageReaction], turn_context: TurnContext
+ ):
+ return
+
async def on_event_activity(self, turn_context: TurnContext):
if turn_context.activity.name == "tokens/response":
return await self.on_token_response_event(turn_context)
|
{"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/activity_handler.py b/libraries/botbuilder-core/botbuilder/core/activity_handler.py\n--- a/libraries/botbuilder-core/botbuilder/core/activity_handler.py\n+++ b/libraries/botbuilder-core/botbuilder/core/activity_handler.py\n@@ -1,7 +1,8 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n+from typing import List\n \n-from botbuilder.schema import ActivityTypes, ChannelAccount\n+from botbuilder.schema import ActivityTypes, ChannelAccount, MessageReaction\n from .turn_context import TurnContext\n \n \n@@ -27,6 +28,8 @@\n await self.on_message_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.conversation_update:\n await self.on_conversation_update_activity(turn_context)\n+ elif turn_context.activity.type == ActivityTypes.message_reaction:\n+ await self.on_message_reaction_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.event:\n await self.on_event_activity(turn_context)\n else:\n@@ -64,6 +67,27 @@\n ): # pylint: disable=unused-argument\n return\n \n+ async def on_message_reaction_activity(self, turn_context: TurnContext):\n+ if turn_context.activity.reactions_added is not None:\n+ await self.on_reactions_added(\n+ turn_context.activity.reactions_added, turn_context\n+ )\n+\n+ if turn_context.activity.reactions_removed is not None:\n+ await self.on_reactions_removed(\n+ turn_context.activity.reactions_removed, turn_context\n+ )\n+\n+ async def on_reactions_added( # pylint: disable=unused-argument\n+ self, message_reactions: List[MessageReaction], turn_context: TurnContext\n+ ):\n+ return\n+\n+ async def on_reactions_removed( # pylint: disable=unused-argument\n+ self, message_reactions: List[MessageReaction], turn_context: TurnContext\n+ ):\n+ return\n+\n async def on_event_activity(self, turn_context: TurnContext):\n if turn_context.activity.name == \"tokens/response\":\n return await self.on_token_response_event(turn_context)\n", "issue": "Add support for Message Reactions to ActivityHandler \nActivityHandler should be extended to include MessageReactions. This has now been added to the C# and The JavaScript.\r\n\r\nHere is a pointer to the JavaScript implementation:\r\n\r\nhttps://github.com/microsoft/botbuilder-js/pull/1038\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom botbuilder.schema import ActivityTypes, ChannelAccount\nfrom .turn_context import TurnContext\n\n\nclass ActivityHandler:\n async def on_turn(self, turn_context: TurnContext):\n if turn_context is None:\n raise TypeError(\"ActivityHandler.on_turn(): turn_context cannot be None.\")\n\n if hasattr(turn_context, \"activity\") and turn_context.activity is None:\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context must have a non-None activity.\"\n )\n\n if (\n hasattr(turn_context.activity, \"type\")\n and turn_context.activity.type is None\n ):\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context activity must have a non-None type.\"\n )\n\n if turn_context.activity.type == ActivityTypes.message:\n await self.on_message_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.conversation_update:\n await self.on_conversation_update_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.event:\n await self.on_event_activity(turn_context)\n else:\n await self.on_unrecognized_activity_type(turn_context)\n\n async def on_message_activity( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_conversation_update_activity(self, turn_context: TurnContext):\n if (\n turn_context.activity.members_added is not None\n and turn_context.activity.members_added\n ):\n return await self.on_members_added_activity(\n turn_context.activity.members_added, turn_context\n )\n if (\n turn_context.activity.members_removed is not None\n and turn_context.activity.members_removed\n ):\n return await self.on_members_removed_activity(\n turn_context.activity.members_removed, turn_context\n )\n return\n\n async def on_members_added_activity(\n self, members_added: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_members_removed_activity(\n self, members_removed: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_event_activity(self, turn_context: TurnContext):\n if turn_context.activity.name == \"tokens/response\":\n return await self.on_token_response_event(turn_context)\n\n return await self.on_event(turn_context)\n\n async def on_token_response_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_unrecognized_activity_type( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n", "path": "libraries/botbuilder-core/botbuilder/core/activity_handler.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nfrom typing import List\n\nfrom botbuilder.schema import ActivityTypes, ChannelAccount, MessageReaction\nfrom .turn_context import TurnContext\n\n\nclass ActivityHandler:\n async def on_turn(self, turn_context: TurnContext):\n if turn_context is None:\n raise TypeError(\"ActivityHandler.on_turn(): turn_context cannot be None.\")\n\n if hasattr(turn_context, \"activity\") and turn_context.activity is None:\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context must have a non-None activity.\"\n )\n\n if (\n hasattr(turn_context.activity, \"type\")\n and turn_context.activity.type is None\n ):\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context activity must have a non-None type.\"\n )\n\n if turn_context.activity.type == ActivityTypes.message:\n await self.on_message_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.conversation_update:\n await self.on_conversation_update_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.message_reaction:\n await self.on_message_reaction_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.event:\n await self.on_event_activity(turn_context)\n else:\n await self.on_unrecognized_activity_type(turn_context)\n\n async def on_message_activity( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_conversation_update_activity(self, turn_context: TurnContext):\n if (\n turn_context.activity.members_added is not None\n and turn_context.activity.members_added\n ):\n return await self.on_members_added_activity(\n turn_context.activity.members_added, turn_context\n )\n if (\n turn_context.activity.members_removed is not None\n and turn_context.activity.members_removed\n ):\n return await self.on_members_removed_activity(\n turn_context.activity.members_removed, turn_context\n )\n return\n\n async def on_members_added_activity(\n self, members_added: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_members_removed_activity(\n self, members_removed: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_message_reaction_activity(self, turn_context: TurnContext):\n if turn_context.activity.reactions_added is not None:\n await self.on_reactions_added(\n turn_context.activity.reactions_added, turn_context\n )\n\n if turn_context.activity.reactions_removed is not None:\n await self.on_reactions_removed(\n turn_context.activity.reactions_removed, turn_context\n )\n\n async def on_reactions_added( # pylint: disable=unused-argument\n self, message_reactions: List[MessageReaction], turn_context: TurnContext\n ):\n return\n\n async def on_reactions_removed( # pylint: disable=unused-argument\n self, message_reactions: List[MessageReaction], turn_context: TurnContext\n ):\n return\n\n async def on_event_activity(self, turn_context: TurnContext):\n if turn_context.activity.name == \"tokens/response\":\n return await self.on_token_response_event(turn_context)\n\n return await self.on_event(turn_context)\n\n async def on_token_response_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_unrecognized_activity_type( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n", "path": 
"libraries/botbuilder-core/botbuilder/core/activity_handler.py"}]}
| 1,103 | 474 |
gh_patches_debug_8428
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-1661
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add option to list tables in ClickHouse integration :bookmark_tabs:
When users create a connection to the ClickHouse database, it will be useful to show them a tip listing the available tables. To do this, we need a new method, `get_tables_list`, implemented in the ClickHouse integration class.
## Steps :male_detective: :female_detective:
- Frok MindsDB repo
- Add new implementation in https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/clickhouse/clickhouse.py#L25
- Make a PR to staging branch
## Additional rewards :1st_place_medal:
Each code PR brings :three: points for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear:. For more info, check out https://mindsdb.com/hacktoberfest/
--- END ISSUE ---
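As a rough illustration of the shape such a method could take, reusing the integration's existing `_query` helper (the exact query, filtering and return format below are assumptions for the sketch, not the final implementation):
```python
# Sketch only: assumes the Clickhouse integration's existing _query() helper,
# which posts SQL over ClickHouse's HTTP interface and returns a requests.Response.
def get_tables_list(self):
    q = """SELECT database, name
           FROM system.tables
           WHERE database NOT IN ('system')
           ORDER BY database, name
           FORMAT JSON"""
    rows = self._query(q).json().get('data', [])
    return [f"{row['database']}.{row['name']}" for row in rows]
```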
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/integrations/clickhouse/clickhouse.py`
Content:
```
1 import requests
2 from lightwood.api import dtype
3 from mindsdb.integrations.base import Integration
4 from mindsdb.utilities.log import log
5
6
7 class ClickhouseConnectionChecker:
8 def __init__(self, **kwargs):
9 self.host = kwargs.get("host")
10 self.port = kwargs.get("port")
11 self.user = kwargs.get("user")
12 self.password = kwargs.get("password")
13
14 def check_connection(self):
15 try:
16 res = requests.post(f"http://{self.host}:{self.port}",
17 data="select 1;",
18 params={'user': self.user, 'password': self.password})
19 connected = res.status_code == 200
20 except Exception:
21 connected = False
22 return connected
23
24
25 class Clickhouse(Integration, ClickhouseConnectionChecker):
26 def __init__(self, config, name, db_info):
27 super().__init__(config, name)
28 self.user = db_info.get('user', 'default')
29 self.password = db_info.get('password', None)
30 self.host = db_info.get('host')
31 self.port = db_info.get('port')
32
33 def _to_clickhouse_table(self, dtype_dict, predicted_cols, columns):
34 subtype_map = {
35 dtype.integer: 'Nullable(Int64)',
36 dtype.float: 'Nullable(Float64)',
37 dtype.binary: 'Nullable(UInt8)',
38 dtype.date: 'Nullable(Date)',
39 dtype.datetime: 'Nullable(Datetime)',
40 dtype.binary: 'Nullable(String)',
41 dtype.categorical: 'Nullable(String)',
42 dtype.tags: 'Nullable(String)',
43 dtype.image: 'Nullable(String)',
44 dtype.video: 'Nullable(String)',
45 dtype.audio: 'Nullable(String)',
46 dtype.short_text: 'Nullable(String)',
47 dtype.rich_text: 'Nullable(String)',
48 dtype.array: 'Nullable(String)'
49 }
50
51 column_declaration = []
52 for name in columns:
53 try:
54 col_subtype = dtype_dict[name]
55 new_type = subtype_map[col_subtype]
56 column_declaration.append(f' `{name}` {new_type} ')
57 if name in predicted_cols:
58 column_declaration.append(f' `{name}_original` {new_type} ')
59 except Exception as e:
60 log.error(f'Error: can not determine clickhouse data type for column {name}: {e}')
61
62 return column_declaration
63
64 def _query(self, query):
65 params = {'user': self.user}
66
67 if self.password is not None:
68 params['password'] = self.password
69
70 host = self.host
71 port = self.port
72
73 response = requests.post(f'http://{host}:{port}', data=query, params=params)
74
75 if response.status_code != 200:
76 raise Exception(f'Error: {response.content}\nQuery:{query}')
77
78 return response
79
80 def _get_mysql_user(self):
81 return f"{self.config['api']['mysql']['user']}_{self.name}"
82
83 def _escape_table_name(self, name):
84 return '`' + name.replace('`', '\\`') + '`'
85
86 def setup(self):
87 self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')
88 self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')
89
90 msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
91 msqyl_pass = self.config['api']['mysql']['password']
92 msqyl_user = self._get_mysql_user()
93
94 q = f"""
95 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (
96 name String,
97 status String,
98 accuracy String,
99 predict String,
100 select_data_query String,
101 external_datasource String,
102 training_options String
103 ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'predictors', '{msqyl_user}', '{msqyl_pass}')
104 """
105 self._query(q)
106 q = f"""
107 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (
108 command String
109 ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'commands', '{msqyl_user}', '{msqyl_pass}')
110 """
111 self._query(q)
112
113 def register_predictors(self, model_data_arr):
114 for model_meta in model_data_arr:
115 name = self._escape_table_name(model_meta['name'])
116
117 predict = model_meta['predict']
118 if not isinstance(predict, list):
119 predict = [predict]
120
121 columns_sql = ','.join(self._to_clickhouse_table(
122 model_meta['dtype_dict'],
123 predict,
124 list(model_meta['dtype_dict'].keys())
125 ))
126 columns_sql += ',`when_data` Nullable(String)'
127 columns_sql += ',`select_data_query` Nullable(String)'
128 columns_sql += ',`external_datasource` Nullable(String)'
129 for col in predict:
130 columns_sql += f',`{col}_confidence` Nullable(Float64)'
131
132 if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):
133 columns_sql += f',`{col}_min` Nullable(Float64)'
134 columns_sql += f',`{col}_max` Nullable(Float64)'
135 columns_sql += f',`{col}_explain` Nullable(String)'
136
137 msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
138 msqyl_pass = self.config['api']['mysql']['password']
139 msqyl_user = self._get_mysql_user()
140
141 self.unregister_predictor(model_meta['name'])
142 q = f"""
143 CREATE TABLE {self.mindsdb_database}.{name}
144 ({columns_sql}
145 ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', {name}, '{msqyl_user}', '{msqyl_pass}')
146 """
147 self._query(q)
148
149 def unregister_predictor(self, name):
150 q = f"""
151 drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
152 """
153 self._query(q)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/integrations/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -151,3 +151,13 @@
drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
"""
self._query(q)
+
+ def get_tables_list(self):
+ q = f"""SELECT database, table
+ FROM system.parts
+ WHERE active and database NOT IN ('system', 'mdb_system')
+ GROUP BY database, table
+ ORDER BY database, table;"""
+ tables_list = self._query(q)
+ tables= [f"{table[0]}.{table[1]}" for table in tables_list]
+ return tables
\ No newline at end of file
|
{"golden_diff": "diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py\n--- a/mindsdb/integrations/clickhouse/clickhouse.py\n+++ b/mindsdb/integrations/clickhouse/clickhouse.py\n@@ -151,3 +151,13 @@\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n+\n+ def get_tables_list(self):\n+ q = f\"\"\"SELECT database, table\n+ FROM system.parts\n+ WHERE active and database NOT IN ('system', 'mdb_system')\n+ GROUP BY database, table\n+ ORDER BY database, table;\"\"\"\n+ tables_list = self._query(q)\n+ tables= [f\"{table[0]}.{table[1]}\" for table in tables_list]\n+ return tables\n\\ No newline at end of file\n", "issue": "Add option to list tables in ClickHouse integration :bookmark_tabs: \nWhen users create a connection to the ClickHouse database it will be useful to show them tips with a list of tables. To be able to do this we need a new method `get_tables_list` implemented in the ClickHouse integration class.\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Frok MindsDB repo\r\n- Add new implementation in https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/clickhouse/clickhouse.py#L25\r\n- Make a PR to staging branch\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\n", "before_files": [{"content": "import requests\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass ClickhouseConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get(\"host\")\n self.port = kwargs.get(\"port\")\n self.user = kwargs.get(\"user\")\n self.password = kwargs.get(\"password\")\n\n def check_connection(self):\n try:\n res = requests.post(f\"http://{self.host}:{self.port}\",\n data=\"select 1;\",\n params={'user': self.user, 'password': self.password})\n connected = res.status_code == 200\n except Exception:\n connected = False\n return connected\n\n\nclass Clickhouse(Integration, ClickhouseConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user', 'default')\n self.password = db_info.get('password', None)\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n\n def _to_clickhouse_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'Nullable(Int64)',\n dtype.float: 'Nullable(Float64)',\n dtype.binary: 'Nullable(UInt8)',\n dtype.date: 'Nullable(Date)',\n dtype.datetime: 'Nullable(Datetime)',\n dtype.binary: 'Nullable(String)',\n dtype.categorical: 'Nullable(String)',\n dtype.tags: 'Nullable(String)',\n dtype.image: 'Nullable(String)',\n dtype.video: 'Nullable(String)',\n dtype.audio: 'Nullable(String)',\n dtype.short_text: 'Nullable(String)',\n dtype.rich_text: 'Nullable(String)',\n dtype.array: 'Nullable(String)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine clickhouse data type for column {name}: {e}')\n\n return column_declaration\n\n def 
_query(self, query):\n params = {'user': self.user}\n\n if self.password is not None:\n params['password'] = self.password\n\n host = self.host\n port = self.port\n\n response = requests.post(f'http://{host}:{port}', data=query, params=params)\n\n if response.status_code != 200:\n raise Exception(f'Error: {response.content}\\nQuery:{query}')\n\n return response\n\n def _get_mysql_user(self):\n return f\"{self.config['api']['mysql']['user']}_{self.name}\"\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '\\\\`') + '`'\n\n def setup(self):\n self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')\n\n msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])\n msqyl_pass = self.config['api']['mysql']['password']\n msqyl_user = self._get_mysql_user()\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name String,\n status String,\n accuracy String,\n predict String,\n select_data_query String,\n external_datasource String,\n training_options String\n ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'predictors', '{msqyl_user}', '{msqyl_pass}')\n \"\"\"\n self._query(q)\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command String\n ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'commands', '{msqyl_user}', '{msqyl_pass}')\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = self._escape_table_name(model_meta['name'])\n\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n\n columns_sql = ','.join(self._to_clickhouse_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` Nullable(String)'\n columns_sql += ',`select_data_query` Nullable(String)'\n columns_sql += ',`external_datasource` Nullable(String)'\n for col in predict:\n columns_sql += f',`{col}_confidence` Nullable(Float64)'\n\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',`{col}_min` Nullable(Float64)'\n columns_sql += f',`{col}_max` Nullable(Float64)'\n columns_sql += f',`{col}_explain` Nullable(String)'\n\n msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])\n msqyl_pass = self.config['api']['mysql']['password']\n msqyl_user = self._get_mysql_user()\n\n self.unregister_predictor(model_meta['name'])\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{name}\n ({columns_sql}\n ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', {name}, '{msqyl_user}', '{msqyl_pass}')\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n", "path": "mindsdb/integrations/clickhouse/clickhouse.py"}], "after_files": [{"content": "import requests\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass ClickhouseConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get(\"host\")\n self.port = kwargs.get(\"port\")\n self.user = kwargs.get(\"user\")\n self.password = kwargs.get(\"password\")\n\n def check_connection(self):\n try:\n res = requests.post(f\"http://{self.host}:{self.port}\",\n data=\"select 1;\",\n params={'user': self.user, 'password': self.password})\n connected = 
res.status_code == 200\n except Exception:\n connected = False\n return connected\n\n\nclass Clickhouse(Integration, ClickhouseConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user', 'default')\n self.password = db_info.get('password', None)\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n\n def _to_clickhouse_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'Nullable(Int64)',\n dtype.float: 'Nullable(Float64)',\n dtype.binary: 'Nullable(UInt8)',\n dtype.date: 'Nullable(Date)',\n dtype.datetime: 'Nullable(Datetime)',\n dtype.binary: 'Nullable(String)',\n dtype.categorical: 'Nullable(String)',\n dtype.tags: 'Nullable(String)',\n dtype.image: 'Nullable(String)',\n dtype.video: 'Nullable(String)',\n dtype.audio: 'Nullable(String)',\n dtype.short_text: 'Nullable(String)',\n dtype.rich_text: 'Nullable(String)',\n dtype.array: 'Nullable(String)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine clickhouse data type for column {name}: {e}')\n\n return column_declaration\n\n def _query(self, query):\n params = {'user': self.user}\n\n if self.password is not None:\n params['password'] = self.password\n\n host = self.host\n port = self.port\n\n response = requests.post(f'http://{host}:{port}', data=query, params=params)\n\n if response.status_code != 200:\n raise Exception(f'Error: {response.content}\\nQuery:{query}')\n\n return response\n\n def _get_mysql_user(self):\n return f\"{self.config['api']['mysql']['user']}_{self.name}\"\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '\\\\`') + '`'\n\n def setup(self):\n self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')\n\n msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])\n msqyl_pass = self.config['api']['mysql']['password']\n msqyl_user = self._get_mysql_user()\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name String,\n status String,\n accuracy String,\n predict String,\n select_data_query String,\n external_datasource String,\n training_options String\n ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'predictors', '{msqyl_user}', '{msqyl_pass}')\n \"\"\"\n self._query(q)\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command String\n ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'commands', '{msqyl_user}', '{msqyl_pass}')\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = self._escape_table_name(model_meta['name'])\n\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n\n columns_sql = ','.join(self._to_clickhouse_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` Nullable(String)'\n columns_sql += ',`select_data_query` Nullable(String)'\n columns_sql += ',`external_datasource` Nullable(String)'\n for col in predict:\n columns_sql += f',`{col}_confidence` Nullable(Float64)'\n\n if model_meta['dtype_dict'][col] in (dtype.integer, 
dtype.float):\n columns_sql += f',`{col}_min` Nullable(Float64)'\n columns_sql += f',`{col}_max` Nullable(Float64)'\n columns_sql += f',`{col}_explain` Nullable(String)'\n\n msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])\n msqyl_pass = self.config['api']['mysql']['password']\n msqyl_user = self._get_mysql_user()\n\n self.unregister_predictor(model_meta['name'])\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{name}\n ({columns_sql}\n ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', {name}, '{msqyl_user}', '{msqyl_pass}')\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_tables_list(self):\n q = f\"\"\"SELECT database, table\n FROM system.parts\n WHERE active and database NOT IN ('system', 'mdb_system')\n GROUP BY database, table\n ORDER BY database, table;\"\"\"\n tables_list = self._query(q)\n tables= [f\"{table[0]}.{table[1]}\" for table in tables_list]\n return tables", "path": "mindsdb/integrations/clickhouse/clickhouse.py"}]}
| 2,108 | 210 |
gh_patches_debug_3776
|
rasdani/github-patches
|
git_diff
|
esphome__esphome-docs-1150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add airquality wp6003 + am312 tutorial
Add air quality + am312 tutorial
## Description:
**Related issue (if applicable):** fixes <link to issue>
**Pull request in [esphome](https://github.com/esphome/esphome) with YAML changes (if applicable):** esphome/esphome#<esphome PR number goes here>
## Checklist:
- [ ] Branch: `next` is for changes and new documentation that will go public with the next ESPHome release. Fixes, changes and adjustments for the current release should be created against `current`.
- [ ] Link added in `/index.rst` when creating new documents for new components or cookbook.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # esphome documentation build configuration file, created by
5 # sphinx-quickstart on Mon Jan 22 21:44:07 2018.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 # import os
21 # import sys
22 # sys.path.insert(0, os.path.abspath('.'))
23 import hashlib
24 import os
25 import sys
26
27
28 sys.path.append(os.path.abspath("."))
29
30 # -- General configuration ------------------------------------------------
31
32 # If your documentation needs a minimal Sphinx version, state it here.
33 #
34 # needs_sphinx = '1.0'
35
36 # Add any Sphinx extension module names here, as strings. They can be
37 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
38 # ones.
39 extensions = [
40 "github",
41 "seo",
42 "sitemap",
43 "schema_doc",
44 ]
45
46 # Add any paths that contain templates here, relative to this directory.
47 templates_path = ["_templates"]
48
49 # The suffix(es) of source filenames.
50 # You can specify multiple suffix as a list of string:
51 #
52 # source_suffix = ['.rst', '.md']
53 source_suffix = ".rst"
54
55 # The master toctree document.
56 master_doc = "index"
57
58 # General information about the project.
59 project = "ESPHome"
60 copyright = "2019, Otto Winter"
61 html_show_copyright = False
62 html_show_sphinx = False
63 author = "Otto Winter"
64
65 # The version info for the project you're documenting, acts as replacement for
66 # |version| and |release|, also used in various other places throughout the
67 # built documents.
68 #
69 # The short X.Y version.
70 version = "1.17"
71 # The full version, including alpha/beta/rc tags.
72 release = "1.17.2"
73
74 # The language for content autogenerated by Sphinx. Refer to documentation
75 # for a list of supported languages.
76 #
77 # This is also used if you do content translation via gettext catalogs.
78 # Usually you set "language" from the command line for these cases.
79 language = "en"
80
81 # List of patterns, relative to source directory, that match files and
82 # directories to ignore when looking for source files.
83 # This patterns also effect to html_static_path and html_extra_path
84 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
85
86 # The reST default role (used for this markup: `text`) to use for all documents.
87 # default_role = 'cpp:any'
88
89 # The name of the Pygments (syntax highlighting) style to use.
90 pygments_style = "xcode"
91
92 highlight_language = "yaml"
93
94 primary_domain = None
95
96 # If true, `todo` and `todoList` produce output, else they produce nothing.
97 todo_include_todos = False
98
99
100 # -- Options for HTML output ----------------------------------------------
101
102 # The theme to use for HTML and HTML Help pages. See the documentation for
103 # a list of builtin themes.
104 #
105 html_theme = "alabaster"
106
107 # Theme options are theme-specific and customize the look and feel of a theme
108 # further. For a list of options available for each theme, see the
109 # documentation.
110 #
111 html_baseurl = os.getenv("BASE_URL", "https://esphome.io")
112 with open("_static/custom.css", "rb") as f:
113 custom_css_hash = hashlib.md5(f.read()).hexdigest()[:8]
114
115 html_theme_options = {
116 # 'logo': 'logo-full.png',
117 "logo_name": False,
118 "show_related": False,
119 "sidebar_collapse": True,
120 "fixed_sidebar": True,
121 "show_powered_by": False,
122 }
123
124 html_context = {
125 "custom_css_hash": custom_css_hash,
126 }
127
128 html_logo = "images/logo-text.svg"
129 html_copy_source = True
130 html_show_sourcelink = False
131 html_last_updated_fmt = None
132 html_use_smartypants = False
133 html_title = "ESPHome"
134
135 # Add any paths that contain custom static files (such as style sheets) here,
136 # relative to this directory. They are copied after the builtin static files,
137 # so a file named "default.css" will overwrite the builtin "default.css".
138 html_static_path = ["_static"]
139
140 # Custom sidebar templates, must be a dictionary that maps document names
141 # to template names.
142 #
143 # This is required for the alabaster theme
144 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
145 html_sidebars = {
146 "**": [
147 # 'about.html',
148 "searchbox.html",
149 "localtoc.html",
150 ]
151 }
152
153
154 # -- Options for HTMLHelp output ------------------------------------------
155
156 # Output file base name for HTML help builder.
157 htmlhelp_basename = "esphomedoc"
158
159
160 # -- Options for LaTeX output ---------------------------------------------
161
162 latex_elements = {
163 # The paper size ('letterpaper' or 'a4paper').
164 #
165 # 'papersize': 'letterpaper',
166 # The font size ('10pt', '11pt' or '12pt').
167 #
168 # 'pointsize': '10pt',
169 # Additional stuff for the LaTeX preamble.
170 #
171 # 'preamble': '',
172 # Latex figure (float) alignment
173 #
174 # 'figure_align': 'htbp',
175 }
176
177 # Grouping the document tree into LaTeX files. List of tuples
178 # (source start file, target name, title,
179 # author, documentclass [howto, manual, or own class]).
180 latex_documents = [
181 (master_doc, "esphome.tex", "ESPHome Documentation", "Otto Winter", "manual"),
182 ]
183
184 latex_engine = "xelatex"
185
186
187 # -- Options for manual page output ---------------------------------------
188
189 # One entry per manual page. List of tuples
190 # (source start file, name, description, authors, manual section).
191 man_pages = [(master_doc, "esphome", "ESPHome Documentation", [author], 1)]
192
193
194 # -- Options for Texinfo output -------------------------------------------
195
196 # Grouping the document tree into Texinfo files. List of tuples
197 # (source start file, target name, title, author,
198 # dir menu entry, description, category)
199 texinfo_documents = [
200 (
201 master_doc,
202 "esphome",
203 "ESPHome Documentation",
204 author,
205 "esphome",
206 "One line description of project.",
207 "Miscellaneous",
208 ),
209 ]
210 linkcheck_ignore = [r"https://github.com/.*", r"https://discord.gg/.*"]
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -67,9 +67,9 @@
# built documents.
#
# The short X.Y version.
-version = "1.17"
+version = "1.18"
# The full version, including alpha/beta/rc tags.
-release = "1.17.2"
+release = "1.18.0b1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
|
{"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -67,9 +67,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = \"1.17\"\n+version = \"1.18\"\n # The full version, including alpha/beta/rc tags.\n-release = \"1.17.2\"\n+release = \"1.18.0b1\"\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n", "issue": "Add airquality wp6003 + am312 tutorial\nAdd air quality + am312 tutorial\r\n\r\n## Description:\r\n\r\n\r\n**Related issue (if applicable):** fixes <link to issue>\r\n\r\n**Pull request in [esphome](https://github.com/esphome/esphome) with YAML changes (if applicable):** esphome/esphome#<esphome PR number goes here>\r\n\r\n## Checklist:\r\n\r\n - [ ] Branch: `next` is for changes and new documentation that will go public with the next ESPHome release. Fixes, changes and adjustments for the current release should be created against `current`.\r\n - [ ] Link added in `/index.rst` when creating new documents for new components or cookbook.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# esphome documentation build configuration file, created by\n# sphinx-quickstart on Mon Jan 22 21:44:07 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport hashlib\nimport os\nimport sys\n\n\nsys.path.append(os.path.abspath(\".\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"github\",\n \"seo\",\n \"sitemap\",\n \"schema_doc\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"ESPHome\"\ncopyright = \"2019, Otto Winter\"\nhtml_show_copyright = False\nhtml_show_sphinx = False\nauthor = \"Otto Winter\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"1.17\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"1.17.2\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = 'cpp:any'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"xcode\"\n\nhighlight_language = \"yaml\"\n\nprimary_domain = None\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_baseurl = os.getenv(\"BASE_URL\", \"https://esphome.io\")\nwith open(\"_static/custom.css\", \"rb\") as f:\n custom_css_hash = hashlib.md5(f.read()).hexdigest()[:8]\n\nhtml_theme_options = {\n # 'logo': 'logo-full.png',\n \"logo_name\": False,\n \"show_related\": False,\n \"sidebar_collapse\": True,\n \"fixed_sidebar\": True,\n \"show_powered_by\": False,\n}\n\nhtml_context = {\n \"custom_css_hash\": custom_css_hash,\n}\n\nhtml_logo = \"images/logo-text.svg\"\nhtml_copy_source = True\nhtml_show_sourcelink = False\nhtml_last_updated_fmt = None\nhtml_use_smartypants = False\nhtml_title = \"ESPHome\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n \"**\": [\n # 'about.html',\n \"searchbox.html\",\n \"localtoc.html\",\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"esphomedoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"esphome.tex\", \"ESPHome Documentation\", \"Otto Winter\", \"manual\"),\n]\n\nlatex_engine = \"xelatex\"\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"esphome\", \"ESPHome Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"esphome\",\n \"ESPHome Documentation\",\n author,\n \"esphome\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\nlinkcheck_ignore = [r\"https://github.com/.*\", r\"https://discord.gg/.*\"]\n", "path": "conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# esphome documentation build configuration file, created by\n# sphinx-quickstart on Mon Jan 22 21:44:07 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport hashlib\nimport os\nimport sys\n\n\nsys.path.append(os.path.abspath(\".\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"github\",\n \"seo\",\n \"sitemap\",\n \"schema_doc\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"ESPHome\"\ncopyright = \"2019, Otto Winter\"\nhtml_show_copyright = False\nhtml_show_sphinx = False\nauthor = \"Otto Winter\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"1.18\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"1.18.0b1\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = 'cpp:any'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"xcode\"\n\nhighlight_language = \"yaml\"\n\nprimary_domain = None\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_baseurl = os.getenv(\"BASE_URL\", \"https://esphome.io\")\nwith open(\"_static/custom.css\", \"rb\") as f:\n custom_css_hash = hashlib.md5(f.read()).hexdigest()[:8]\n\nhtml_theme_options = {\n # 'logo': 'logo-full.png',\n \"logo_name\": False,\n \"show_related\": False,\n \"sidebar_collapse\": True,\n \"fixed_sidebar\": True,\n \"show_powered_by\": False,\n}\n\nhtml_context = {\n \"custom_css_hash\": custom_css_hash,\n}\n\nhtml_logo = \"images/logo-text.svg\"\nhtml_copy_source = True\nhtml_show_sourcelink = False\nhtml_last_updated_fmt = None\nhtml_use_smartypants = False\nhtml_title = \"ESPHome\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n \"**\": [\n # 'about.html',\n \"searchbox.html\",\n \"localtoc.html\",\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"esphomedoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"esphome.tex\", \"ESPHome Documentation\", \"Otto Winter\", \"manual\"),\n]\n\nlatex_engine = \"xelatex\"\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"esphome\", \"ESPHome Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"esphome\",\n \"ESPHome Documentation\",\n author,\n \"esphome\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\nlinkcheck_ignore = [r\"https://github.com/.*\", r\"https://discord.gg/.*\"]\n", "path": "conf.py"}]}
| 2,445 | 118 |
gh_patches_debug_4811
|
rasdani/github-patches
|
git_diff
|
pytorch__text-254
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use getattr rather than __dict__ in Batch (adds support for __slots__ in Example subclasses)
This is a proposal to change [one line of code](https://github.com/pytorch/text/blob/c839a7934930819be7e240ea972e4d600966afdc/torchtext/data/batch.py#L27) in Batch.py
I suggest `[x.__dict__[name] for x in data]` should become `[getattr(x, name) for x in data]`
A major advantage to doing this is compatibility with `__slots__`. A class that is going to be instantiated for every data point is an ideal use-case for `__slots__`, which reduces per-instance memory overhead. It makes sense for specific projects to subclass Example using `__slots__` with the known fields of the project. If you do, the instances will have empty `__dict__`s but the slots can be accessed via `getattr`.
I don't _think_ this change would break anything...
--- END ISSUE ---
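To make the proposal above concrete, here is a small self-contained sketch (the class names are illustrative and not part of torchtext) showing that `getattr` covers both dict-backed and slotted instances, while a direct `__dict__` lookup does not:

```python
# Minimal sketch, independent of torchtext, contrasting attribute access
# through getattr with a direct __dict__ lookup.

class PlainExample:
    """Stand-in for an Example whose fields live in the instance __dict__."""
    def __init__(self, text, label):
        self.text = text
        self.label = label


class SlottedExample:
    """Hypothetical Example subclass that declares its fields via __slots__."""
    __slots__ = ("text", "label")

    def __init__(self, text, label):
        self.text = text
        self.label = label


data = [PlainExample("hello", 1), SlottedExample("world", 0)]

# getattr works for both kinds of instances.
print([getattr(x, "text") for x in data])        # ['hello', 'world']

# The __dict__ lookup fails for the slotted instance, which stores its
# values in slots and (here) has no instance __dict__ at all.
try:
    print([x.__dict__["text"] for x in data])
except AttributeError as exc:
    print("lookup via __dict__ failed:", exc)
```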
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/batch.py`
Content:
```
1 from torch import typename
2 from torch.tensor import _TensorBase
3
4
5 class Batch(object):
6 """Defines a batch of examples along with its Fields.
7
8 Attributes:
9 batch_size: Number of examples in the batch.
10 dataset: A reference to the dataset object the examples come from
11 (which itself contains the dataset's Field objects).
12 train: Whether the batch is from a training set.
13
14 Also stores the Variable for each column in the batch as an attribute.
15 """
16
17 def __init__(self, data=None, dataset=None, device=None, train=True):
18 """Create a Batch from a list of examples."""
19 if data is not None:
20 self.batch_size = len(data)
21 self.dataset = dataset
22 self.train = train
23 self.fields = dataset.fields.keys() # copy field names
24
25 for (name, field) in dataset.fields.items():
26 if field is not None:
27 batch = [x.__dict__[name] for x in data]
28 setattr(self, name, field.process(batch, device=device, train=train))
29
30 @classmethod
31 def fromvars(cls, dataset, batch_size, train=True, **kwargs):
32 """Create a Batch directly from a number of Variables."""
33 batch = cls()
34 batch.batch_size = batch_size
35 batch.dataset = dataset
36 batch.train = train
37 for k, v in kwargs.items():
38 setattr(batch, k, v)
39 return batch
40
41 def __repr__(self):
42 return str(self)
43
44 def __str__(self):
45 if not self.__dict__:
46 return 'Empty {} instance'.format(typename(self))
47
48 var_strs = '\n'.join(['\t[.' + name + ']' + ":" + _short_str(getattr(self, name))
49 for name in self.fields if hasattr(self, name)])
50
51 data_str = (' from {}'.format(self.dataset.name.upper())
52 if hasattr(self.dataset, 'name') and
53 isinstance(self.dataset.name, str) else '')
54
55 strt = '[{} of size {}{}]\n{}'.format(typename(self),
56 self.batch_size, data_str, var_strs)
57 return '\n' + strt
58
59 def __len__(self):
60 return self.batch_size
61
62
63 def _short_str(tensor):
64 # unwrap variable to tensor
65 if hasattr(tensor, 'data'):
66 tensor = tensor.data
67
68 # fallback in case of wrong argument type
69 if issubclass(type(tensor), _TensorBase) is False:
70 return str(tensor)
71
72 # copied from torch _tensor_str
73 size_str = 'x'.join(str(size) for size in tensor.size())
74 device_str = '' if not tensor.is_cuda else \
75 ' (GPU {})'.format(tensor.get_device())
76 strt = '[{} of size {}{}]'.format(typename(tensor),
77 size_str, device_str)
78 return strt
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchtext/data/batch.py b/torchtext/data/batch.py
--- a/torchtext/data/batch.py
+++ b/torchtext/data/batch.py
@@ -24,7 +24,7 @@
for (name, field) in dataset.fields.items():
if field is not None:
- batch = [x.__dict__[name] for x in data]
+ batch = [getattr(x, name) for x in data]
setattr(self, name, field.process(batch, device=device, train=train))
@classmethod
|
{"golden_diff": "diff --git a/torchtext/data/batch.py b/torchtext/data/batch.py\n--- a/torchtext/data/batch.py\n+++ b/torchtext/data/batch.py\n@@ -24,7 +24,7 @@\n \n for (name, field) in dataset.fields.items():\n if field is not None:\n- batch = [x.__dict__[name] for x in data]\n+ batch = [getattr(x, name) for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n \n @classmethod\n", "issue": "Use getattr rather than __dict__ in Batch (adds support for __slots__ in Example subclasses)\nThis is a proposal to change [one line of code](https://github.com/pytorch/text/blob/c839a7934930819be7e240ea972e4d600966afdc/torchtext/data/batch.py#L27) in Batch.py\r\n\r\nI suggest `[x.__dict__[name] for x in data]` should become `[getattr(x, name) for x in data]`\r\n\r\nA major advantage to doing this is compatibility with `__slots__`. A class that is going to be instantiated for every data point is an ideal use-case for `__slots__`, which reduces per-instance memory overhead. It makes sense for specific projects to subclass Example using `__slots__` with the known fields of the project. If you do, the instances will have empty `__dicts__` but the slots can be accessed via `getattr`.\r\n\r\nI don't _think_ this change would break anything...\n", "before_files": [{"content": "from torch import typename\nfrom torch.tensor import _TensorBase\n\n\nclass Batch(object):\n \"\"\"Defines a batch of examples along with its Fields.\n\n Attributes:\n batch_size: Number of examples in the batch.\n dataset: A reference to the dataset object the examples come from\n (which itself contains the dataset's Field objects).\n train: Whether the batch is from a training set.\n\n Also stores the Variable for each column in the batch as an attribute.\n \"\"\"\n\n def __init__(self, data=None, dataset=None, device=None, train=True):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n self.dataset = dataset\n self.train = train\n self.fields = dataset.fields.keys() # copy field names\n\n for (name, field) in dataset.fields.items():\n if field is not None:\n batch = [x.__dict__[name] for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n\n @classmethod\n def fromvars(cls, dataset, batch_size, train=True, **kwargs):\n \"\"\"Create a Batch directly from a number of Variables.\"\"\"\n batch = cls()\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if not self.__dict__:\n return 'Empty {} instance'.format(typename(self))\n\n var_strs = '\\n'.join(['\\t[.' 
+ name + ']' + \":\" + _short_str(getattr(self, name))\n for name in self.fields if hasattr(self, name)])\n\n data_str = (' from {}'.format(self.dataset.name.upper())\n if hasattr(self.dataset, 'name') and\n isinstance(self.dataset.name, str) else '')\n\n strt = '[{} of size {}{}]\\n{}'.format(typename(self),\n self.batch_size, data_str, var_strs)\n return '\\n' + strt\n\n def __len__(self):\n return self.batch_size\n\n\ndef _short_str(tensor):\n # unwrap variable to tensor\n if hasattr(tensor, 'data'):\n tensor = tensor.data\n\n # fallback in case of wrong argument type\n if issubclass(type(tensor), _TensorBase) is False:\n return str(tensor)\n\n # copied from torch _tensor_str\n size_str = 'x'.join(str(size) for size in tensor.size())\n device_str = '' if not tensor.is_cuda else \\\n ' (GPU {})'.format(tensor.get_device())\n strt = '[{} of size {}{}]'.format(typename(tensor),\n size_str, device_str)\n return strt\n", "path": "torchtext/data/batch.py"}], "after_files": [{"content": "from torch import typename\nfrom torch.tensor import _TensorBase\n\n\nclass Batch(object):\n \"\"\"Defines a batch of examples along with its Fields.\n\n Attributes:\n batch_size: Number of examples in the batch.\n dataset: A reference to the dataset object the examples come from\n (which itself contains the dataset's Field objects).\n train: Whether the batch is from a training set.\n\n Also stores the Variable for each column in the batch as an attribute.\n \"\"\"\n\n def __init__(self, data=None, dataset=None, device=None, train=True):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n self.dataset = dataset\n self.train = train\n self.fields = dataset.fields.keys() # copy field names\n\n for (name, field) in dataset.fields.items():\n if field is not None:\n batch = [getattr(x, name) for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n\n @classmethod\n def fromvars(cls, dataset, batch_size, train=True, **kwargs):\n \"\"\"Create a Batch directly from a number of Variables.\"\"\"\n batch = cls()\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if not self.__dict__:\n return 'Empty {} instance'.format(typename(self))\n\n var_strs = '\\n'.join(['\\t[.' + name + ']' + \":\" + _short_str(getattr(self, name))\n for name in self.fields if hasattr(self, name)])\n\n data_str = (' from {}'.format(self.dataset.name.upper())\n if hasattr(self.dataset, 'name') and\n isinstance(self.dataset.name, str) else '')\n\n strt = '[{} of size {}{}]\\n{}'.format(typename(self),\n self.batch_size, data_str, var_strs)\n return '\\n' + strt\n\n def __len__(self):\n return self.batch_size\n\n\ndef _short_str(tensor):\n # unwrap variable to tensor\n if hasattr(tensor, 'data'):\n tensor = tensor.data\n\n # fallback in case of wrong argument type\n if issubclass(type(tensor), _TensorBase) is False:\n return str(tensor)\n\n # copied from torch _tensor_str\n size_str = 'x'.join(str(size) for size in tensor.size())\n device_str = '' if not tensor.is_cuda else \\\n ' (GPU {})'.format(tensor.get_device())\n strt = '[{} of size {}{}]'.format(typename(tensor),\n size_str, device_str)\n return strt\n", "path": "torchtext/data/batch.py"}]}
| 1,247 | 124 |
gh_patches_debug_16345
|
rasdani/github-patches
|
git_diff
|
Pyomo__pyomo-573
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ipopt has_capability('integer') returns True
```
>>> import pyomo.environ as pe
>>> pe.SolverFactory('ipopt').has_capability('integer')
True
```
I think this should return False. There is a comment in the code that says returning False might create headaches for some people, but I don't see how. Can I change this?
--- END ISSUE ---
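The practical risk is that downstream code uses `has_capability` to decide whether a model can be handed to a solver at all. The guard below is a minimal sketch of that pattern (the helper is an assumption, not code from Pyomo itself); with Ipopt advertising `integer = True`, it would happily pass a MIP to a purely continuous NLP solver:

```python
# Illustrative guard around SolverFactory.has_capability(); the helper name
# and the exact check are assumptions, not Pyomo library code.
import pyomo.environ as pe


def solve_if_supported(model, solver_name):
    opt = pe.SolverFactory(solver_name)
    # Does the model contain any binary/integer variables?
    has_discrete_vars = any(
        v.is_integer() or v.is_binary()
        for v in model.component_data_objects(pe.Var, active=True)
    )
    if has_discrete_vars and not opt.has_capability("integer"):
        raise ValueError(f"{solver_name} does not support integer variables")
    return opt.solve(model)
```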
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/solvers/plugins/solvers/IPOPT.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 import os
12
13 import pyutilib.services
14 import pyutilib.misc
15
16 import pyomo.common.plugin
17 from pyomo.opt.base import *
18 from pyomo.opt.base.solvers import _extract_version
19 from pyomo.opt.results import *
20 from pyomo.opt.solver import *
21
22 import logging
23 logger = logging.getLogger('pyomo.solvers')
24
25 try:
26 unicode
27 except:
28 basestring = str
29
30 class IPOPT(SystemCallSolver):
31 """
32 An interface to the Ipopt optimizer that uses the AMPL Solver Library.
33 """
34
35 pyomo.common.plugin.alias('ipopt', doc='The Ipopt NLP solver')
36
37 def __init__(self, **kwds):
38 #
39 # Call base constructor
40 #
41 kwds["type"] = "ipopt"
42 super(IPOPT, self).__init__(**kwds)
43 #
44 # Setup valid problem formats, and valid results for each problem format
45 # Also set the default problem and results formats.
46 #
47 self._valid_problem_formats=[ProblemFormat.nl]
48 self._valid_result_formats = {}
49 self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol]
50 self.set_problem_format(ProblemFormat.nl)
51
52 # Note: Undefined capabilities default to 'None'
53 self._capabilities = pyutilib.misc.Options()
54 self._capabilities.linear = True
55 # Should we set this to False? Doing so might cause
56 # a headache for some folks.
57 self._capabilities.integer = True
58 self._capabilities.quadratic_objective = True
59 self._capabilities.quadratic_constraint = True
60 self._capabilities.sos1 = True
61 self._capabilities.sos2 = True
62
63 def _default_results_format(self, prob_format):
64 return ResultsFormat.sol
65
66 def _default_executable(self):
67 executable = pyutilib.services.registered_executable("ipopt")
68 if executable is None:
69 logger.warning("Could not locate the 'ipopt' executable, "
70 "which is required for solver %s" % self.name)
71 self.enable = False
72 return None
73 return executable.get_path()
74
75 def _get_version(self):
76 """
77 Returns a tuple describing the solver executable version.
78 """
79 solver_exec = self.executable()
80 if solver_exec is None:
81 return _extract_version('')
82 results = pyutilib.subprocess.run( [solver_exec,"-v"], timelimit=1 )
83 return _extract_version(results[1])
84
85 def create_command_line(self, executable, problem_files):
86
87 assert(self._problem_format == ProblemFormat.nl)
88 assert(self._results_format == ResultsFormat.sol)
89
90 #
91 # Define log file
92 #
93 if self._log_file is None:
94 self._log_file = pyutilib.services.TempfileManager.\
95 create_tempfile(suffix="_ipopt.log")
96
97 fname = problem_files[0]
98 if '.' in fname:
99 tmp = fname.split('.')
100 if len(tmp) > 2:
101 fname = '.'.join(tmp[:-1])
102 else:
103 fname = tmp[0]
104 self._soln_file = fname+".sol"
105
106 #
107 # Define results file (since an external parser is used)
108 #
109 self._results_file = self._soln_file
110
111 #
112 # Define command line
113 #
114 env=os.environ.copy()
115 #
116 # Merge the PYOMO_AMPLFUNC (externals defined within
117 # Pyomo/Pyomo) with any user-specified external function
118 # libraries
119 #
120 if 'PYOMO_AMPLFUNC' in env:
121 if 'AMPLFUNC' in env:
122 env['AMPLFUNC'] += "\n" + env['PYOMO_AMPLFUNC']
123 else:
124 env['AMPLFUNC'] = env['PYOMO_AMPLFUNC']
125
126 cmd = [executable, problem_files[0], '-AMPL']
127 if self._timer:
128 cmd.insert(0, self._timer)
129
130 env_opt = []
131 of_opt = []
132 ofn_option_used = False
133 for key in self.options:
134 if key == 'solver':
135 continue
136 elif key.startswith("OF_"):
137 assert len(key) > 3
138 of_opt.append((key[3:], self.options[key]))
139 else:
140 if key == "option_file_name":
141 ofn_option_used = True
142 if isinstance(self.options[key], basestring) and ' ' in self.options[key]:
143 env_opt.append(key+"=\""+str(self.options[key])+"\"")
144 cmd.append(str(key)+"="+str(self.options[key]))
145 else:
146 env_opt.append(key+"="+str(self.options[key]))
147 cmd.append(str(key)+"="+str(self.options[key]))
148
149 if len(of_opt) > 0:
150 # If the 'option_file_name' command-line option
151 # was used, we don't know if we should overwrite,
152 # merge it, or it is was a mistake, so raise an
153 # exception. Maybe this can be changed.
154 if ofn_option_used:
155 raise ValueError(
156 "The 'option_file_name' command-line "
157 "option for Ipopt can not be used "
158 "when specifying options for the "
159 "options file (i.e., options that "
160 "start with 'OF_'")
161
162 # Now check if an 'ipopt.opt' file exists in the
163 # current working directory. If so, we need to
164 # make it clear that this file will be ignored.
165 default_of_name = os.path.join(os.getcwd(), 'ipopt.opt')
166 if os.path.exists(default_of_name):
167 logger.warning("A file named '%s' exists in "
168 "the current working directory, but "
169 "Ipopt options file options (i.e., "
170 "options that start with 'OF_') were "
171 "provided. The options file '%s' will "
172 "be ignored." % (default_of_name,
173 default_of_name))
174
175 # Now write the new options file
176 options_filename = pyutilib.services.TempfileManager.\
177 create_tempfile(suffix="_ipopt.opt")
178 with open(options_filename, "w") as f:
179 for key, val in of_opt:
180 f.write(key+" "+str(val)+"\n")
181
182 # Now set the command-line option telling Ipopt
183 # to use this file
184 env_opt.append('option_file_name="'+str(options_filename)+'"')
185 cmd.append('option_file_name='+str(options_filename))
186
187 envstr = "%s_options" % self.options.solver
188 # Merge with any options coming in through the environment
189 env[envstr] = " ".join(env_opt)
190
191 return pyutilib.misc.Bunch(cmd=cmd, log_file=self._log_file, env=env)
192
193 def process_output(self, rc):
194 if os.path.exists(self._results_file):
195 return super(IPOPT, self).process_output(rc)
196 else:
197 res = SolverResults()
198 res.solver.status = SolverStatus.warning
199 res.solver.termination_condition = TerminationCondition.other
200 if os.path.exists(self._log_file):
201 with open(self._log_file) as f:
202 for line in f:
203 if "TOO_FEW_DEGREES_OF_FREEDOM" in line:
204 res.solver.message = line.split(':')[2].strip()
205 assert "degrees of freedom" in res.solver.message
206 return res
207
208 pyutilib.services.register_executable(name="ipopt")
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyomo/solvers/plugins/solvers/IPOPT.py b/pyomo/solvers/plugins/solvers/IPOPT.py
--- a/pyomo/solvers/plugins/solvers/IPOPT.py
+++ b/pyomo/solvers/plugins/solvers/IPOPT.py
@@ -52,13 +52,11 @@
# Note: Undefined capabilities default to 'None'
self._capabilities = pyutilib.misc.Options()
self._capabilities.linear = True
- # Should we set this to False? Doing so might cause
- # a headache for some folks.
- self._capabilities.integer = True
+ self._capabilities.integer = False
self._capabilities.quadratic_objective = True
self._capabilities.quadratic_constraint = True
- self._capabilities.sos1 = True
- self._capabilities.sos2 = True
+ self._capabilities.sos1 = False
+ self._capabilities.sos2 = False
def _default_results_format(self, prob_format):
return ResultsFormat.sol
|
{"golden_diff": "diff --git a/pyomo/solvers/plugins/solvers/IPOPT.py b/pyomo/solvers/plugins/solvers/IPOPT.py\n--- a/pyomo/solvers/plugins/solvers/IPOPT.py\n+++ b/pyomo/solvers/plugins/solvers/IPOPT.py\n@@ -52,13 +52,11 @@\n # Note: Undefined capabilities default to 'None'\n self._capabilities = pyutilib.misc.Options()\n self._capabilities.linear = True\n- # Should we set this to False? Doing so might cause\n- # a headache for some folks.\n- self._capabilities.integer = True\n+ self._capabilities.integer = False\n self._capabilities.quadratic_objective = True\n self._capabilities.quadratic_constraint = True\n- self._capabilities.sos1 = True\n- self._capabilities.sos2 = True\n+ self._capabilities.sos1 = False\n+ self._capabilities.sos2 = False\n \n def _default_results_format(self, prob_format):\n return ResultsFormat.sol\n", "issue": "Ipopt has_capability('integer') returns True\n```\r\n>>> import pyomo.environ as pe\r\n>>> pe.SolverFactory('ipopt').has_capability('integer')\r\nTrue\r\n```\r\n\r\nI think this should return False. There is a comment in the code that says returning False might create headaches for some people, but I don't see how. Can I change this?\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and \n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain \n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport os\n\nimport pyutilib.services\nimport pyutilib.misc\n\nimport pyomo.common.plugin\nfrom pyomo.opt.base import *\nfrom pyomo.opt.base.solvers import _extract_version\nfrom pyomo.opt.results import *\nfrom pyomo.opt.solver import *\n\nimport logging\nlogger = logging.getLogger('pyomo.solvers')\n\ntry:\n unicode\nexcept:\n basestring = str\n\nclass IPOPT(SystemCallSolver):\n \"\"\"\n An interface to the Ipopt optimizer that uses the AMPL Solver Library.\n \"\"\"\n\n pyomo.common.plugin.alias('ipopt', doc='The Ipopt NLP solver')\n\n def __init__(self, **kwds):\n #\n # Call base constructor\n #\n kwds[\"type\"] = \"ipopt\"\n super(IPOPT, self).__init__(**kwds)\n #\n # Setup valid problem formats, and valid results for each problem format\n # Also set the default problem and results formats.\n #\n self._valid_problem_formats=[ProblemFormat.nl]\n self._valid_result_formats = {}\n self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol]\n self.set_problem_format(ProblemFormat.nl)\n\n # Note: Undefined capabilities default to 'None'\n self._capabilities = pyutilib.misc.Options()\n self._capabilities.linear = True\n # Should we set this to False? 
Doing so might cause\n # a headache for some folks.\n self._capabilities.integer = True\n self._capabilities.quadratic_objective = True\n self._capabilities.quadratic_constraint = True\n self._capabilities.sos1 = True\n self._capabilities.sos2 = True\n\n def _default_results_format(self, prob_format):\n return ResultsFormat.sol\n\n def _default_executable(self):\n executable = pyutilib.services.registered_executable(\"ipopt\")\n if executable is None:\n logger.warning(\"Could not locate the 'ipopt' executable, \"\n \"which is required for solver %s\" % self.name)\n self.enable = False\n return None\n return executable.get_path()\n\n def _get_version(self):\n \"\"\"\n Returns a tuple describing the solver executable version.\n \"\"\"\n solver_exec = self.executable()\n if solver_exec is None:\n return _extract_version('')\n results = pyutilib.subprocess.run( [solver_exec,\"-v\"], timelimit=1 )\n return _extract_version(results[1])\n\n def create_command_line(self, executable, problem_files):\n\n assert(self._problem_format == ProblemFormat.nl)\n assert(self._results_format == ResultsFormat.sol)\n\n #\n # Define log file\n #\n if self._log_file is None:\n self._log_file = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix=\"_ipopt.log\")\n\n fname = problem_files[0]\n if '.' in fname:\n tmp = fname.split('.')\n if len(tmp) > 2:\n fname = '.'.join(tmp[:-1])\n else:\n fname = tmp[0]\n self._soln_file = fname+\".sol\"\n\n #\n # Define results file (since an external parser is used)\n #\n self._results_file = self._soln_file\n\n #\n # Define command line\n #\n env=os.environ.copy()\n #\n # Merge the PYOMO_AMPLFUNC (externals defined within\n # Pyomo/Pyomo) with any user-specified external function\n # libraries\n #\n if 'PYOMO_AMPLFUNC' in env:\n if 'AMPLFUNC' in env:\n env['AMPLFUNC'] += \"\\n\" + env['PYOMO_AMPLFUNC']\n else:\n env['AMPLFUNC'] = env['PYOMO_AMPLFUNC']\n\n cmd = [executable, problem_files[0], '-AMPL']\n if self._timer:\n cmd.insert(0, self._timer)\n\n env_opt = []\n of_opt = []\n ofn_option_used = False\n for key in self.options:\n if key == 'solver':\n continue\n elif key.startswith(\"OF_\"):\n assert len(key) > 3\n of_opt.append((key[3:], self.options[key]))\n else:\n if key == \"option_file_name\":\n ofn_option_used = True\n if isinstance(self.options[key], basestring) and ' ' in self.options[key]:\n env_opt.append(key+\"=\\\"\"+str(self.options[key])+\"\\\"\")\n cmd.append(str(key)+\"=\"+str(self.options[key]))\n else:\n env_opt.append(key+\"=\"+str(self.options[key]))\n cmd.append(str(key)+\"=\"+str(self.options[key]))\n\n if len(of_opt) > 0:\n # If the 'option_file_name' command-line option\n # was used, we don't know if we should overwrite,\n # merge it, or it is was a mistake, so raise an\n # exception. Maybe this can be changed.\n if ofn_option_used:\n raise ValueError(\n \"The 'option_file_name' command-line \"\n \"option for Ipopt can not be used \"\n \"when specifying options for the \"\n \"options file (i.e., options that \"\n \"start with 'OF_'\")\n\n # Now check if an 'ipopt.opt' file exists in the\n # current working directory. If so, we need to\n # make it clear that this file will be ignored.\n default_of_name = os.path.join(os.getcwd(), 'ipopt.opt')\n if os.path.exists(default_of_name):\n logger.warning(\"A file named '%s' exists in \"\n \"the current working directory, but \"\n \"Ipopt options file options (i.e., \"\n \"options that start with 'OF_') were \"\n \"provided. 
The options file '%s' will \"\n \"be ignored.\" % (default_of_name,\n default_of_name))\n\n # Now write the new options file\n options_filename = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix=\"_ipopt.opt\")\n with open(options_filename, \"w\") as f:\n for key, val in of_opt:\n f.write(key+\" \"+str(val)+\"\\n\")\n\n # Now set the command-line option telling Ipopt\n # to use this file\n env_opt.append('option_file_name=\"'+str(options_filename)+'\"')\n cmd.append('option_file_name='+str(options_filename))\n\n envstr = \"%s_options\" % self.options.solver\n # Merge with any options coming in through the environment\n env[envstr] = \" \".join(env_opt)\n\n return pyutilib.misc.Bunch(cmd=cmd, log_file=self._log_file, env=env)\n\n def process_output(self, rc):\n if os.path.exists(self._results_file):\n return super(IPOPT, self).process_output(rc)\n else:\n res = SolverResults()\n res.solver.status = SolverStatus.warning\n res.solver.termination_condition = TerminationCondition.other\n if os.path.exists(self._log_file):\n with open(self._log_file) as f:\n for line in f:\n if \"TOO_FEW_DEGREES_OF_FREEDOM\" in line:\n res.solver.message = line.split(':')[2].strip()\n assert \"degrees of freedom\" in res.solver.message\n return res\n\npyutilib.services.register_executable(name=\"ipopt\")\n", "path": "pyomo/solvers/plugins/solvers/IPOPT.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and \n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain \n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport os\n\nimport pyutilib.services\nimport pyutilib.misc\n\nimport pyomo.common.plugin\nfrom pyomo.opt.base import *\nfrom pyomo.opt.base.solvers import _extract_version\nfrom pyomo.opt.results import *\nfrom pyomo.opt.solver import *\n\nimport logging\nlogger = logging.getLogger('pyomo.solvers')\n\ntry:\n unicode\nexcept:\n basestring = str\n\nclass IPOPT(SystemCallSolver):\n \"\"\"\n An interface to the Ipopt optimizer that uses the AMPL Solver Library.\n \"\"\"\n\n pyomo.common.plugin.alias('ipopt', doc='The Ipopt NLP solver')\n\n def __init__(self, **kwds):\n #\n # Call base constructor\n #\n kwds[\"type\"] = \"ipopt\"\n super(IPOPT, self).__init__(**kwds)\n #\n # Setup valid problem formats, and valid results for each problem format\n # Also set the default problem and results formats.\n #\n self._valid_problem_formats=[ProblemFormat.nl]\n self._valid_result_formats = {}\n self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol]\n self.set_problem_format(ProblemFormat.nl)\n\n # Note: Undefined capabilities default to 'None'\n self._capabilities = pyutilib.misc.Options()\n self._capabilities.linear = True\n self._capabilities.integer = False\n self._capabilities.quadratic_objective = True\n self._capabilities.quadratic_constraint = True\n self._capabilities.sos1 = False\n self._capabilities.sos2 = False\n\n def _default_results_format(self, prob_format):\n return ResultsFormat.sol\n\n def _default_executable(self):\n executable = pyutilib.services.registered_executable(\"ipopt\")\n if executable is None:\n logger.warning(\"Could not locate the 'ipopt' executable, \"\n \"which 
is required for solver %s\" % self.name)\n self.enable = False\n return None\n return executable.get_path()\n\n def _get_version(self):\n \"\"\"\n Returns a tuple describing the solver executable version.\n \"\"\"\n solver_exec = self.executable()\n if solver_exec is None:\n return _extract_version('')\n results = pyutilib.subprocess.run( [solver_exec,\"-v\"], timelimit=1 )\n return _extract_version(results[1])\n\n def create_command_line(self, executable, problem_files):\n\n assert(self._problem_format == ProblemFormat.nl)\n assert(self._results_format == ResultsFormat.sol)\n\n #\n # Define log file\n #\n if self._log_file is None:\n self._log_file = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix=\"_ipopt.log\")\n\n fname = problem_files[0]\n if '.' in fname:\n tmp = fname.split('.')\n if len(tmp) > 2:\n fname = '.'.join(tmp[:-1])\n else:\n fname = tmp[0]\n self._soln_file = fname+\".sol\"\n\n #\n # Define results file (since an external parser is used)\n #\n self._results_file = self._soln_file\n\n #\n # Define command line\n #\n env=os.environ.copy()\n #\n # Merge the PYOMO_AMPLFUNC (externals defined within\n # Pyomo/Pyomo) with any user-specified external function\n # libraries\n #\n if 'PYOMO_AMPLFUNC' in env:\n if 'AMPLFUNC' in env:\n env['AMPLFUNC'] += \"\\n\" + env['PYOMO_AMPLFUNC']\n else:\n env['AMPLFUNC'] = env['PYOMO_AMPLFUNC']\n\n cmd = [executable, problem_files[0], '-AMPL']\n if self._timer:\n cmd.insert(0, self._timer)\n\n env_opt = []\n of_opt = []\n ofn_option_used = False\n for key in self.options:\n if key == 'solver':\n continue\n elif key.startswith(\"OF_\"):\n assert len(key) > 3\n of_opt.append((key[3:], self.options[key]))\n else:\n if key == \"option_file_name\":\n ofn_option_used = True\n if isinstance(self.options[key], basestring) and ' ' in self.options[key]:\n env_opt.append(key+\"=\\\"\"+str(self.options[key])+\"\\\"\")\n cmd.append(str(key)+\"=\"+str(self.options[key]))\n else:\n env_opt.append(key+\"=\"+str(self.options[key]))\n cmd.append(str(key)+\"=\"+str(self.options[key]))\n\n if len(of_opt) > 0:\n # If the 'option_file_name' command-line option\n # was used, we don't know if we should overwrite,\n # merge it, or it is was a mistake, so raise an\n # exception. Maybe this can be changed.\n if ofn_option_used:\n raise ValueError(\n \"The 'option_file_name' command-line \"\n \"option for Ipopt can not be used \"\n \"when specifying options for the \"\n \"options file (i.e., options that \"\n \"start with 'OF_'\")\n\n # Now check if an 'ipopt.opt' file exists in the\n # current working directory. If so, we need to\n # make it clear that this file will be ignored.\n default_of_name = os.path.join(os.getcwd(), 'ipopt.opt')\n if os.path.exists(default_of_name):\n logger.warning(\"A file named '%s' exists in \"\n \"the current working directory, but \"\n \"Ipopt options file options (i.e., \"\n \"options that start with 'OF_') were \"\n \"provided. 
The options file '%s' will \"\n \"be ignored.\" % (default_of_name,\n default_of_name))\n\n # Now write the new options file\n options_filename = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix=\"_ipopt.opt\")\n with open(options_filename, \"w\") as f:\n for key, val in of_opt:\n f.write(key+\" \"+str(val)+\"\\n\")\n\n # Now set the command-line option telling Ipopt\n # to use this file\n env_opt.append('option_file_name=\"'+str(options_filename)+'\"')\n cmd.append('option_file_name='+str(options_filename))\n\n envstr = \"%s_options\" % self.options.solver\n # Merge with any options coming in through the environment\n env[envstr] = \" \".join(env_opt)\n\n return pyutilib.misc.Bunch(cmd=cmd, log_file=self._log_file, env=env)\n\n def process_output(self, rc):\n if os.path.exists(self._results_file):\n return super(IPOPT, self).process_output(rc)\n else:\n res = SolverResults()\n res.solver.status = SolverStatus.warning\n res.solver.termination_condition = TerminationCondition.other\n if os.path.exists(self._log_file):\n with open(self._log_file) as f:\n for line in f:\n if \"TOO_FEW_DEGREES_OF_FREEDOM\" in line:\n res.solver.message = line.split(':')[2].strip()\n assert \"degrees of freedom\" in res.solver.message\n return res\n\npyutilib.services.register_executable(name=\"ipopt\")\n", "path": "pyomo/solvers/plugins/solvers/IPOPT.py"}]}
| 2,564 | 224 |
gh_patches_debug_32524
|
rasdani/github-patches
|
git_diff
|
pymodbus-dev__pymodbus-1189
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
REPL server missing serial configuration
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow (tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus)) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* check the discussions forum https://github.com/riptideio/pymodbus/discussions
* prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus
-->
### Versions
* Python: Python 3.9.2 (default, Mar 12 2021, 04:06:34)
[GCC 10.2.1 20210110] on linux
* OS: PRETTY_NAME="Raspbian GNU/Linux 11 (bullseye)"
NAME="Raspbian GNU/Linux"
VERSION_ID="11"
VERSION="11 (bullseye)"
VERSION_CODENAME=bullseye
ID=raspbian
ID_LIKE=debian
* Pymodbus: 3.0.2 REPL
* Modbus Hardware (if used): Serial USB Adapter
### Pymodbus Specific
* Server: rtu - sync/async
* Client: rtu - sync/async
### Description
I am using the pymodbus REPL server and client for Modbus RTU communication with two USB adapters. Unfortunately I wasn't able to configure a different baudrate, parity, stop bits, or other serial settings for the server. I checked the serial port configuration with the stty command; in my example the server wasn't set to 38400 baud. I was able to set it manually while the server was running. The client settings are fine.
Are there command-line parameters to set those properties? I haven't found them in the example videos, docs, or code.
### Code and Logs
```python
pymodbus.server --verbose run -s serial -f rtu -p /dev/ttyUSB1 --baudrate 38400 -u 1 -r 2
pymodbus.console serial --method rtu --port /dev/ttyUSB0 --baudrate 38400
```
```
#serial settings and logs
pi@pi1:~/pymodbus-dev/pymodbus $ stty -F /dev/ttyUSB1 -a
#speed 9600 baud; rows 0; columns 0; line = 0;
#intr = ^C; quit = ^\; erase = ^?; kill = ^U; eof = ^D; eol = <undef>; eol2 = <undef>; #swtch = <undef>; start = ^Q; stop = ^S; susp = ^Z; rprnt = ^R; werase = ^W; lnext = #^V; discard = ^O; min = 0; time = 0;
#-parenb -parodd -cmspar cs8 hupcl -cstopb cread clocal -crtscts
#-ignbrk -brkint -ignpar -parmrk -inpck -istrip -inlcr -igncr -icrnl -ixon -ixoff -iuclc -ixany #-imaxbel -iutf8
#-opost -olcuc -ocrnl -onlcr -onocr -onlret -ofill -ofdel nl0 cr0 tab0 bs0 vt0 ff0
#-isig -icanon -iexten -echo -echoe -echok -echonl -noflsh -xcase -tostop -echoprt -#echoctl -echoke -flusho -extproc
#manual edit interface while running the server
stty -F /dev/ttyUSB1 38400
```
--- END ISSUE ---
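One way to close this gap is to fold any leftover `--key value` pairs (typer exposes them via `ctx.args` when extra args are allowed) into the serial section of the server config before the server is built, coercing each value to the type of the existing default. The function below is a standalone sketch of that idea; the name `apply_extra_args` and the sample config are illustrative:

```python
# Standalone sketch: merge leftover "--key value" CLI pairs into an existing
# config dict, keeping the type of each default (e.g. baudrate stays an int).
import sys


def apply_extra_args(extra_args, modbus_config):
    keys = [arg.strip().removeprefix("--") for arg in extra_args[::2]]
    for key, value in zip(keys, extra_args[1::2]):
        if key in modbus_config:
            try:
                modbus_config[key] = type(modbus_config[key])(value)
            except ValueError as err:
                sys.exit(f"invalid value {value!r} for --{key}: {err}")
    return modbus_config


config = {"baudrate": 9600, "parity": "N", "stopbits": 1}
print(apply_extra_args(["--baudrate", "38400", "--parity", "E"], config))
# -> {'baudrate': 38400, 'parity': 'E', 'stopbits': 1}
```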
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymodbus/repl/server/main.py`
Content:
```
1 """Repl server main."""
2 from __future__ import annotations
3
4 import asyncio
5 import json
6 import logging
7 from enum import Enum
8 from pathlib import Path
9 from typing import List
10
11 import typer
12
13 from pymodbus.framer.socket_framer import ModbusSocketFramer
14 from pymodbus.repl.server.cli import run_repl
15 from pymodbus.server.reactive.default_config import DEFAULT_CONFIG
16 from pymodbus.server.reactive.main import (
17 DEFAULT_FRAMER,
18 DEFUALT_HANDLERS,
19 ReactiveServer,
20 )
21
22
23 CANCELLED_ERROR = asyncio.exceptions.CancelledError
24
25 _logger = logging.getLogger(__name__)
26
27 CONTEXT_SETTING = {"allow_extra_args": True, "ignore_unknown_options": True}
28
29 # TBD class ModbusServerConfig:
30
31
32 class ModbusServerTypes(str, Enum):
33 """Server types."""
34
35 # ["tcp", "serial", "tls", "udp"]
36 tcp = "tcp" # pylint: disable=invalid-name
37 serial = "serial" # pylint: disable=invalid-name
38 tls = "tls" # pylint: disable=invalid-name
39 udp = "udp" # pylint: disable=invalid-name
40
41
42 class ModbusFramerTypes(str, Enum):
43 """Framer types."""
44
45 # ["socket", "rtu", "tls", "ascii", "binary"]
46 socket = "socket" # pylint: disable=invalid-name
47 rtu = "rtu" # pylint: disable=invalid-name
48 tls = "tls" # pylint: disable=invalid-name
49 ascii = "ascii" # pylint: disable=invalid-name
50 binary = "binary" # pylint: disable=invalid-name
51
52
53 def _completer(incomplete: str, valid_values: List[str]) -> List[str]:
54 """Complete value."""
55 completion = []
56 for name in valid_values:
57 if name.startswith(incomplete):
58 completion.append(name)
59 return completion
60
61
62 def framers(incomplete: str) -> List[str]:
63 """Return an autocompleted list of supported clouds."""
64 _framers = ["socket", "rtu", "tls", "ascii", "binary"]
65 return _completer(incomplete, _framers)
66
67
68 def servers(incomplete: str) -> List[str]:
69 """Return an autocompleted list of supported clouds."""
70 _servers = ["tcp", "serial", "tls", "udp"]
71 return _completer(incomplete, _servers)
72
73
74 app = typer.Typer(
75 no_args_is_help=True,
76 context_settings=CONTEXT_SETTING,
77 help="Reactive modebus server",
78 )
79
80
81 @app.callback()
82 def server(
83 ctx: typer.Context,
84 host: str = typer.Option("localhost", "--host", help="Host address"),
85 web_port: int = typer.Option(8080, "--web-port", help="Web app port"),
86 broadcast_support: bool = typer.Option(
87 False, "-b", help="Support broadcast messages"
88 ),
89 repl: bool = typer.Option(True, help="Enable/Disable repl for server"),
90 verbose: bool = typer.Option(
91 False, help="Run with debug logs enabled for pymodbus"
92 ),
93 ):
94 """Run server code."""
95 FORMAT = ( # pylint: disable=invalid-name
96 "%(asctime)-15s %(threadName)-15s"
97 " %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s"
98 )
99 pymodbus_logger = logging.getLogger("pymodbus")
100 logging.basicConfig(format=FORMAT)
101 logger = logging.getLogger(__name__)
102 if verbose:
103 pymodbus_logger.setLevel(logging.DEBUG)
104 logger.setLevel(logging.DEBUG)
105 else:
106 pymodbus_logger.setLevel(logging.ERROR)
107 logger.setLevel(logging.ERROR)
108
109 ctx.obj = {
110 "repl": repl,
111 "host": host,
112 "web_port": web_port,
113 "broadcast": broadcast_support,
114 }
115
116
117 @app.command("run", context_settings=CONTEXT_SETTING)
118 def run(
119 ctx: typer.Context,
120 modbus_server: str = typer.Option(
121 ModbusServerTypes.tcp,
122 "--modbus-server",
123 "-s",
124 case_sensitive=False,
125 autocompletion=servers,
126 help="Modbus Server",
127 ),
128 modbus_framer: str = typer.Option(
129 ModbusFramerTypes.socket,
130 "--framer",
131 "-f",
132 case_sensitive=False,
133 autocompletion=framers,
134 help="Modbus framer to use",
135 ),
136 modbus_port: str = typer.Option("5020", "--modbus-port", "-p", help="Modbus port"),
137 modbus_unit_id: List[int] = typer.Option(
138 None, "--unit-id", "-u", help="Supported Modbus unit id's"
139 ),
140 modbus_config: Path = typer.Option(
141 None, help="Path to additional modbus server config"
142 ),
143 randomize: int = typer.Option(
144 0,
145 "--random",
146 "-r",
147 help="Randomize every `r` reads. 0=never, 1=always,2=every-second-read"
148 ", and so on. Applicable IR and DI.",
149 ),
150 ):
151 """Run Reactive Modbus server.
152
153 Exposing REST endpoint for response manipulation.
154 """
155 repl = ctx.obj.pop("repl")
156 # TBD extra_args = ctx.args
157 web_app_config = ctx.obj
158 loop = asyncio.get_event_loop()
159 framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)
160 if modbus_config:
161 with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding
162 modbus_config = json.load(my_file)
163 else:
164 modbus_config = DEFAULT_CONFIG
165
166 data_block_settings = modbus_config.pop("data_block_settings", {})
167 modbus_config = modbus_config.get(modbus_server, {})
168 if modbus_server != "serial":
169 modbus_port = int(modbus_port)
170 handler = modbus_config.pop("handler", "ModbusConnectedRequestHandler")
171 else:
172 handler = modbus_config.pop("handler", "ModbusSingleRequestHandler")
173 handler = DEFUALT_HANDLERS.get(handler.strip())
174
175 modbus_config["handler"] = handler
176 modbus_config["randomize"] = randomize
177 app = ReactiveServer.factory(
178 modbus_server,
179 framer,
180 modbus_port=modbus_port,
181 unit=modbus_unit_id,
182 loop=loop,
183 single=False,
184 data_block_settings=data_block_settings,
185 **web_app_config,
186 **modbus_config,
187 )
188 try:
189 loop.run_until_complete(app.run_async(repl))
190 if repl:
191 loop.run_until_complete(run_repl(app))
192 loop.run_forever()
193
194 except CANCELLED_ERROR:
195 print("Done!!!!!")
196
197
198 if __name__ == "__main__":
199 app()
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pymodbus/repl/server/main.py b/pymodbus/repl/server/main.py
--- a/pymodbus/repl/server/main.py
+++ b/pymodbus/repl/server/main.py
@@ -4,6 +4,7 @@
import asyncio
import json
import logging
+import sys
from enum import Enum
from pathlib import Path
from typing import List
@@ -26,6 +27,7 @@
CONTEXT_SETTING = {"allow_extra_args": True, "ignore_unknown_options": True}
+
# TBD class ModbusServerConfig:
@@ -71,6 +73,23 @@
return _completer(incomplete, _servers)
+def process_extra_args(extra_args: list[str], modbus_config: dict) -> dict:
+ """Process extra args passed to server."""
+ options_stripped = [x.strip().replace("--", "") for x in extra_args[::2]]
+ extra_args = dict(list(zip(options_stripped, extra_args[1::2])))
+ for option, value in extra_args.items():
+ if option in modbus_config:
+ try:
+ modbus_config[option] = type(modbus_config[option])(value)
+ except ValueError as err:
+ msg = (
+ f"Error parsing extra arg {option}' " f"with value '{value}'. {err}"
+ )
+ _logger.error(msg)
+ sys.exit(1)
+ return modbus_config
+
+
app = typer.Typer(
no_args_is_help=True,
context_settings=CONTEXT_SETTING,
@@ -163,8 +182,10 @@
else:
modbus_config = DEFAULT_CONFIG
+ extra_args = ctx.args
data_block_settings = modbus_config.pop("data_block_settings", {})
modbus_config = modbus_config.get(modbus_server, {})
+ modbus_config = process_extra_args(extra_args, modbus_config)
if modbus_server != "serial":
modbus_port = int(modbus_port)
handler = modbus_config.pop("handler", "ModbusConnectedRequestHandler")
|
{"golden_diff": "diff --git a/pymodbus/repl/server/main.py b/pymodbus/repl/server/main.py\n--- a/pymodbus/repl/server/main.py\n+++ b/pymodbus/repl/server/main.py\n@@ -4,6 +4,7 @@\n import asyncio\n import json\n import logging\n+import sys\n from enum import Enum\n from pathlib import Path\n from typing import List\n@@ -26,6 +27,7 @@\n \n CONTEXT_SETTING = {\"allow_extra_args\": True, \"ignore_unknown_options\": True}\n \n+\n # TBD class ModbusServerConfig:\n \n \n@@ -71,6 +73,23 @@\n return _completer(incomplete, _servers)\n \n \n+def process_extra_args(extra_args: list[str], modbus_config: dict) -> dict:\n+ \"\"\"Process extra args passed to server.\"\"\"\n+ options_stripped = [x.strip().replace(\"--\", \"\") for x in extra_args[::2]]\n+ extra_args = dict(list(zip(options_stripped, extra_args[1::2])))\n+ for option, value in extra_args.items():\n+ if option in modbus_config:\n+ try:\n+ modbus_config[option] = type(modbus_config[option])(value)\n+ except ValueError as err:\n+ msg = (\n+ f\"Error parsing extra arg {option}' \" f\"with value '{value}'. {err}\"\n+ )\n+ _logger.error(msg)\n+ sys.exit(1)\n+ return modbus_config\n+\n+\n app = typer.Typer(\n no_args_is_help=True,\n context_settings=CONTEXT_SETTING,\n@@ -163,8 +182,10 @@\n else:\n modbus_config = DEFAULT_CONFIG\n \n+ extra_args = ctx.args\n data_block_settings = modbus_config.pop(\"data_block_settings\", {})\n modbus_config = modbus_config.get(modbus_server, {})\n+ modbus_config = process_extra_args(extra_args, modbus_config)\n if modbus_server != \"serial\":\n modbus_port = int(modbus_port)\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n", "issue": "REPL server missing serial configuration\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * check the discussions forum https://github.com/riptideio/pymodbus/discussions\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: Python 3.9.2 (default, Mar 12 2021, 04:06:34)\r\n[GCC 10.2.1 20210110] on linux\r\n* OS: PRETTY_NAME=\"Raspbian GNU/Linux 11 (bullseye)\"\r\nNAME=\"Raspbian GNU/Linux\"\r\nVERSION_ID=\"11\"\r\nVERSION=\"11 (bullseye)\"\r\nVERSION_CODENAME=bullseye\r\nID=raspbian\r\nID_LIKE=debian\r\n* Pymodbus: 3.0.2 REPL\r\n* Modbus Hardware (if used): Serial USB Adapter\r\n\r\n### Pymodbus Specific\r\n* Server: rtu - sync/async\r\n* Client: rtu - sync/async\r\n\r\n### Description\r\n\r\nI am using pymodbus REPL server and client for modbus rtu communication with two usb adapters. Unfortunately I wasn't able to configure another baudrate, parity, stop bits and other serial settings for the server. I checked the config of the serial port with the command stty. In my example the server wasn't set to 38400 baud. I was able to set it manually while running the server. The client settings are fine.\r\n\r\nAre there command line parameters to set those properties? I haven't found them in the example videos, docs or code. 
\r\n\r\n### Code and Logs\r\n\r\n```python\r\npymodbus.server --verbose run -s serial -f rtu -p /dev/ttyUSB1 --baudrate 38400 -u 1 -r 2\r\n\r\npymodbus.console serial --method rtu --port /dev/ttyUSB0 --baudrate 38400\r\n```\r\n```\r\n#serial settings and logs\r\npi@pi1:~/pymodbus-dev/pymodbus $ stty -F /dev/ttyUSB1 -a\r\n#speed 9600 baud; rows 0; columns 0; line = 0;\r\n#intr = ^C; quit = ^\\; erase = ^?; kill = ^U; eof = ^D; eol = <undef>; eol2 = <undef>; #swtch = <undef>; start = ^Q; stop = ^S; susp = ^Z; rprnt = ^R; werase = ^W; lnext = #^V; discard = ^O; min = 0; time = 0;\r\n#-parenb -parodd -cmspar cs8 hupcl -cstopb cread clocal -crtscts\r\n#-ignbrk -brkint -ignpar -parmrk -inpck -istrip -inlcr -igncr -icrnl -ixon -ixoff -iuclc -ixany #-imaxbel -iutf8\r\n#-opost -olcuc -ocrnl -onlcr -onocr -onlret -ofill -ofdel nl0 cr0 tab0 bs0 vt0 ff0\r\n#-isig -icanon -iexten -echo -echoe -echok -echonl -noflsh -xcase -tostop -echoprt -#echoctl -echoke -flusho -extproc\r\n\r\n#manual edit interface while running the server\r\nstty -F /dev/ttyUSB1 38400\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Repl server main.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport logging\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List\n\nimport typer\n\nfrom pymodbus.framer.socket_framer import ModbusSocketFramer\nfrom pymodbus.repl.server.cli import run_repl\nfrom pymodbus.server.reactive.default_config import DEFAULT_CONFIG\nfrom pymodbus.server.reactive.main import (\n DEFAULT_FRAMER,\n DEFUALT_HANDLERS,\n ReactiveServer,\n)\n\n\nCANCELLED_ERROR = asyncio.exceptions.CancelledError\n\n_logger = logging.getLogger(__name__)\n\nCONTEXT_SETTING = {\"allow_extra_args\": True, \"ignore_unknown_options\": True}\n\n# TBD class ModbusServerConfig:\n\n\nclass ModbusServerTypes(str, Enum):\n \"\"\"Server types.\"\"\"\n\n # [\"tcp\", \"serial\", \"tls\", \"udp\"]\n tcp = \"tcp\" # pylint: disable=invalid-name\n serial = \"serial\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n udp = \"udp\" # pylint: disable=invalid-name\n\n\nclass ModbusFramerTypes(str, Enum):\n \"\"\"Framer types.\"\"\"\n\n # [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n socket = \"socket\" # pylint: disable=invalid-name\n rtu = \"rtu\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n ascii = \"ascii\" # pylint: disable=invalid-name\n binary = \"binary\" # pylint: disable=invalid-name\n\n\ndef _completer(incomplete: str, valid_values: List[str]) -> List[str]:\n \"\"\"Complete value.\"\"\"\n completion = []\n for name in valid_values:\n if name.startswith(incomplete):\n completion.append(name)\n return completion\n\n\ndef framers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _framers = [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n return _completer(incomplete, _framers)\n\n\ndef servers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _servers = [\"tcp\", \"serial\", \"tls\", \"udp\"]\n return _completer(incomplete, _servers)\n\n\napp = typer.Typer(\n no_args_is_help=True,\n context_settings=CONTEXT_SETTING,\n help=\"Reactive modebus server\",\n)\n\n\[email protected]()\ndef server(\n ctx: typer.Context,\n host: str = typer.Option(\"localhost\", \"--host\", help=\"Host address\"),\n web_port: int = typer.Option(8080, \"--web-port\", help=\"Web app port\"),\n broadcast_support: bool = typer.Option(\n False, \"-b\", 
help=\"Support broadcast messages\"\n ),\n repl: bool = typer.Option(True, help=\"Enable/Disable repl for server\"),\n verbose: bool = typer.Option(\n False, help=\"Run with debug logs enabled for pymodbus\"\n ),\n):\n \"\"\"Run server code.\"\"\"\n FORMAT = ( # pylint: disable=invalid-name\n \"%(asctime)-15s %(threadName)-15s\"\n \" %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s\"\n )\n pymodbus_logger = logging.getLogger(\"pymodbus\")\n logging.basicConfig(format=FORMAT)\n logger = logging.getLogger(__name__)\n if verbose:\n pymodbus_logger.setLevel(logging.DEBUG)\n logger.setLevel(logging.DEBUG)\n else:\n pymodbus_logger.setLevel(logging.ERROR)\n logger.setLevel(logging.ERROR)\n\n ctx.obj = {\n \"repl\": repl,\n \"host\": host,\n \"web_port\": web_port,\n \"broadcast\": broadcast_support,\n }\n\n\[email protected](\"run\", context_settings=CONTEXT_SETTING)\ndef run(\n ctx: typer.Context,\n modbus_server: str = typer.Option(\n ModbusServerTypes.tcp,\n \"--modbus-server\",\n \"-s\",\n case_sensitive=False,\n autocompletion=servers,\n help=\"Modbus Server\",\n ),\n modbus_framer: str = typer.Option(\n ModbusFramerTypes.socket,\n \"--framer\",\n \"-f\",\n case_sensitive=False,\n autocompletion=framers,\n help=\"Modbus framer to use\",\n ),\n modbus_port: str = typer.Option(\"5020\", \"--modbus-port\", \"-p\", help=\"Modbus port\"),\n modbus_unit_id: List[int] = typer.Option(\n None, \"--unit-id\", \"-u\", help=\"Supported Modbus unit id's\"\n ),\n modbus_config: Path = typer.Option(\n None, help=\"Path to additional modbus server config\"\n ),\n randomize: int = typer.Option(\n 0,\n \"--random\",\n \"-r\",\n help=\"Randomize every `r` reads. 0=never, 1=always,2=every-second-read\"\n \", and so on. Applicable IR and DI.\",\n ),\n):\n \"\"\"Run Reactive Modbus server.\n\n Exposing REST endpoint for response manipulation.\n \"\"\"\n repl = ctx.obj.pop(\"repl\")\n # TBD extra_args = ctx.args\n web_app_config = ctx.obj\n loop = asyncio.get_event_loop()\n framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)\n if modbus_config:\n with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding\n modbus_config = json.load(my_file)\n else:\n modbus_config = DEFAULT_CONFIG\n\n data_block_settings = modbus_config.pop(\"data_block_settings\", {})\n modbus_config = modbus_config.get(modbus_server, {})\n if modbus_server != \"serial\":\n modbus_port = int(modbus_port)\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n else:\n handler = modbus_config.pop(\"handler\", \"ModbusSingleRequestHandler\")\n handler = DEFUALT_HANDLERS.get(handler.strip())\n\n modbus_config[\"handler\"] = handler\n modbus_config[\"randomize\"] = randomize\n app = ReactiveServer.factory(\n modbus_server,\n framer,\n modbus_port=modbus_port,\n unit=modbus_unit_id,\n loop=loop,\n single=False,\n data_block_settings=data_block_settings,\n **web_app_config,\n **modbus_config,\n )\n try:\n loop.run_until_complete(app.run_async(repl))\n if repl:\n loop.run_until_complete(run_repl(app))\n loop.run_forever()\n\n except CANCELLED_ERROR:\n print(\"Done!!!!!\")\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "pymodbus/repl/server/main.py"}], "after_files": [{"content": "\"\"\"Repl server main.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport logging\nimport sys\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List\n\nimport typer\n\nfrom pymodbus.framer.socket_framer import ModbusSocketFramer\nfrom pymodbus.repl.server.cli 
import run_repl\nfrom pymodbus.server.reactive.default_config import DEFAULT_CONFIG\nfrom pymodbus.server.reactive.main import (\n DEFAULT_FRAMER,\n DEFUALT_HANDLERS,\n ReactiveServer,\n)\n\n\nCANCELLED_ERROR = asyncio.exceptions.CancelledError\n\n_logger = logging.getLogger(__name__)\n\nCONTEXT_SETTING = {\"allow_extra_args\": True, \"ignore_unknown_options\": True}\n\n\n# TBD class ModbusServerConfig:\n\n\nclass ModbusServerTypes(str, Enum):\n \"\"\"Server types.\"\"\"\n\n # [\"tcp\", \"serial\", \"tls\", \"udp\"]\n tcp = \"tcp\" # pylint: disable=invalid-name\n serial = \"serial\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n udp = \"udp\" # pylint: disable=invalid-name\n\n\nclass ModbusFramerTypes(str, Enum):\n \"\"\"Framer types.\"\"\"\n\n # [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n socket = \"socket\" # pylint: disable=invalid-name\n rtu = \"rtu\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n ascii = \"ascii\" # pylint: disable=invalid-name\n binary = \"binary\" # pylint: disable=invalid-name\n\n\ndef _completer(incomplete: str, valid_values: List[str]) -> List[str]:\n \"\"\"Complete value.\"\"\"\n completion = []\n for name in valid_values:\n if name.startswith(incomplete):\n completion.append(name)\n return completion\n\n\ndef framers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _framers = [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n return _completer(incomplete, _framers)\n\n\ndef servers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _servers = [\"tcp\", \"serial\", \"tls\", \"udp\"]\n return _completer(incomplete, _servers)\n\n\ndef process_extra_args(extra_args: list[str], modbus_config: dict) -> dict:\n \"\"\"Process extra args passed to server.\"\"\"\n options_stripped = [x.strip().replace(\"--\", \"\") for x in extra_args[::2]]\n extra_args = dict(list(zip(options_stripped, extra_args[1::2])))\n for option, value in extra_args.items():\n if option in modbus_config:\n try:\n modbus_config[option] = type(modbus_config[option])(value)\n except ValueError as err:\n msg = (\n f\"Error parsing extra arg {option}' \" f\"with value '{value}'. 
{err}\"\n )\n _logger.error(msg)\n sys.exit(1)\n return modbus_config\n\n\napp = typer.Typer(\n no_args_is_help=True,\n context_settings=CONTEXT_SETTING,\n help=\"Reactive modebus server\",\n)\n\n\[email protected]()\ndef server(\n ctx: typer.Context,\n host: str = typer.Option(\"localhost\", \"--host\", help=\"Host address\"),\n web_port: int = typer.Option(8080, \"--web-port\", help=\"Web app port\"),\n broadcast_support: bool = typer.Option(\n False, \"-b\", help=\"Support broadcast messages\"\n ),\n repl: bool = typer.Option(True, help=\"Enable/Disable repl for server\"),\n verbose: bool = typer.Option(\n False, help=\"Run with debug logs enabled for pymodbus\"\n ),\n):\n \"\"\"Run server code.\"\"\"\n FORMAT = ( # pylint: disable=invalid-name\n \"%(asctime)-15s %(threadName)-15s\"\n \" %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s\"\n )\n pymodbus_logger = logging.getLogger(\"pymodbus\")\n logging.basicConfig(format=FORMAT)\n logger = logging.getLogger(__name__)\n if verbose:\n pymodbus_logger.setLevel(logging.DEBUG)\n logger.setLevel(logging.DEBUG)\n else:\n pymodbus_logger.setLevel(logging.ERROR)\n logger.setLevel(logging.ERROR)\n\n ctx.obj = {\n \"repl\": repl,\n \"host\": host,\n \"web_port\": web_port,\n \"broadcast\": broadcast_support,\n }\n\n\[email protected](\"run\", context_settings=CONTEXT_SETTING)\ndef run(\n ctx: typer.Context,\n modbus_server: str = typer.Option(\n ModbusServerTypes.tcp,\n \"--modbus-server\",\n \"-s\",\n case_sensitive=False,\n autocompletion=servers,\n help=\"Modbus Server\",\n ),\n modbus_framer: str = typer.Option(\n ModbusFramerTypes.socket,\n \"--framer\",\n \"-f\",\n case_sensitive=False,\n autocompletion=framers,\n help=\"Modbus framer to use\",\n ),\n modbus_port: str = typer.Option(\"5020\", \"--modbus-port\", \"-p\", help=\"Modbus port\"),\n modbus_unit_id: List[int] = typer.Option(\n None, \"--unit-id\", \"-u\", help=\"Supported Modbus unit id's\"\n ),\n modbus_config: Path = typer.Option(\n None, help=\"Path to additional modbus server config\"\n ),\n randomize: int = typer.Option(\n 0,\n \"--random\",\n \"-r\",\n help=\"Randomize every `r` reads. 0=never, 1=always,2=every-second-read\"\n \", and so on. 
Applicable IR and DI.\",\n ),\n):\n \"\"\"Run Reactive Modbus server.\n\n Exposing REST endpoint for response manipulation.\n \"\"\"\n repl = ctx.obj.pop(\"repl\")\n # TBD extra_args = ctx.args\n web_app_config = ctx.obj\n loop = asyncio.get_event_loop()\n framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)\n if modbus_config:\n with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding\n modbus_config = json.load(my_file)\n else:\n modbus_config = DEFAULT_CONFIG\n\n extra_args = ctx.args\n data_block_settings = modbus_config.pop(\"data_block_settings\", {})\n modbus_config = modbus_config.get(modbus_server, {})\n modbus_config = process_extra_args(extra_args, modbus_config)\n if modbus_server != \"serial\":\n modbus_port = int(modbus_port)\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n else:\n handler = modbus_config.pop(\"handler\", \"ModbusSingleRequestHandler\")\n handler = DEFUALT_HANDLERS.get(handler.strip())\n\n modbus_config[\"handler\"] = handler\n modbus_config[\"randomize\"] = randomize\n app = ReactiveServer.factory(\n modbus_server,\n framer,\n modbus_port=modbus_port,\n unit=modbus_unit_id,\n loop=loop,\n single=False,\n data_block_settings=data_block_settings,\n **web_app_config,\n **modbus_config,\n )\n try:\n loop.run_until_complete(app.run_async(repl))\n if repl:\n loop.run_until_complete(run_repl(app))\n loop.run_forever()\n\n except CANCELLED_ERROR:\n print(\"Done!!!!!\")\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "pymodbus/repl/server/main.py"}]}
| 3,144 | 465 |
gh_patches_debug_4743
|
rasdani/github-patches
|
git_diff
|
netket__netket-1112
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade `flakehell` in the pre-commit hook
It seems that `flakehell` is not actively maintained, and it is incompatible with `flake8 4.x` released last October (see flakehell/flakehell#22). That issue has not been resolved after a few months. If a new developer of NetKet runs `pre-commit install-hooks`, it will just fail.
We may use [this fix](https://github.com/flakehell/flakehell/pull/23#issuecomment-985879201), or change it to [flakeheaven](https://github.com/flakeheaven/flakeheaven) which seems more actively maintained.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 DEV_DEPENDENCIES = [
4 "pytest>=6",
5 "pytest-xdist>=2",
6 "coverage>=5",
7 "pytest-cov>=2.10.1",
8 "networkx~=2.4",
9 "flaky>=3.7",
10 "pre-commit",
11 "black==22.1.0",
12 "flakehell>=0.9",
13 ]
14 MPI_DEPENDENCIES = ["mpi4py>=3.0.1, <4", "mpi4jax~=0.3.1"]
15 EXTRA_DEPENDENCIES = ["tensorboardx>=2.0.0", "openfermion>=1.0.0"]
16 BASE_DEPENDENCIES = [
17 "numpy~=1.18",
18 "scipy>=1.5.3, <2",
19 "tqdm~=4.60",
20 "plum-dispatch~=1.5.1",
21 "numba>=0.52, <0.56",
22 "igraph~=0.9.8",
23 "jax>=0.2.23, <0.4",
24 "jaxlib>=0.1.69",
25 "flax>=0.3.5, <0.5",
26 "orjson~=3.4",
27 "optax>=0.1.1, <0.2",
28 "numba4jax>=0.0.3, <0.1",
29 ]
30
31 setup(
32 name="netket",
33 author="Giuseppe Carleo et al.",
34 url="http://github.com/netket/netket",
35 author_email="[email protected]",
36 license="Apache 2.0",
37 description="Netket : Machine Learning techniques for many-body quantum systems.",
38 long_description="""NetKet is an open-source project delivering cutting-edge
39 methods for the study of many-body quantum systems with artificial
40 neural networks and machine learning techniques.""",
41 classifiers=[
42 "Programming Language :: Python :: 3",
43 "Development Status :: 5 - Production/Stable",
44 "Intended Audience :: Science/Research",
45 "License :: OSI Approved :: Apache Software License",
46 "Operating System :: MacOS :: MacOS X",
47 "Operating System :: POSIX :: Linux",
48 "Operating System :: Unix",
49 "Topic :: Scientific/Engineering :: Physics",
50 ],
51 packages=find_packages(),
52 install_requires=BASE_DEPENDENCIES,
53 python_requires=">=3.7",
54 extras_require={
55 "dev": DEV_DEPENDENCIES,
56 "mpi": MPI_DEPENDENCIES,
57 "extra": EXTRA_DEPENDENCIES,
58 "all": MPI_DEPENDENCIES + DEV_DEPENDENCIES + EXTRA_DEPENDENCIES,
59 },
60 )
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,10 +6,9 @@
"coverage>=5",
"pytest-cov>=2.10.1",
"networkx~=2.4",
- "flaky>=3.7",
- "pre-commit",
+ "pre-commit>=2.7",
"black==22.1.0",
- "flakehell>=0.9",
+ "flake8==4.0.1",
]
MPI_DEPENDENCIES = ["mpi4py>=3.0.1, <4", "mpi4jax~=0.3.1"]
EXTRA_DEPENDENCIES = ["tensorboardx>=2.0.0", "openfermion>=1.0.0"]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,10 +6,9 @@\n \"coverage>=5\",\n \"pytest-cov>=2.10.1\",\n \"networkx~=2.4\",\n- \"flaky>=3.7\",\n- \"pre-commit\",\n+ \"pre-commit>=2.7\",\n \"black==22.1.0\",\n- \"flakehell>=0.9\",\n+ \"flake8==4.0.1\",\n ]\n MPI_DEPENDENCIES = [\"mpi4py>=3.0.1, <4\", \"mpi4jax~=0.3.1\"]\n EXTRA_DEPENDENCIES = [\"tensorboardx>=2.0.0\", \"openfermion>=1.0.0\"]\n", "issue": "Upgrade `flakehell` in the pre-commit hook\nIt seems that `flakehell` is not actively maintained, and it is incompatible with `flake8 4.x` released in last October (see flakehell/flakehell#22). That issue is not resolved after a few months. If a new developer of NetKet runs `pre-commit install-hooks`, it will just fail.\r\n\r\nWe may use [this fix](https://github.com/flakehell/flakehell/pull/23#issuecomment-985879201), or change it to [flakeheaven](https://github.com/flakeheaven/flakeheaven) which seems more actively maintained.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nDEV_DEPENDENCIES = [\n \"pytest>=6\",\n \"pytest-xdist>=2\",\n \"coverage>=5\",\n \"pytest-cov>=2.10.1\",\n \"networkx~=2.4\",\n \"flaky>=3.7\",\n \"pre-commit\",\n \"black==22.1.0\",\n \"flakehell>=0.9\",\n]\nMPI_DEPENDENCIES = [\"mpi4py>=3.0.1, <4\", \"mpi4jax~=0.3.1\"]\nEXTRA_DEPENDENCIES = [\"tensorboardx>=2.0.0\", \"openfermion>=1.0.0\"]\nBASE_DEPENDENCIES = [\n \"numpy~=1.18\",\n \"scipy>=1.5.3, <2\",\n \"tqdm~=4.60\",\n \"plum-dispatch~=1.5.1\",\n \"numba>=0.52, <0.56\",\n \"igraph~=0.9.8\",\n \"jax>=0.2.23, <0.4\",\n \"jaxlib>=0.1.69\",\n \"flax>=0.3.5, <0.5\",\n \"orjson~=3.4\",\n \"optax>=0.1.1, <0.2\",\n \"numba4jax>=0.0.3, <0.1\",\n]\n\nsetup(\n name=\"netket\",\n author=\"Giuseppe Carleo et al.\",\n url=\"http://github.com/netket/netket\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n description=\"Netket : Machine Learning techniques for many-body quantum systems.\",\n long_description=\"\"\"NetKet is an open-source project delivering cutting-edge\n methods for the study of many-body quantum systems with artificial\n neural networks and machine learning techniques.\"\"\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Unix\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n packages=find_packages(),\n install_requires=BASE_DEPENDENCIES,\n python_requires=\">=3.7\",\n extras_require={\n \"dev\": DEV_DEPENDENCIES,\n \"mpi\": MPI_DEPENDENCIES,\n \"extra\": EXTRA_DEPENDENCIES,\n \"all\": MPI_DEPENDENCIES + DEV_DEPENDENCIES + EXTRA_DEPENDENCIES,\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nDEV_DEPENDENCIES = [\n \"pytest>=6\",\n \"pytest-xdist>=2\",\n \"coverage>=5\",\n \"pytest-cov>=2.10.1\",\n \"networkx~=2.4\",\n \"pre-commit>=2.7\",\n \"black==22.1.0\",\n \"flake8==4.0.1\",\n]\nMPI_DEPENDENCIES = [\"mpi4py>=3.0.1, <4\", \"mpi4jax~=0.3.1\"]\nEXTRA_DEPENDENCIES = [\"tensorboardx>=2.0.0\", \"openfermion>=1.0.0\"]\nBASE_DEPENDENCIES = [\n \"numpy~=1.18\",\n \"scipy>=1.5.3, <2\",\n \"tqdm~=4.60\",\n \"plum-dispatch~=1.5.1\",\n \"numba>=0.52, <0.56\",\n \"igraph~=0.9.8\",\n \"jax>=0.2.23, <0.4\",\n \"jaxlib>=0.1.69\",\n \"flax>=0.3.5, <0.5\",\n \"orjson~=3.4\",\n 
\"optax>=0.1.1, <0.2\",\n \"numba4jax>=0.0.3, <0.1\",\n]\n\nsetup(\n name=\"netket\",\n author=\"Giuseppe Carleo et al.\",\n url=\"http://github.com/netket/netket\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n description=\"Netket : Machine Learning techniques for many-body quantum systems.\",\n long_description=\"\"\"NetKet is an open-source project delivering cutting-edge\n methods for the study of many-body quantum systems with artificial\n neural networks and machine learning techniques.\"\"\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Unix\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n packages=find_packages(),\n install_requires=BASE_DEPENDENCIES,\n python_requires=\">=3.7\",\n extras_require={\n \"dev\": DEV_DEPENDENCIES,\n \"mpi\": MPI_DEPENDENCIES,\n \"extra\": EXTRA_DEPENDENCIES,\n \"all\": MPI_DEPENDENCIES + DEV_DEPENDENCIES + EXTRA_DEPENDENCIES,\n },\n)\n", "path": "setup.py"}]}
| 1,084 | 175 |
gh_patches_debug_12294
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-2757
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
console: shift+tab is broken in WSL for some Windows terminals
See https://github.com/Microsoft/WSL/issues/1770. This seems to affect some but not all Windows terminals.
We use shift+tab by default to switch to the next pane in the console app. We can:
- Say that this is not our problem, and wait for upstream to fix it.
- Find a different binding for next pane, which would be a shame because shift+tab is very natural.
@mhils what say you?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/tools/console/defaultkeys.py`
Content:
```
1
2 def map(km):
3 km.add(":", "console.command ", ["global"], "Command prompt")
4 km.add("?", "console.view.help", ["global"], "View help")
5 km.add("B", "browser.start", ["global"], "Start an attached browser")
6 km.add("C", "console.view.commands", ["global"], "View commands")
7 km.add("K", "console.view.keybindings", ["global"], "View key bindings")
8 km.add("O", "console.view.options", ["global"], "View options")
9 km.add("E", "console.view.eventlog", ["global"], "View event log")
10 km.add("Q", "console.exit", ["global"], "Exit immediately")
11 km.add("q", "console.view.pop", ["global"], "Exit the current view")
12 km.add("-", "console.layout.cycle", ["global"], "Cycle to next layout")
13 km.add("shift tab", "console.panes.next", ["global"], "Focus next layout pane")
14 km.add("P", "console.view.flow @focus", ["global"], "View flow details")
15
16 km.add("g", "console.nav.start", ["global"], "Go to start")
17 km.add("G", "console.nav.end", ["global"], "Go to end")
18 km.add("k", "console.nav.up", ["global"], "Up")
19 km.add("j", "console.nav.down", ["global"], "Down")
20 km.add("l", "console.nav.right", ["global"], "Right")
21 km.add("h", "console.nav.left", ["global"], "Left")
22 km.add("tab", "console.nav.next", ["global"], "Next")
23 km.add("enter", "console.nav.select", ["global"], "Select")
24 km.add("space", "console.nav.pagedown", ["global"], "Page down")
25 km.add("ctrl f", "console.nav.pagedown", ["global"], "Page down")
26 km.add("ctrl b", "console.nav.pageup", ["global"], "Page up")
27
28 km.add("I", "console.intercept.toggle", ["global"], "Toggle intercept")
29 km.add("i", "console.command.set intercept", ["global"], "Set intercept")
30 km.add("W", "console.command.set save_stream_file", ["global"], "Stream to file")
31 km.add("A", "flow.resume @all", ["flowlist", "flowview"], "Resume all intercepted flows")
32 km.add("a", "flow.resume @focus", ["flowlist", "flowview"], "Resume this intercepted flow")
33 km.add(
34 "b", "console.command cut.save @focus response.content ",
35 ["flowlist", "flowview"],
36 "Save response body to file"
37 )
38 km.add("d", "view.remove @focus", ["flowlist", "flowview"], "Delete flow from view")
39 km.add("D", "view.duplicate @focus", ["flowlist", "flowview"], "Duplicate flow")
40 km.add(
41 "e",
42 """
43 console.choose.cmd Format export.formats
44 console.command export.file {choice} @focus
45 """,
46 ["flowlist", "flowview"],
47 "Export this flow to file"
48 )
49 km.add("f", "console.command.set view_filter", ["flowlist"], "Set view filter")
50 km.add("F", "set console_focus_follow=toggle", ["flowlist"], "Set focus follow")
51 km.add(
52 "ctrl l",
53 "console.command cut.clip ",
54 ["flowlist", "flowview"],
55 "Send cuts to clipboard"
56 )
57 km.add("L", "console.command view.load ", ["flowlist"], "Load flows from file")
58 km.add("m", "flow.mark.toggle @focus", ["flowlist"], "Toggle mark on this flow")
59 km.add("M", "view.marked.toggle", ["flowlist"], "Toggle viewing marked flows")
60 km.add(
61 "n",
62 "console.command view.create get https://example.com/",
63 ["flowlist"],
64 "Create a new flow"
65 )
66 km.add(
67 "o",
68 """
69 console.choose.cmd Order view.order.options
70 set view_order={choice}
71 """,
72 ["flowlist"],
73 "Set flow list order"
74 )
75 km.add("r", "replay.client @focus", ["flowlist", "flowview"], "Replay this flow")
76 km.add("S", "console.command replay.server ", ["flowlist"], "Start server replay")
77 km.add("v", "set view_order_reversed=toggle", ["flowlist"], "Reverse flow list order")
78 km.add("U", "flow.mark @all false", ["flowlist"], "Un-set all marks")
79 km.add("w", "console.command save.file @shown ", ["flowlist"], "Save listed flows to file")
80 km.add("V", "flow.revert @focus", ["flowlist", "flowview"], "Revert changes to this flow")
81 km.add("X", "flow.kill @focus", ["flowlist"], "Kill this flow")
82 km.add("z", "view.remove @all", ["flowlist"], "Clear flow list")
83 km.add("Z", "view.remove @hidden", ["flowlist"], "Purge all flows not showing")
84 km.add(
85 "|",
86 "console.command script.run @focus ",
87 ["flowlist", "flowview"],
88 "Run a script on this flow"
89 )
90
91 km.add(
92 "e",
93 """
94 console.choose.cmd Part console.edit.focus.options
95 console.edit.focus {choice}
96 """,
97 ["flowview"],
98 "Edit a flow component"
99 )
100 km.add(
101 "f",
102 "view.setval.toggle @focus fullcontents",
103 ["flowview"],
104 "Toggle viewing full contents on this flow",
105 )
106 km.add("w", "console.command save.file @focus ", ["flowview"], "Save flow to file")
107 km.add("space", "view.focus.next", ["flowview"], "Go to next flow")
108
109 km.add(
110 "v",
111 """
112 console.choose "View Part" request,response
113 console.bodyview @focus {choice}
114 """,
115 ["flowview"],
116 "View flow body in an external viewer"
117 )
118 km.add("p", "view.focus.prev", ["flowview"], "Go to previous flow")
119 km.add(
120 "m",
121 """
122 console.choose.cmd Mode console.flowview.mode.options
123 console.flowview.mode.set {choice}
124 """,
125 ["flowview"],
126 "Set flow view mode"
127 )
128 km.add(
129 "z",
130 """
131 console.choose "Part" request,response
132 flow.encode.toggle @focus {choice}
133 """,
134 ["flowview"],
135 "Encode/decode flow body"
136 )
137
138 km.add("L", "console.command options.load ", ["options"], "Load from file")
139 km.add("S", "console.command options.save ", ["options"], "Save to file")
140 km.add("D", "options.reset", ["options"], "Reset all options")
141 km.add("d", "console.options.reset.focus", ["options"], "Reset this option")
142
143 km.add("a", "console.grideditor.add", ["grideditor"], "Add a row after cursor")
144 km.add("A", "console.grideditor.insert", ["grideditor"], "Insert a row before cursor")
145 km.add("d", "console.grideditor.delete", ["grideditor"], "Delete this row")
146 km.add(
147 "r",
148 "console.command console.grideditor.load",
149 ["grideditor"],
150 "Read unescaped data into the current cell from file"
151 )
152 km.add(
153 "R",
154 "console.command console.grideditor.load_escaped",
155 ["grideditor"],
156 "Load a Python-style escaped string into the current cell from file"
157 )
158 km.add("e", "console.grideditor.editor", ["grideditor"], "Edit in external editor")
159 km.add(
160 "w",
161 "console.command console.grideditor.save ",
162 ["grideditor"],
163 "Save data to file as CSV"
164 )
165
166 km.add("z", "eventstore.clear", ["eventlog"], "Clear")
167
168 km.add(
169 "a",
170 """
171 console.choose.cmd "Context" console.key.contexts
172 console.command console.key.bind {choice}
173 """,
174 ["keybindings"],
175 "Add a key binding"
176 )
177 km.add(
178 "d",
179 "console.key.unbind.focus",
180 ["keybindings"],
181 "Unbind the currently focused key binding"
182 )
183 km.add(
184 "x",
185 "console.key.execute.focus",
186 ["keybindings"],
187 "Execute the currently focused key binding"
188 )
189 km.add(
190 "enter",
191 "console.key.edit.focus",
192 ["keybindings"],
193 "Edit the currently focused key binding"
194 )
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/tools/console/defaultkeys.py b/mitmproxy/tools/console/defaultkeys.py
--- a/mitmproxy/tools/console/defaultkeys.py
+++ b/mitmproxy/tools/console/defaultkeys.py
@@ -11,6 +11,7 @@
km.add("q", "console.view.pop", ["global"], "Exit the current view")
km.add("-", "console.layout.cycle", ["global"], "Cycle to next layout")
km.add("shift tab", "console.panes.next", ["global"], "Focus next layout pane")
+ km.add("ctrl right", "console.panes.next", ["global"], "Focus next layout pane")
km.add("P", "console.view.flow @focus", ["global"], "View flow details")
km.add("g", "console.nav.start", ["global"], "Go to start")
|
{"golden_diff": "diff --git a/mitmproxy/tools/console/defaultkeys.py b/mitmproxy/tools/console/defaultkeys.py\n--- a/mitmproxy/tools/console/defaultkeys.py\n+++ b/mitmproxy/tools/console/defaultkeys.py\n@@ -11,6 +11,7 @@\n km.add(\"q\", \"console.view.pop\", [\"global\"], \"Exit the current view\")\n km.add(\"-\", \"console.layout.cycle\", [\"global\"], \"Cycle to next layout\")\n km.add(\"shift tab\", \"console.panes.next\", [\"global\"], \"Focus next layout pane\")\n+ km.add(\"ctrl right\", \"console.panes.next\", [\"global\"], \"Focus next layout pane\")\n km.add(\"P\", \"console.view.flow @focus\", [\"global\"], \"View flow details\")\n \n km.add(\"g\", \"console.nav.start\", [\"global\"], \"Go to start\")\n", "issue": "console: shift+tab is broken in WSL for some Windows terminals\nSee https://github.com/Microsoft/WSL/issues/1770. This seems to affect some but not all Windows terminals. \r\n\r\nWe use shift+tab by default to switch to the next pane in the console app. We can:\r\n\r\n- Say that this is not our problem, and wait for upstream to fix it.\r\n- Find a different binding for next pane - which would be a shame, because shit+tab is very natural.\r\n\r\n@mhils what say you?\n", "before_files": [{"content": "\ndef map(km):\n km.add(\":\", \"console.command \", [\"global\"], \"Command prompt\")\n km.add(\"?\", \"console.view.help\", [\"global\"], \"View help\")\n km.add(\"B\", \"browser.start\", [\"global\"], \"Start an attached browser\")\n km.add(\"C\", \"console.view.commands\", [\"global\"], \"View commands\")\n km.add(\"K\", \"console.view.keybindings\", [\"global\"], \"View key bindings\")\n km.add(\"O\", \"console.view.options\", [\"global\"], \"View options\")\n km.add(\"E\", \"console.view.eventlog\", [\"global\"], \"View event log\")\n km.add(\"Q\", \"console.exit\", [\"global\"], \"Exit immediately\")\n km.add(\"q\", \"console.view.pop\", [\"global\"], \"Exit the current view\")\n km.add(\"-\", \"console.layout.cycle\", [\"global\"], \"Cycle to next layout\")\n km.add(\"shift tab\", \"console.panes.next\", [\"global\"], \"Focus next layout pane\")\n km.add(\"P\", \"console.view.flow @focus\", [\"global\"], \"View flow details\")\n\n km.add(\"g\", \"console.nav.start\", [\"global\"], \"Go to start\")\n km.add(\"G\", \"console.nav.end\", [\"global\"], \"Go to end\")\n km.add(\"k\", \"console.nav.up\", [\"global\"], \"Up\")\n km.add(\"j\", \"console.nav.down\", [\"global\"], \"Down\")\n km.add(\"l\", \"console.nav.right\", [\"global\"], \"Right\")\n km.add(\"h\", \"console.nav.left\", [\"global\"], \"Left\")\n km.add(\"tab\", \"console.nav.next\", [\"global\"], \"Next\")\n km.add(\"enter\", \"console.nav.select\", [\"global\"], \"Select\")\n km.add(\"space\", \"console.nav.pagedown\", [\"global\"], \"Page down\")\n km.add(\"ctrl f\", \"console.nav.pagedown\", [\"global\"], \"Page down\")\n km.add(\"ctrl b\", \"console.nav.pageup\", [\"global\"], \"Page up\")\n\n km.add(\"I\", \"console.intercept.toggle\", [\"global\"], \"Toggle intercept\")\n km.add(\"i\", \"console.command.set intercept\", [\"global\"], \"Set intercept\")\n km.add(\"W\", \"console.command.set save_stream_file\", [\"global\"], \"Stream to file\")\n km.add(\"A\", \"flow.resume @all\", [\"flowlist\", \"flowview\"], \"Resume all intercepted flows\")\n km.add(\"a\", \"flow.resume @focus\", [\"flowlist\", \"flowview\"], \"Resume this intercepted flow\")\n km.add(\n \"b\", \"console.command cut.save @focus response.content \",\n [\"flowlist\", \"flowview\"],\n \"Save response body to file\"\n )\n km.add(\"d\", 
\"view.remove @focus\", [\"flowlist\", \"flowview\"], \"Delete flow from view\")\n km.add(\"D\", \"view.duplicate @focus\", [\"flowlist\", \"flowview\"], \"Duplicate flow\")\n km.add(\n \"e\",\n \"\"\"\n console.choose.cmd Format export.formats\n console.command export.file {choice} @focus\n \"\"\",\n [\"flowlist\", \"flowview\"],\n \"Export this flow to file\"\n )\n km.add(\"f\", \"console.command.set view_filter\", [\"flowlist\"], \"Set view filter\")\n km.add(\"F\", \"set console_focus_follow=toggle\", [\"flowlist\"], \"Set focus follow\")\n km.add(\n \"ctrl l\",\n \"console.command cut.clip \",\n [\"flowlist\", \"flowview\"],\n \"Send cuts to clipboard\"\n )\n km.add(\"L\", \"console.command view.load \", [\"flowlist\"], \"Load flows from file\")\n km.add(\"m\", \"flow.mark.toggle @focus\", [\"flowlist\"], \"Toggle mark on this flow\")\n km.add(\"M\", \"view.marked.toggle\", [\"flowlist\"], \"Toggle viewing marked flows\")\n km.add(\n \"n\",\n \"console.command view.create get https://example.com/\",\n [\"flowlist\"],\n \"Create a new flow\"\n )\n km.add(\n \"o\",\n \"\"\"\n console.choose.cmd Order view.order.options\n set view_order={choice}\n \"\"\",\n [\"flowlist\"],\n \"Set flow list order\"\n )\n km.add(\"r\", \"replay.client @focus\", [\"flowlist\", \"flowview\"], \"Replay this flow\")\n km.add(\"S\", \"console.command replay.server \", [\"flowlist\"], \"Start server replay\")\n km.add(\"v\", \"set view_order_reversed=toggle\", [\"flowlist\"], \"Reverse flow list order\")\n km.add(\"U\", \"flow.mark @all false\", [\"flowlist\"], \"Un-set all marks\")\n km.add(\"w\", \"console.command save.file @shown \", [\"flowlist\"], \"Save listed flows to file\")\n km.add(\"V\", \"flow.revert @focus\", [\"flowlist\", \"flowview\"], \"Revert changes to this flow\")\n km.add(\"X\", \"flow.kill @focus\", [\"flowlist\"], \"Kill this flow\")\n km.add(\"z\", \"view.remove @all\", [\"flowlist\"], \"Clear flow list\")\n km.add(\"Z\", \"view.remove @hidden\", [\"flowlist\"], \"Purge all flows not showing\")\n km.add(\n \"|\",\n \"console.command script.run @focus \",\n [\"flowlist\", \"flowview\"],\n \"Run a script on this flow\"\n )\n\n km.add(\n \"e\",\n \"\"\"\n console.choose.cmd Part console.edit.focus.options\n console.edit.focus {choice}\n \"\"\",\n [\"flowview\"],\n \"Edit a flow component\"\n )\n km.add(\n \"f\",\n \"view.setval.toggle @focus fullcontents\",\n [\"flowview\"],\n \"Toggle viewing full contents on this flow\",\n )\n km.add(\"w\", \"console.command save.file @focus \", [\"flowview\"], \"Save flow to file\")\n km.add(\"space\", \"view.focus.next\", [\"flowview\"], \"Go to next flow\")\n\n km.add(\n \"v\",\n \"\"\"\n console.choose \"View Part\" request,response\n console.bodyview @focus {choice}\n \"\"\",\n [\"flowview\"],\n \"View flow body in an external viewer\"\n )\n km.add(\"p\", \"view.focus.prev\", [\"flowview\"], \"Go to previous flow\")\n km.add(\n \"m\",\n \"\"\"\n console.choose.cmd Mode console.flowview.mode.options\n console.flowview.mode.set {choice}\n \"\"\",\n [\"flowview\"],\n \"Set flow view mode\"\n )\n km.add(\n \"z\",\n \"\"\"\n console.choose \"Part\" request,response\n flow.encode.toggle @focus {choice}\n \"\"\",\n [\"flowview\"],\n \"Encode/decode flow body\"\n )\n\n km.add(\"L\", \"console.command options.load \", [\"options\"], \"Load from file\")\n km.add(\"S\", \"console.command options.save \", [\"options\"], \"Save to file\")\n km.add(\"D\", \"options.reset\", [\"options\"], \"Reset all options\")\n km.add(\"d\", \"console.options.reset.focus\", 
[\"options\"], \"Reset this option\")\n\n km.add(\"a\", \"console.grideditor.add\", [\"grideditor\"], \"Add a row after cursor\")\n km.add(\"A\", \"console.grideditor.insert\", [\"grideditor\"], \"Insert a row before cursor\")\n km.add(\"d\", \"console.grideditor.delete\", [\"grideditor\"], \"Delete this row\")\n km.add(\n \"r\",\n \"console.command console.grideditor.load\",\n [\"grideditor\"],\n \"Read unescaped data into the current cell from file\"\n )\n km.add(\n \"R\",\n \"console.command console.grideditor.load_escaped\",\n [\"grideditor\"],\n \"Load a Python-style escaped string into the current cell from file\"\n )\n km.add(\"e\", \"console.grideditor.editor\", [\"grideditor\"], \"Edit in external editor\")\n km.add(\n \"w\",\n \"console.command console.grideditor.save \",\n [\"grideditor\"],\n \"Save data to file as CSV\"\n )\n\n km.add(\"z\", \"eventstore.clear\", [\"eventlog\"], \"Clear\")\n\n km.add(\n \"a\",\n \"\"\"\n console.choose.cmd \"Context\" console.key.contexts\n console.command console.key.bind {choice}\n \"\"\",\n [\"keybindings\"],\n \"Add a key binding\"\n )\n km.add(\n \"d\",\n \"console.key.unbind.focus\",\n [\"keybindings\"],\n \"Unbind the currently focused key binding\"\n )\n km.add(\n \"x\",\n \"console.key.execute.focus\",\n [\"keybindings\"],\n \"Execute the currently focused key binding\"\n )\n km.add(\n \"enter\",\n \"console.key.edit.focus\",\n [\"keybindings\"],\n \"Edit the currently focused key binding\"\n )\n", "path": "mitmproxy/tools/console/defaultkeys.py"}], "after_files": [{"content": "\ndef map(km):\n km.add(\":\", \"console.command \", [\"global\"], \"Command prompt\")\n km.add(\"?\", \"console.view.help\", [\"global\"], \"View help\")\n km.add(\"B\", \"browser.start\", [\"global\"], \"Start an attached browser\")\n km.add(\"C\", \"console.view.commands\", [\"global\"], \"View commands\")\n km.add(\"K\", \"console.view.keybindings\", [\"global\"], \"View key bindings\")\n km.add(\"O\", \"console.view.options\", [\"global\"], \"View options\")\n km.add(\"E\", \"console.view.eventlog\", [\"global\"], \"View event log\")\n km.add(\"Q\", \"console.exit\", [\"global\"], \"Exit immediately\")\n km.add(\"q\", \"console.view.pop\", [\"global\"], \"Exit the current view\")\n km.add(\"-\", \"console.layout.cycle\", [\"global\"], \"Cycle to next layout\")\n km.add(\"shift tab\", \"console.panes.next\", [\"global\"], \"Focus next layout pane\")\n km.add(\"ctrl right\", \"console.panes.next\", [\"global\"], \"Focus next layout pane\")\n km.add(\"P\", \"console.view.flow @focus\", [\"global\"], \"View flow details\")\n\n km.add(\"g\", \"console.nav.start\", [\"global\"], \"Go to start\")\n km.add(\"G\", \"console.nav.end\", [\"global\"], \"Go to end\")\n km.add(\"k\", \"console.nav.up\", [\"global\"], \"Up\")\n km.add(\"j\", \"console.nav.down\", [\"global\"], \"Down\")\n km.add(\"l\", \"console.nav.right\", [\"global\"], \"Right\")\n km.add(\"h\", \"console.nav.left\", [\"global\"], \"Left\")\n km.add(\"tab\", \"console.nav.next\", [\"global\"], \"Next\")\n km.add(\"enter\", \"console.nav.select\", [\"global\"], \"Select\")\n km.add(\"space\", \"console.nav.pagedown\", [\"global\"], \"Page down\")\n km.add(\"ctrl f\", \"console.nav.pagedown\", [\"global\"], \"Page down\")\n km.add(\"ctrl b\", \"console.nav.pageup\", [\"global\"], \"Page up\")\n\n km.add(\"I\", \"console.intercept.toggle\", [\"global\"], \"Toggle intercept\")\n km.add(\"i\", \"console.command.set intercept\", [\"global\"], \"Set intercept\")\n km.add(\"W\", \"console.command.set 
save_stream_file\", [\"global\"], \"Stream to file\")\n km.add(\"A\", \"flow.resume @all\", [\"flowlist\", \"flowview\"], \"Resume all intercepted flows\")\n km.add(\"a\", \"flow.resume @focus\", [\"flowlist\", \"flowview\"], \"Resume this intercepted flow\")\n km.add(\n \"b\", \"console.command cut.save @focus response.content \",\n [\"flowlist\", \"flowview\"],\n \"Save response body to file\"\n )\n km.add(\"d\", \"view.remove @focus\", [\"flowlist\", \"flowview\"], \"Delete flow from view\")\n km.add(\"D\", \"view.duplicate @focus\", [\"flowlist\", \"flowview\"], \"Duplicate flow\")\n km.add(\n \"e\",\n \"\"\"\n console.choose.cmd Format export.formats\n console.command export.file {choice} @focus\n \"\"\",\n [\"flowlist\", \"flowview\"],\n \"Export this flow to file\"\n )\n km.add(\"f\", \"console.command.set view_filter\", [\"flowlist\"], \"Set view filter\")\n km.add(\"F\", \"set console_focus_follow=toggle\", [\"flowlist\"], \"Set focus follow\")\n km.add(\n \"ctrl l\",\n \"console.command cut.clip \",\n [\"flowlist\", \"flowview\"],\n \"Send cuts to clipboard\"\n )\n km.add(\"L\", \"console.command view.load \", [\"flowlist\"], \"Load flows from file\")\n km.add(\"m\", \"flow.mark.toggle @focus\", [\"flowlist\"], \"Toggle mark on this flow\")\n km.add(\"M\", \"view.marked.toggle\", [\"flowlist\"], \"Toggle viewing marked flows\")\n km.add(\n \"n\",\n \"console.command view.create get https://example.com/\",\n [\"flowlist\"],\n \"Create a new flow\"\n )\n km.add(\n \"o\",\n \"\"\"\n console.choose.cmd Order view.order.options\n set view_order={choice}\n \"\"\",\n [\"flowlist\"],\n \"Set flow list order\"\n )\n km.add(\"r\", \"replay.client @focus\", [\"flowlist\", \"flowview\"], \"Replay this flow\")\n km.add(\"S\", \"console.command replay.server \", [\"flowlist\"], \"Start server replay\")\n km.add(\"v\", \"set view_order_reversed=toggle\", [\"flowlist\"], \"Reverse flow list order\")\n km.add(\"U\", \"flow.mark @all false\", [\"flowlist\"], \"Un-set all marks\")\n km.add(\"w\", \"console.command save.file @shown \", [\"flowlist\"], \"Save listed flows to file\")\n km.add(\"V\", \"flow.revert @focus\", [\"flowlist\", \"flowview\"], \"Revert changes to this flow\")\n km.add(\"X\", \"flow.kill @focus\", [\"flowlist\"], \"Kill this flow\")\n km.add(\"z\", \"view.remove @all\", [\"flowlist\"], \"Clear flow list\")\n km.add(\"Z\", \"view.remove @hidden\", [\"flowlist\"], \"Purge all flows not showing\")\n km.add(\n \"|\",\n \"console.command script.run @focus \",\n [\"flowlist\", \"flowview\"],\n \"Run a script on this flow\"\n )\n\n km.add(\n \"e\",\n \"\"\"\n console.choose.cmd Part console.edit.focus.options\n console.edit.focus {choice}\n \"\"\",\n [\"flowview\"],\n \"Edit a flow component\"\n )\n km.add(\n \"f\",\n \"view.setval.toggle @focus fullcontents\",\n [\"flowview\"],\n \"Toggle viewing full contents on this flow\",\n )\n km.add(\"w\", \"console.command save.file @focus \", [\"flowview\"], \"Save flow to file\")\n km.add(\"space\", \"view.focus.next\", [\"flowview\"], \"Go to next flow\")\n\n km.add(\n \"v\",\n \"\"\"\n console.choose \"View Part\" request,response\n console.bodyview @focus {choice}\n \"\"\",\n [\"flowview\"],\n \"View flow body in an external viewer\"\n )\n km.add(\"p\", \"view.focus.prev\", [\"flowview\"], \"Go to previous flow\")\n km.add(\n \"m\",\n \"\"\"\n console.choose.cmd Mode console.flowview.mode.options\n console.flowview.mode.set {choice}\n \"\"\",\n [\"flowview\"],\n \"Set flow view mode\"\n )\n km.add(\n \"z\",\n \"\"\"\n console.choose 
\"Part\" request,response\n flow.encode.toggle @focus {choice}\n \"\"\",\n [\"flowview\"],\n \"Encode/decode flow body\"\n )\n\n km.add(\"L\", \"console.command options.load \", [\"options\"], \"Load from file\")\n km.add(\"S\", \"console.command options.save \", [\"options\"], \"Save to file\")\n km.add(\"D\", \"options.reset\", [\"options\"], \"Reset all options\")\n km.add(\"d\", \"console.options.reset.focus\", [\"options\"], \"Reset this option\")\n\n km.add(\"a\", \"console.grideditor.add\", [\"grideditor\"], \"Add a row after cursor\")\n km.add(\"A\", \"console.grideditor.insert\", [\"grideditor\"], \"Insert a row before cursor\")\n km.add(\"d\", \"console.grideditor.delete\", [\"grideditor\"], \"Delete this row\")\n km.add(\n \"r\",\n \"console.command console.grideditor.load\",\n [\"grideditor\"],\n \"Read unescaped data into the current cell from file\"\n )\n km.add(\n \"R\",\n \"console.command console.grideditor.load_escaped\",\n [\"grideditor\"],\n \"Load a Python-style escaped string into the current cell from file\"\n )\n km.add(\"e\", \"console.grideditor.editor\", [\"grideditor\"], \"Edit in external editor\")\n km.add(\n \"w\",\n \"console.command console.grideditor.save \",\n [\"grideditor\"],\n \"Save data to file as CSV\"\n )\n\n km.add(\"z\", \"eventstore.clear\", [\"eventlog\"], \"Clear\")\n\n km.add(\n \"a\",\n \"\"\"\n console.choose.cmd \"Context\" console.key.contexts\n console.command console.key.bind {choice}\n \"\"\",\n [\"keybindings\"],\n \"Add a key binding\"\n )\n km.add(\n \"d\",\n \"console.key.unbind.focus\",\n [\"keybindings\"],\n \"Unbind the currently focused key binding\"\n )\n km.add(\n \"x\",\n \"console.key.execute.focus\",\n [\"keybindings\"],\n \"Execute the currently focused key binding\"\n )\n km.add(\n \"enter\",\n \"console.key.edit.focus\",\n [\"keybindings\"],\n \"Edit the currently focused key binding\"\n )\n", "path": "mitmproxy/tools/console/defaultkeys.py"}]}
| 2,746 | 180 |
gh_patches_debug_27640
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-6301
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wagtail should not change month name translations
### Issue Summary
Wagtail's translations override month name translations (at least for the Slovenian language), which changes how dates are formatted.
### Steps to Reproduce
With wagtail installed:
```python
>>> from django.utils.translation import activate
>>> activate("sl")
>>> from django.utils import formats
...
>>> from datetime import date
>>> formats.date_format(date.today())
'5. Avgust 2020'
```
It should be (and without wagtail installed it is) `5. avgust 2020`.
* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: (yes / no)
yes
### Technical details
* Python version: Run `python --version`.
Python 3.7.1
* Django version: Look in your requirements.txt, or run `pip show django | grep Version`.
Version: 2.2.14
* Wagtail version: Look at the bottom of the Settings menu in the Wagtail admin, or run `pip show wagtail | grep Version:`.
Version: 2.9.2
--- END ISSUE ---
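For context on the mechanism: Django formats month names through the plain msgids in `django.utils.dates` (for example `'August'`), and Wagtail's admin catalog translates the same msgids, so its capitalised Slovenian strings win over Django's lowercase ones. The sketch below is minimal and standalone; the `settings.configure` call is only there so it runs outside a project, and the capitalised output appears only once Wagtail's catalog is loaded.
```python
# Minimal sketch of the msgid collision described in the issue.  The
# settings.configure call is only here so the snippet runs outside a project;
# with Wagtail installed and in INSTALLED_APPS, the same call prints "Avgust".
import django
from django.conf import settings

settings.configure(USE_I18N=True)
django.setup()

from django.utils import translation
from django.utils.translation import gettext

with translation.override("sl"):
    print(gettext("August"))  # Django alone: "avgust" (lowercase)
```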
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/localization.py`
Content:
```
1 import pytz
2
3 from django.conf import settings
4 from django.utils.translation import gettext as _
5 from django.utils.translation import gettext_lazy
6
7
8 # Wagtail languages with >=90% coverage
9 # This list is manually maintained
10 WAGTAILADMIN_PROVIDED_LANGUAGES = [
11 ('ar', gettext_lazy('Arabic')),
12 ('ca', gettext_lazy('Catalan')),
13 ('cs', gettext_lazy('Czech')),
14 ('de', gettext_lazy('German')),
15 ('el', gettext_lazy('Greek')),
16 ('en', gettext_lazy('English')),
17 ('es', gettext_lazy('Spanish')),
18 ('fi', gettext_lazy('Finnish')),
19 ('fr', gettext_lazy('French')),
20 ('gl', gettext_lazy('Galician')),
21 ('hu', gettext_lazy('Hungarian')),
22 ('id-id', gettext_lazy('Indonesian')),
23 ('is-is', gettext_lazy('Icelandic')),
24 ('it', gettext_lazy('Italian')),
25 ('ja', gettext_lazy('Japanese')),
26 ('ko', gettext_lazy('Korean')),
27 ('lt', gettext_lazy('Lithuanian')),
28 ('mn', gettext_lazy('Mongolian')),
29 ('nb', gettext_lazy('Norwegian Bokmål')),
30 ('nl-nl', gettext_lazy('Netherlands Dutch')),
31 ('fa', gettext_lazy('Persian')),
32 ('pl', gettext_lazy('Polish')),
33 ('pt-br', gettext_lazy('Brazilian Portuguese')),
34 ('pt-pt', gettext_lazy('Portuguese')),
35 ('ro', gettext_lazy('Romanian')),
36 ('ru', gettext_lazy('Russian')),
37 ('sv', gettext_lazy('Swedish')),
38 ('sk-sk', gettext_lazy('Slovak')),
39 ('th', gettext_lazy('Thai')),
40 ('tr', gettext_lazy('Turkish')),
41 ('tr-tr', gettext_lazy('Turkish (Turkey)')),
42 ('uk', gettext_lazy('Ukrainian')),
43 ('zh-hans', gettext_lazy('Chinese (Simplified)')),
44 ('zh-hant', gettext_lazy('Chinese (Traditional)')),
45 ]
46
47
48 # Translatable strings to be made available to JavaScript code
49 # as the wagtailConfig.STRINGS object
50 def get_js_translation_strings():
51 return {
52 'DELETE': _('Delete'),
53 'EDIT': _('Edit'),
54 'PAGE': _('Page'),
55 'PAGES': _('Pages'),
56 'LOADING': _('Loading…'),
57 'NO_RESULTS': _('No results'),
58 'SERVER_ERROR': _('Server Error'),
59 'SEE_ALL': _('See all'),
60 'CLOSE_EXPLORER': _('Close explorer'),
61 'ALT_TEXT': _('Alt text'),
62 'WRITE_HERE': _('Write here…'),
63 'HORIZONTAL_LINE': _('Horizontal line'),
64 'LINE_BREAK': _('Line break'),
65 'UNDO': _('Undo'),
66 'REDO': _('Redo'),
67 'RELOAD_PAGE': _('Reload the page'),
68 'RELOAD_EDITOR': _('Reload saved content'),
69 'SHOW_LATEST_CONTENT': _('Show latest content'),
70 'SHOW_ERROR': _('Show error'),
71 'EDITOR_CRASH': _('The editor just crashed. Content has been reset to the last saved version.'),
72 'BROKEN_LINK': _('Broken link'),
73 'MISSING_DOCUMENT': _('Missing document'),
74 'CLOSE': _('Close'),
75 'EDIT_PAGE': _('Edit \'{title}\''),
76 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \'{title}\''),
77 'PAGE_EXPLORER': _('Page explorer'),
78
79 'MONTHS': [
80 _('January'),
81 _('February'),
82 _('March'),
83 _('April'),
84 _('May'),
85 _('June'),
86 _('July'),
87 _('August'),
88 _('September'),
89 _('October'),
90 _('November'),
91 _('December')
92 ],
93 'WEEKDAYS': [
94 _('Sunday'),
95 _('Monday'),
96 _('Tuesday'),
97 _('Wednesday'),
98 _('Thursday'),
99 _('Friday'),
100 _('Saturday')
101 ],
102 'WEEKDAYS_SHORT': [
103 _('Sun'),
104 _('Mon'),
105 _('Tue'),
106 _('Wed'),
107 _('Thu'),
108 _('Fri'),
109 _('Sat')
110 ]
111 }
112
113
114 def get_available_admin_languages():
115 return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)
116
117
118 def get_available_admin_time_zones():
119 return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/localization.py b/wagtail/admin/localization.py
--- a/wagtail/admin/localization.py
+++ b/wagtail/admin/localization.py
@@ -1,6 +1,7 @@
import pytz
from django.conf import settings
+from django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
@@ -76,38 +77,12 @@
'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \'{title}\''),
'PAGE_EXPLORER': _('Page explorer'),
- 'MONTHS': [
- _('January'),
- _('February'),
- _('March'),
- _('April'),
- _('May'),
- _('June'),
- _('July'),
- _('August'),
- _('September'),
- _('October'),
- _('November'),
- _('December')
- ],
- 'WEEKDAYS': [
- _('Sunday'),
- _('Monday'),
- _('Tuesday'),
- _('Wednesday'),
- _('Thursday'),
- _('Friday'),
- _('Saturday')
- ],
- 'WEEKDAYS_SHORT': [
- _('Sun'),
- _('Mon'),
- _('Tue'),
- _('Wed'),
- _('Thu'),
- _('Fri'),
- _('Sat')
- ]
+ 'MONTHS': [str(m) for m in MONTHS.values()],
+
+ # Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start
+ # counting from -1 and use modulo 7 to get an array index
+ 'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],
+ 'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],
}
|
{"golden_diff": "diff --git a/wagtail/admin/localization.py b/wagtail/admin/localization.py\n--- a/wagtail/admin/localization.py\n+++ b/wagtail/admin/localization.py\n@@ -1,6 +1,7 @@\n import pytz\n \n from django.conf import settings\n+from django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR\n from django.utils.translation import gettext as _\n from django.utils.translation import gettext_lazy\n \n@@ -76,38 +77,12 @@\n 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \\'{title}\\''),\n 'PAGE_EXPLORER': _('Page explorer'),\n \n- 'MONTHS': [\n- _('January'),\n- _('February'),\n- _('March'),\n- _('April'),\n- _('May'),\n- _('June'),\n- _('July'),\n- _('August'),\n- _('September'),\n- _('October'),\n- _('November'),\n- _('December')\n- ],\n- 'WEEKDAYS': [\n- _('Sunday'),\n- _('Monday'),\n- _('Tuesday'),\n- _('Wednesday'),\n- _('Thursday'),\n- _('Friday'),\n- _('Saturday')\n- ],\n- 'WEEKDAYS_SHORT': [\n- _('Sun'),\n- _('Mon'),\n- _('Tue'),\n- _('Wed'),\n- _('Thu'),\n- _('Fri'),\n- _('Sat')\n- ]\n+ 'MONTHS': [str(m) for m in MONTHS.values()],\n+\n+ # Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start\n+ # counting from -1 and use modulo 7 to get an array index\n+ 'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],\n+ 'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],\n }\n", "issue": "Wagtail should not change month name translations\n### Issue Summary\r\n\r\nWagtail translations overrides month name translations (at least for Slovenian language) which changes how dates are formatted.\r\n\r\n### Steps to Reproduce\r\n\r\nWith wagtail installed:\r\n\r\n```python\r\n>>> from django.utils.translation import activate \r\n>>> activate(\"sl\") \r\n>>> from django.utils import formats \r\n... \r\n>>> from datetime import date \r\n>>> formats.date_format(date.today()) \r\n'5. Avgust 2020'\r\n```\r\n\r\nIt should be (and without wagtail installed it is) `5. 
avgust 2020`.\r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: (yes / no)\r\n\r\nyes\r\n\r\n### Technical details\r\n\r\n* Python version: Run `python --version`.\r\n\r\nPython 3.7.1\r\n\r\nDjango version: Look in your requirements.txt, or run `pip show django | grep Version`.\r\n\r\nVersion: 2.2.14\r\n\r\n* Wagtail version: Look at the bottom of the Settings menu in the Wagtail admin, or run `pip show wagtail | grep Version:`.\r\n\r\nVersion: 2.9.2\r\n\n", "before_files": [{"content": "import pytz\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\n# Wagtail languages with >=90% coverage\n# This list is manually maintained\nWAGTAILADMIN_PROVIDED_LANGUAGES = [\n ('ar', gettext_lazy('Arabic')),\n ('ca', gettext_lazy('Catalan')),\n ('cs', gettext_lazy('Czech')),\n ('de', gettext_lazy('German')),\n ('el', gettext_lazy('Greek')),\n ('en', gettext_lazy('English')),\n ('es', gettext_lazy('Spanish')),\n ('fi', gettext_lazy('Finnish')),\n ('fr', gettext_lazy('French')),\n ('gl', gettext_lazy('Galician')),\n ('hu', gettext_lazy('Hungarian')),\n ('id-id', gettext_lazy('Indonesian')),\n ('is-is', gettext_lazy('Icelandic')),\n ('it', gettext_lazy('Italian')),\n ('ja', gettext_lazy('Japanese')),\n ('ko', gettext_lazy('Korean')),\n ('lt', gettext_lazy('Lithuanian')),\n ('mn', gettext_lazy('Mongolian')),\n ('nb', gettext_lazy('Norwegian Bokm\u00e5l')),\n ('nl-nl', gettext_lazy('Netherlands Dutch')),\n ('fa', gettext_lazy('Persian')),\n ('pl', gettext_lazy('Polish')),\n ('pt-br', gettext_lazy('Brazilian Portuguese')),\n ('pt-pt', gettext_lazy('Portuguese')),\n ('ro', gettext_lazy('Romanian')),\n ('ru', gettext_lazy('Russian')),\n ('sv', gettext_lazy('Swedish')),\n ('sk-sk', gettext_lazy('Slovak')),\n ('th', gettext_lazy('Thai')),\n ('tr', gettext_lazy('Turkish')),\n ('tr-tr', gettext_lazy('Turkish (Turkey)')),\n ('uk', gettext_lazy('Ukrainian')),\n ('zh-hans', gettext_lazy('Chinese (Simplified)')),\n ('zh-hant', gettext_lazy('Chinese (Traditional)')),\n]\n\n\n# Translatable strings to be made available to JavaScript code\n# as the wagtailConfig.STRINGS object\ndef get_js_translation_strings():\n return {\n 'DELETE': _('Delete'),\n 'EDIT': _('Edit'),\n 'PAGE': _('Page'),\n 'PAGES': _('Pages'),\n 'LOADING': _('Loading\u2026'),\n 'NO_RESULTS': _('No results'),\n 'SERVER_ERROR': _('Server Error'),\n 'SEE_ALL': _('See all'),\n 'CLOSE_EXPLORER': _('Close explorer'),\n 'ALT_TEXT': _('Alt text'),\n 'WRITE_HERE': _('Write here\u2026'),\n 'HORIZONTAL_LINE': _('Horizontal line'),\n 'LINE_BREAK': _('Line break'),\n 'UNDO': _('Undo'),\n 'REDO': _('Redo'),\n 'RELOAD_PAGE': _('Reload the page'),\n 'RELOAD_EDITOR': _('Reload saved content'),\n 'SHOW_LATEST_CONTENT': _('Show latest content'),\n 'SHOW_ERROR': _('Show error'),\n 'EDITOR_CRASH': _('The editor just crashed. 
Content has been reset to the last saved version.'),\n 'BROKEN_LINK': _('Broken link'),\n 'MISSING_DOCUMENT': _('Missing document'),\n 'CLOSE': _('Close'),\n 'EDIT_PAGE': _('Edit \\'{title}\\''),\n 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \\'{title}\\''),\n 'PAGE_EXPLORER': _('Page explorer'),\n\n 'MONTHS': [\n _('January'),\n _('February'),\n _('March'),\n _('April'),\n _('May'),\n _('June'),\n _('July'),\n _('August'),\n _('September'),\n _('October'),\n _('November'),\n _('December')\n ],\n 'WEEKDAYS': [\n _('Sunday'),\n _('Monday'),\n _('Tuesday'),\n _('Wednesday'),\n _('Thursday'),\n _('Friday'),\n _('Saturday')\n ],\n 'WEEKDAYS_SHORT': [\n _('Sun'),\n _('Mon'),\n _('Tue'),\n _('Wed'),\n _('Thu'),\n _('Fri'),\n _('Sat')\n ]\n }\n\n\ndef get_available_admin_languages():\n return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)\n\n\ndef get_available_admin_time_zones():\n return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)\n", "path": "wagtail/admin/localization.py"}], "after_files": [{"content": "import pytz\n\nfrom django.conf import settings\nfrom django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\n# Wagtail languages with >=90% coverage\n# This list is manually maintained\nWAGTAILADMIN_PROVIDED_LANGUAGES = [\n ('ar', gettext_lazy('Arabic')),\n ('ca', gettext_lazy('Catalan')),\n ('cs', gettext_lazy('Czech')),\n ('de', gettext_lazy('German')),\n ('el', gettext_lazy('Greek')),\n ('en', gettext_lazy('English')),\n ('es', gettext_lazy('Spanish')),\n ('fi', gettext_lazy('Finnish')),\n ('fr', gettext_lazy('French')),\n ('gl', gettext_lazy('Galician')),\n ('hu', gettext_lazy('Hungarian')),\n ('id-id', gettext_lazy('Indonesian')),\n ('is-is', gettext_lazy('Icelandic')),\n ('it', gettext_lazy('Italian')),\n ('ja', gettext_lazy('Japanese')),\n ('ko', gettext_lazy('Korean')),\n ('lt', gettext_lazy('Lithuanian')),\n ('mn', gettext_lazy('Mongolian')),\n ('nb', gettext_lazy('Norwegian Bokm\u00e5l')),\n ('nl-nl', gettext_lazy('Netherlands Dutch')),\n ('fa', gettext_lazy('Persian')),\n ('pl', gettext_lazy('Polish')),\n ('pt-br', gettext_lazy('Brazilian Portuguese')),\n ('pt-pt', gettext_lazy('Portuguese')),\n ('ro', gettext_lazy('Romanian')),\n ('ru', gettext_lazy('Russian')),\n ('sv', gettext_lazy('Swedish')),\n ('sk-sk', gettext_lazy('Slovak')),\n ('th', gettext_lazy('Thai')),\n ('tr', gettext_lazy('Turkish')),\n ('tr-tr', gettext_lazy('Turkish (Turkey)')),\n ('uk', gettext_lazy('Ukrainian')),\n ('zh-hans', gettext_lazy('Chinese (Simplified)')),\n ('zh-hant', gettext_lazy('Chinese (Traditional)')),\n]\n\n\n# Translatable strings to be made available to JavaScript code\n# as the wagtailConfig.STRINGS object\ndef get_js_translation_strings():\n return {\n 'DELETE': _('Delete'),\n 'EDIT': _('Edit'),\n 'PAGE': _('Page'),\n 'PAGES': _('Pages'),\n 'LOADING': _('Loading\u2026'),\n 'NO_RESULTS': _('No results'),\n 'SERVER_ERROR': _('Server Error'),\n 'SEE_ALL': _('See all'),\n 'CLOSE_EXPLORER': _('Close explorer'),\n 'ALT_TEXT': _('Alt text'),\n 'WRITE_HERE': _('Write here\u2026'),\n 'HORIZONTAL_LINE': _('Horizontal line'),\n 'LINE_BREAK': _('Line break'),\n 'UNDO': _('Undo'),\n 'REDO': _('Redo'),\n 'RELOAD_PAGE': _('Reload the page'),\n 'RELOAD_EDITOR': _('Reload saved content'),\n 'SHOW_LATEST_CONTENT': _('Show latest content'),\n 'SHOW_ERROR': _('Show error'),\n 'EDITOR_CRASH': _('The editor just crashed. 
Content has been reset to the last saved version.'),\n 'BROKEN_LINK': _('Broken link'),\n 'MISSING_DOCUMENT': _('Missing document'),\n 'CLOSE': _('Close'),\n 'EDIT_PAGE': _('Edit \\'{title}\\''),\n 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \\'{title}\\''),\n 'PAGE_EXPLORER': _('Page explorer'),\n\n 'MONTHS': [str(m) for m in MONTHS.values()],\n\n # Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start\n # counting from -1 and use modulo 7 to get an array index\n 'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],\n 'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],\n }\n\n\ndef get_available_admin_languages():\n return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)\n\n\ndef get_available_admin_time_zones():\n return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)\n", "path": "wagtail/admin/localization.py"}]}
| 1,696 | 431 |
gh_patches_debug_21832
|
rasdani/github-patches
|
git_diff
|
pallets__werkzeug-2771
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use scrypt by default
#2654 added scrypt support, but couldn't make it the default because [PyPy didn't support it at the time](https://foss.heptapod.net/pypy/pypy/-/issues/3921). Now that PyPy has fixed that and [made a release](https://doc.pypy.org/en/latest/release-v7.3.12.html), I'm comfortable with making scrypt the default.
--- END ISSUE ---
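For context, a usage sketch against the `security.py` module reproduced below (the password string is arbitrary): scrypt can already be requested explicitly, so making it the default only changes what a plain `generate_password_hash(password)` call produces.
```python
from werkzeug.security import check_password_hash, generate_password_hash

# Explicit opt-in today; after the proposed change a plain
# generate_password_hash(password) call would yield the same
# "scrypt:32768:8:1$<salt>$<hex>" shape (defaults n=2**15, r=8, p=1).
pwhash = generate_password_hash("correct horse battery staple", method="scrypt")
print(pwhash.split("$", 1)[0])               # scrypt:32768:8:1
print(check_password_hash(pwhash, "correct horse battery staple"))  # True
```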
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/security.py`
Content:
```
1 from __future__ import annotations
2
3 import hashlib
4 import hmac
5 import os
6 import posixpath
7 import secrets
8
9 SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
10 DEFAULT_PBKDF2_ITERATIONS = 600000
11
12 _os_alt_seps: list[str] = list(
13 sep for sep in [os.sep, os.path.altsep] if sep is not None and sep != "/"
14 )
15
16
17 def gen_salt(length: int) -> str:
18 """Generate a random string of SALT_CHARS with specified ``length``."""
19 if length <= 0:
20 raise ValueError("Salt length must be at least 1.")
21
22 return "".join(secrets.choice(SALT_CHARS) for _ in range(length))
23
24
25 def _hash_internal(method: str, salt: str, password: str) -> tuple[str, str]:
26 method, *args = method.split(":")
27 salt = salt.encode("utf-8")
28 password = password.encode("utf-8")
29
30 if method == "scrypt":
31 if not args:
32 n = 2**15
33 r = 8
34 p = 1
35 else:
36 try:
37 n, r, p = map(int, args)
38 except ValueError:
39 raise ValueError("'scrypt' takes 3 arguments.") from None
40
41 maxmem = 132 * n * r * p # ideally 128, but some extra seems needed
42 return (
43 hashlib.scrypt(password, salt=salt, n=n, r=r, p=p, maxmem=maxmem).hex(),
44 f"scrypt:{n}:{r}:{p}",
45 )
46 elif method == "pbkdf2":
47 len_args = len(args)
48
49 if len_args == 0:
50 hash_name = "sha256"
51 iterations = DEFAULT_PBKDF2_ITERATIONS
52 elif len_args == 1:
53 hash_name = args[0]
54 iterations = DEFAULT_PBKDF2_ITERATIONS
55 elif len_args == 2:
56 hash_name = args[0]
57 iterations = int(args[1])
58 else:
59 raise ValueError("'pbkdf2' takes 2 arguments.")
60
61 return (
62 hashlib.pbkdf2_hmac(hash_name, password, salt, iterations).hex(),
63 f"pbkdf2:{hash_name}:{iterations}",
64 )
65 else:
66 raise ValueError(f"Invalid hash method '{method}'.")
67
68
69 def generate_password_hash(
70 password: str, method: str = "pbkdf2", salt_length: int = 16
71 ) -> str:
72 """Securely hash a password for storage. A password can be compared to a stored hash
73 using :func:`check_password_hash`.
74
75 The following methods are supported:
76
77 - ``scrypt``, more secure but not available on PyPy. The parameters are ``n``,
78 ``r``, and ``p``, the default is ``scrypt:32768:8:1``. See
79 :func:`hashlib.scrypt`.
80 - ``pbkdf2``, the default. The parameters are ``hash_method`` and ``iterations``,
81 the default is ``pbkdf2:sha256:600000``. See :func:`hashlib.pbkdf2_hmac`.
82
83 Default parameters may be updated to reflect current guidelines, and methods may be
84 deprecated and removed if they are no longer considered secure. To migrate old
85 hashes, you may generate a new hash when checking an old hash, or you may contact
86 users with a link to reset their password.
87
88 :param password: The plaintext password.
89 :param method: The key derivation function and parameters.
90 :param salt_length: The number of characters to generate for the salt.
91
92 .. versionchanged:: 2.3
93 Scrypt support was added.
94
95 .. versionchanged:: 2.3
96 The default iterations for pbkdf2 was increased to 600,000.
97
98 .. versionchanged:: 2.3
99 All plain hashes are deprecated and will not be supported in Werkzeug 3.0.
100 """
101 salt = gen_salt(salt_length)
102 h, actual_method = _hash_internal(method, salt, password)
103 return f"{actual_method}${salt}${h}"
104
105
106 def check_password_hash(pwhash: str, password: str) -> bool:
107 """Securely check that the given stored password hash, previously generated using
108 :func:`generate_password_hash`, matches the given password.
109
110 Methods may be deprecated and removed if they are no longer considered secure. To
111 migrate old hashes, you may generate a new hash when checking an old hash, or you
112 may contact users with a link to reset their password.
113
114 :param pwhash: The hashed password.
115 :param password: The plaintext password.
116
117 .. versionchanged:: 2.3
118 All plain hashes are deprecated and will not be supported in Werkzeug 3.0.
119 """
120 try:
121 method, salt, hashval = pwhash.split("$", 2)
122 except ValueError:
123 return False
124
125 return hmac.compare_digest(_hash_internal(method, salt, password)[0], hashval)
126
127
128 def safe_join(directory: str, *pathnames: str) -> str | None:
129 """Safely join zero or more untrusted path components to a base
130 directory to avoid escaping the base directory.
131
132 :param directory: The trusted base directory.
133 :param pathnames: The untrusted path components relative to the
134 base directory.
135 :return: A safe path, otherwise ``None``.
136 """
137 if not directory:
138 # Ensure we end up with ./path if directory="" is given,
139 # otherwise the first untrusted part could become trusted.
140 directory = "."
141
142 parts = [directory]
143
144 for filename in pathnames:
145 if filename != "":
146 filename = posixpath.normpath(filename)
147
148 if (
149 any(sep in filename for sep in _os_alt_seps)
150 or os.path.isabs(filename)
151 or filename == ".."
152 or filename.startswith("../")
153 ):
154 return None
155
156 parts.append(filename)
157
158 return posixpath.join(*parts)
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/werkzeug/security.py b/src/werkzeug/security.py
--- a/src/werkzeug/security.py
+++ b/src/werkzeug/security.py
@@ -67,17 +67,16 @@
def generate_password_hash(
- password: str, method: str = "pbkdf2", salt_length: int = 16
+ password: str, method: str = "scrypt", salt_length: int = 16
) -> str:
"""Securely hash a password for storage. A password can be compared to a stored hash
using :func:`check_password_hash`.
The following methods are supported:
- - ``scrypt``, more secure but not available on PyPy. The parameters are ``n``,
- ``r``, and ``p``, the default is ``scrypt:32768:8:1``. See
- :func:`hashlib.scrypt`.
- - ``pbkdf2``, the default. The parameters are ``hash_method`` and ``iterations``,
+ - ``scrypt``, the default. The parameters are ``n``, ``r``, and ``p``, the default
+ is ``scrypt:32768:8:1``. See :func:`hashlib.scrypt`.
+ - ``pbkdf2``, less secure. The parameters are ``hash_method`` and ``iterations``,
the default is ``pbkdf2:sha256:600000``. See :func:`hashlib.pbkdf2_hmac`.
Default parameters may be updated to reflect current guidelines, and methods may be
|
{"golden_diff": "diff --git a/src/werkzeug/security.py b/src/werkzeug/security.py\n--- a/src/werkzeug/security.py\n+++ b/src/werkzeug/security.py\n@@ -67,17 +67,16 @@\n \n \n def generate_password_hash(\n- password: str, method: str = \"pbkdf2\", salt_length: int = 16\n+ password: str, method: str = \"scrypt\", salt_length: int = 16\n ) -> str:\n \"\"\"Securely hash a password for storage. A password can be compared to a stored hash\n using :func:`check_password_hash`.\n \n The following methods are supported:\n \n- - ``scrypt``, more secure but not available on PyPy. The parameters are ``n``,\n- ``r``, and ``p``, the default is ``scrypt:32768:8:1``. See\n- :func:`hashlib.scrypt`.\n- - ``pbkdf2``, the default. The parameters are ``hash_method`` and ``iterations``,\n+ - ``scrypt``, the default. The parameters are ``n``, ``r``, and ``p``, the default\n+ is ``scrypt:32768:8:1``. See :func:`hashlib.scrypt`.\n+ - ``pbkdf2``, less secure. The parameters are ``hash_method`` and ``iterations``,\n the default is ``pbkdf2:sha256:600000``. See :func:`hashlib.pbkdf2_hmac`.\n \n Default parameters may be updated to reflect current guidelines, and methods may be\n", "issue": "use scrypt by default\n#2654 added scrypt support, but couldn't make it the default because [PyPy didn't support it at the time](https://foss.heptapod.net/pypy/pypy/-/issues/3921). Now PyPy has fixed that and [made a release](https://doc.pypy.org/en/latest/release-v7.3.12.html), I'm comfortable with making scrypt the default.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport hashlib\nimport hmac\nimport os\nimport posixpath\nimport secrets\n\nSALT_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nDEFAULT_PBKDF2_ITERATIONS = 600000\n\n_os_alt_seps: list[str] = list(\n sep for sep in [os.sep, os.path.altsep] if sep is not None and sep != \"/\"\n)\n\n\ndef gen_salt(length: int) -> str:\n \"\"\"Generate a random string of SALT_CHARS with specified ``length``.\"\"\"\n if length <= 0:\n raise ValueError(\"Salt length must be at least 1.\")\n\n return \"\".join(secrets.choice(SALT_CHARS) for _ in range(length))\n\n\ndef _hash_internal(method: str, salt: str, password: str) -> tuple[str, str]:\n method, *args = method.split(\":\")\n salt = salt.encode(\"utf-8\")\n password = password.encode(\"utf-8\")\n\n if method == \"scrypt\":\n if not args:\n n = 2**15\n r = 8\n p = 1\n else:\n try:\n n, r, p = map(int, args)\n except ValueError:\n raise ValueError(\"'scrypt' takes 3 arguments.\") from None\n\n maxmem = 132 * n * r * p # ideally 128, but some extra seems needed\n return (\n hashlib.scrypt(password, salt=salt, n=n, r=r, p=p, maxmem=maxmem).hex(),\n f\"scrypt:{n}:{r}:{p}\",\n )\n elif method == \"pbkdf2\":\n len_args = len(args)\n\n if len_args == 0:\n hash_name = \"sha256\"\n iterations = DEFAULT_PBKDF2_ITERATIONS\n elif len_args == 1:\n hash_name = args[0]\n iterations = DEFAULT_PBKDF2_ITERATIONS\n elif len_args == 2:\n hash_name = args[0]\n iterations = int(args[1])\n else:\n raise ValueError(\"'pbkdf2' takes 2 arguments.\")\n\n return (\n hashlib.pbkdf2_hmac(hash_name, password, salt, iterations).hex(),\n f\"pbkdf2:{hash_name}:{iterations}\",\n )\n else:\n raise ValueError(f\"Invalid hash method '{method}'.\")\n\n\ndef generate_password_hash(\n password: str, method: str = \"pbkdf2\", salt_length: int = 16\n) -> str:\n \"\"\"Securely hash a password for storage. 
A password can be compared to a stored hash\n using :func:`check_password_hash`.\n\n The following methods are supported:\n\n - ``scrypt``, more secure but not available on PyPy. The parameters are ``n``,\n ``r``, and ``p``, the default is ``scrypt:32768:8:1``. See\n :func:`hashlib.scrypt`.\n - ``pbkdf2``, the default. The parameters are ``hash_method`` and ``iterations``,\n the default is ``pbkdf2:sha256:600000``. See :func:`hashlib.pbkdf2_hmac`.\n\n Default parameters may be updated to reflect current guidelines, and methods may be\n deprecated and removed if they are no longer considered secure. To migrate old\n hashes, you may generate a new hash when checking an old hash, or you may contact\n users with a link to reset their password.\n\n :param password: The plaintext password.\n :param method: The key derivation function and parameters.\n :param salt_length: The number of characters to generate for the salt.\n\n .. versionchanged:: 2.3\n Scrypt support was added.\n\n .. versionchanged:: 2.3\n The default iterations for pbkdf2 was increased to 600,000.\n\n .. versionchanged:: 2.3\n All plain hashes are deprecated and will not be supported in Werkzeug 3.0.\n \"\"\"\n salt = gen_salt(salt_length)\n h, actual_method = _hash_internal(method, salt, password)\n return f\"{actual_method}${salt}${h}\"\n\n\ndef check_password_hash(pwhash: str, password: str) -> bool:\n \"\"\"Securely check that the given stored password hash, previously generated using\n :func:`generate_password_hash`, matches the given password.\n\n Methods may be deprecated and removed if they are no longer considered secure. To\n migrate old hashes, you may generate a new hash when checking an old hash, or you\n may contact users with a link to reset their password.\n\n :param pwhash: The hashed password.\n :param password: The plaintext password.\n\n .. 
versionchanged:: 2.3\n All plain hashes are deprecated and will not be supported in Werkzeug 3.0.\n \"\"\"\n try:\n method, salt, hashval = pwhash.split(\"$\", 2)\n except ValueError:\n return False\n\n return hmac.compare_digest(_hash_internal(method, salt, password)[0], hashval)\n\n\ndef safe_join(directory: str, *pathnames: str) -> str | None:\n \"\"\"Safely join zero or more untrusted path components to a base\n directory to avoid escaping the base directory.\n\n :param directory: The trusted base directory.\n :param pathnames: The untrusted path components relative to the\n base directory.\n :return: A safe path, otherwise ``None``.\n \"\"\"\n if not directory:\n # Ensure we end up with ./path if directory=\"\" is given,\n # otherwise the first untrusted part could become trusted.\n directory = \".\"\n\n parts = [directory]\n\n for filename in pathnames:\n if filename != \"\":\n filename = posixpath.normpath(filename)\n\n if (\n any(sep in filename for sep in _os_alt_seps)\n or os.path.isabs(filename)\n or filename == \"..\"\n or filename.startswith(\"../\")\n ):\n return None\n\n parts.append(filename)\n\n return posixpath.join(*parts)\n", "path": "src/werkzeug/security.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport hashlib\nimport hmac\nimport os\nimport posixpath\nimport secrets\n\nSALT_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nDEFAULT_PBKDF2_ITERATIONS = 600000\n\n_os_alt_seps: list[str] = list(\n sep for sep in [os.sep, os.path.altsep] if sep is not None and sep != \"/\"\n)\n\n\ndef gen_salt(length: int) -> str:\n \"\"\"Generate a random string of SALT_CHARS with specified ``length``.\"\"\"\n if length <= 0:\n raise ValueError(\"Salt length must be at least 1.\")\n\n return \"\".join(secrets.choice(SALT_CHARS) for _ in range(length))\n\n\ndef _hash_internal(method: str, salt: str, password: str) -> tuple[str, str]:\n method, *args = method.split(\":\")\n salt = salt.encode(\"utf-8\")\n password = password.encode(\"utf-8\")\n\n if method == \"scrypt\":\n if not args:\n n = 2**15\n r = 8\n p = 1\n else:\n try:\n n, r, p = map(int, args)\n except ValueError:\n raise ValueError(\"'scrypt' takes 3 arguments.\") from None\n\n maxmem = 132 * n * r * p # ideally 128, but some extra seems needed\n return (\n hashlib.scrypt(password, salt=salt, n=n, r=r, p=p, maxmem=maxmem).hex(),\n f\"scrypt:{n}:{r}:{p}\",\n )\n elif method == \"pbkdf2\":\n len_args = len(args)\n\n if len_args == 0:\n hash_name = \"sha256\"\n iterations = DEFAULT_PBKDF2_ITERATIONS\n elif len_args == 1:\n hash_name = args[0]\n iterations = DEFAULT_PBKDF2_ITERATIONS\n elif len_args == 2:\n hash_name = args[0]\n iterations = int(args[1])\n else:\n raise ValueError(\"'pbkdf2' takes 2 arguments.\")\n\n return (\n hashlib.pbkdf2_hmac(hash_name, password, salt, iterations).hex(),\n f\"pbkdf2:{hash_name}:{iterations}\",\n )\n else:\n raise ValueError(f\"Invalid hash method '{method}'.\")\n\n\ndef generate_password_hash(\n password: str, method: str = \"scrypt\", salt_length: int = 16\n) -> str:\n \"\"\"Securely hash a password for storage. A password can be compared to a stored hash\n using :func:`check_password_hash`.\n\n The following methods are supported:\n\n - ``scrypt``, the default. The parameters are ``n``, ``r``, and ``p``, the default\n is ``scrypt:32768:8:1``. See :func:`hashlib.scrypt`.\n - ``pbkdf2``, less secure. The parameters are ``hash_method`` and ``iterations``,\n the default is ``pbkdf2:sha256:600000``. 
See :func:`hashlib.pbkdf2_hmac`.\n\n Default parameters may be updated to reflect current guidelines, and methods may be\n deprecated and removed if they are no longer considered secure. To migrate old\n hashes, you may generate a new hash when checking an old hash, or you may contact\n users with a link to reset their password.\n\n :param password: The plaintext password.\n :param method: The key derivation function and parameters.\n :param salt_length: The number of characters to generate for the salt.\n\n .. versionchanged:: 2.3\n Scrypt support was added.\n\n .. versionchanged:: 2.3\n The default iterations for pbkdf2 was increased to 600,000.\n\n .. versionchanged:: 2.3\n All plain hashes are deprecated and will not be supported in Werkzeug 3.0.\n \"\"\"\n salt = gen_salt(salt_length)\n h, actual_method = _hash_internal(method, salt, password)\n return f\"{actual_method}${salt}${h}\"\n\n\ndef check_password_hash(pwhash: str, password: str) -> bool:\n \"\"\"Securely check that the given stored password hash, previously generated using\n :func:`generate_password_hash`, matches the given password.\n\n Methods may be deprecated and removed if they are no longer considered secure. To\n migrate old hashes, you may generate a new hash when checking an old hash, or you\n may contact users with a link to reset their password.\n\n :param pwhash: The hashed password.\n :param password: The plaintext password.\n\n .. versionchanged:: 2.3\n All plain hashes are deprecated and will not be supported in Werkzeug 3.0.\n \"\"\"\n try:\n method, salt, hashval = pwhash.split(\"$\", 2)\n except ValueError:\n return False\n\n return hmac.compare_digest(_hash_internal(method, salt, password)[0], hashval)\n\n\ndef safe_join(directory: str, *pathnames: str) -> str | None:\n \"\"\"Safely join zero or more untrusted path components to a base\n directory to avoid escaping the base directory.\n\n :param directory: The trusted base directory.\n :param pathnames: The untrusted path components relative to the\n base directory.\n :return: A safe path, otherwise ``None``.\n \"\"\"\n if not directory:\n # Ensure we end up with ./path if directory=\"\" is given,\n # otherwise the first untrusted part could become trusted.\n directory = \".\"\n\n parts = [directory]\n\n for filename in pathnames:\n if filename != \"\":\n filename = posixpath.normpath(filename)\n\n if (\n any(sep in filename for sep in _os_alt_seps)\n or os.path.isabs(filename)\n or filename == \"..\"\n or filename.startswith(\"../\")\n ):\n return None\n\n parts.append(filename)\n\n return posixpath.join(*parts)\n", "path": "src/werkzeug/security.py"}]}
| 2,081 | 366 |
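A minimal sketch of how the flipped default in the record above shows up for callers, assuming a Werkzeug build that already contains this diff and an interpreter whose `hashlib` provides scrypt; the example password is arbitrary, and the salt and digest differ on every run — only the method prefix is stable:

```python
from werkzeug.security import check_password_hash, generate_password_hash

hashed = generate_password_hash("correct horse battery staple")
print(hashed.split("$", 1)[0])   # "scrypt:32768:8:1" after the change,
                                 # "pbkdf2:sha256:600000" on earlier releases

# Verification is unchanged: the method is parsed back out of the stored hash.
assert check_password_hash(hashed, "correct horse battery staple")
assert not check_password_hash(hashed, "wrong password")

# The previous default stays available by naming the method explicitly.
legacy = generate_password_hash("correct horse battery staple", method="pbkdf2")
assert legacy.startswith("pbkdf2:sha256:600000$")
```

Because the method and its parameters are stored inside the hash string itself, existing pbkdf2 hashes keep verifying after the default changes; only newly generated hashes pick up scrypt.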
gh_patches_debug_28835
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-2669
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing dependency on setuptools
When importing `hypothesis` in a container that only installs the declared dependencies (i.e. `sortedcontainers` and `attrs`) then we see the following error:
```
Traceback (most recent call last):
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py", line 27, in <module>
from importlib import metadata as importlib_metadata
ImportError: cannot import name 'metadata'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py", line 29, in <module>
import importlib_metadata # type: ignore # mypy thinks this is a redefinition
ModuleNotFoundError: No module named 'importlib_metadata'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "common/python/runtime/python3_wrapper.py", line 36, in <module>
eval(compiled_code, module.__dict__)
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/__main__/perception/cloud/proto_format_test.py", line 7, in <module>
import hypothesis as hyp
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/__init__.py", line 27, in <module>
from hypothesis.entry_points import run
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py", line 38, in <module>
import pkg_resources
ModuleNotFoundError: No module named 'pkg_resources'
```
The `pkg_resources` module is provided by `setuptools`.
I think tweaking `setup.py` should fix it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hypothesis-python/setup.py`
Content:
```
1 # This file is part of Hypothesis, which may be found at
2 # https://github.com/HypothesisWorks/hypothesis/
3 #
4 # Most of this work is copyright (C) 2013-2020 David R. MacIver
5 # ([email protected]), but it contains contributions by others. See
6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
7 # consult the git log if you need to determine who owns an individual
8 # contribution.
9 #
10 # This Source Code Form is subject to the terms of the Mozilla Public License,
11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
12 # obtain one at https://mozilla.org/MPL/2.0/.
13 #
14 # END HEADER
15
16 import os
17 import sys
18 import warnings
19
20 import setuptools
21
22 if sys.version_info[:2] < (3, 6):
23 raise Exception(
24 "This version of Python is too old to install new versions of Hypothesis. "
25 "Update `pip` and `setuptools`, try again, and you will automatically "
26 "get the latest compatible version of Hypothesis instead. "
27 "See also https://python3statement.org/practicalities/"
28 )
29
30
31 def local_file(name):
32 return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
33
34
35 SOURCE = local_file("src")
36 README = local_file("README.rst")
37
38 setuptools_version = tuple(map(int, setuptools.__version__.split(".")[:2]))
39
40 if setuptools_version < (36, 2):
41 # Warning only - very bad if uploading bdist but fine if installing sdist.
42 warnings.warn(
43 "This version of setuptools is too old to correctly store "
44 "conditional dependencies in binary wheels. For more info, see: "
45 "https://hynek.me/articles/conditional-python-dependencies/"
46 )
47
48
49 # Assignment to placate pyflakes. The actual version is from the exec that
50 # follows.
51 __version__ = None
52
53 with open(local_file("src/hypothesis/version.py")) as o:
54 exec(o.read())
55
56 assert __version__ is not None
57
58
59 extras = {
60 "cli": ["click>=7.0", "black>=19.10b0"],
61 "ghostwriter": ["black>=19.10b0"],
62 "pytz": ["pytz>=2014.1"],
63 "dateutil": ["python-dateutil>=1.4"],
64 "lark": ["lark-parser>=0.6.5"],
65 "numpy": ["numpy>=1.9.0"],
66 "pandas": ["pandas>=0.19"],
67 "pytest": ["pytest>=4.3"],
68 "dpcontracts": ["dpcontracts>=0.4"],
69 "redis": ["redis>=3.0.0"],
70 # We only support Django versions with upstream support - see
71 # https://www.djangoproject.com/download/#supported-versions
72 "django": ["pytz>=2014.1", "django>=2.2"],
73 }
74
75 extras["all"] = sorted(set(sum(extras.values(), [])))
76
77
78 setuptools.setup(
79 name="hypothesis",
80 version=__version__,
81 author="David R. MacIver",
82 author_email="[email protected]",
83 packages=setuptools.find_packages(SOURCE),
84 package_dir={"": SOURCE},
85 package_data={"hypothesis": ["py.typed", "vendor/tlds-alpha-by-domain.txt"]},
86 url="https://github.com/HypothesisWorks/hypothesis/tree/master/hypothesis-python",
87 project_urls={
88 "Website": "https://hypothesis.works",
89 "Documentation": "https://hypothesis.readthedocs.io",
90 "Issues": "https://github.com/HypothesisWorks/hypothesis/issues",
91 },
92 license="MPL v2",
93 description="A library for property-based testing",
94 zip_safe=False,
95 extras_require=extras,
96 install_requires=["attrs>=19.2.0", "sortedcontainers>=2.1.0,<3.0.0"],
97 python_requires=">=3.6",
98 classifiers=[
99 "Development Status :: 5 - Production/Stable",
100 "Framework :: Hypothesis",
101 "Framework :: Pytest",
102 "Intended Audience :: Developers",
103 "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
104 "Operating System :: Unix",
105 "Operating System :: POSIX",
106 "Operating System :: Microsoft :: Windows",
107 "Programming Language :: Python",
108 "Programming Language :: Python :: 3",
109 "Programming Language :: Python :: 3 :: Only",
110 "Programming Language :: Python :: 3.6",
111 "Programming Language :: Python :: 3.7",
112 "Programming Language :: Python :: 3.8",
113 "Programming Language :: Python :: Implementation :: CPython",
114 "Programming Language :: Python :: Implementation :: PyPy",
115 "Topic :: Education :: Testing",
116 "Topic :: Software Development :: Testing",
117 "Typing :: Typed",
118 ],
119 entry_points={
120 "pytest11": ["hypothesispytest = hypothesis.extra.pytestplugin"],
121 "console_scripts": ["hypothesis = hypothesis.extra.cli:main"],
122 },
123 long_description=open(README).read(),
124 long_description_content_type="text/x-rst",
125 keywords="python testing fuzzing property-based-testing",
126 )
127
```
Path: `hypothesis-python/src/hypothesis/entry_points.py`
Content:
```
1 # This file is part of Hypothesis, which may be found at
2 # https://github.com/HypothesisWorks/hypothesis/
3 #
4 # Most of this work is copyright (C) 2013-2020 David R. MacIver
5 # ([email protected]), but it contains contributions by others. See
6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
7 # consult the git log if you need to determine who owns an individual
8 # contribution.
9 #
10 # This Source Code Form is subject to the terms of the Mozilla Public License,
11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
12 # obtain one at https://mozilla.org/MPL/2.0/.
13 #
14 # END HEADER
15
16 """Run all functions registered for the "hypothesis" entry point.
17
18 This can be used with `st.register_type_strategy` to register strategies for your
19 custom types, running the relevant code when *hypothesis* is imported instead of
20 your package.
21 """
22
23 try:
24 # We prefer to use importlib.metadata, or the backport on Python <= 3.7,
25 # because it's much faster than pkg_resources (200ms import time speedup).
26 try:
27 from importlib import metadata as importlib_metadata
28 except ImportError:
29 import importlib_metadata # type: ignore # mypy thinks this is a redefinition
30
31 def get_entry_points():
32 yield from importlib_metadata.entry_points().get("hypothesis", [])
33
34
35 except ImportError:
36 # But if we're not on Python >= 3.8 and the importlib_metadata backport
37 # is not installed, we fall back to pkg_resources anyway.
38 import pkg_resources
39
40 def get_entry_points():
41 yield from pkg_resources.iter_entry_points("hypothesis")
42
43
44 def run():
45 for entry in get_entry_points(): # pragma: no cover
46 hook = entry.load()
47 if callable(hook):
48 hook()
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py
--- a/hypothesis-python/setup.py
+++ b/hypothesis-python/setup.py
@@ -63,7 +63,7 @@
"dateutil": ["python-dateutil>=1.4"],
"lark": ["lark-parser>=0.6.5"],
"numpy": ["numpy>=1.9.0"],
- "pandas": ["pandas>=0.19"],
+ "pandas": ["pandas>=0.25"],
"pytest": ["pytest>=4.3"],
"dpcontracts": ["dpcontracts>=0.4"],
"redis": ["redis>=3.0.0"],
@@ -72,7 +72,9 @@
"django": ["pytz>=2014.1", "django>=2.2"],
}
-extras["all"] = sorted(set(sum(extras.values(), [])))
+extras["all"] = sorted(
+ set(sum(extras.values(), ["importlib_metadata ; python_version<'3.8'"]))
+)
setuptools.setup(
diff --git a/hypothesis-python/src/hypothesis/entry_points.py b/hypothesis-python/src/hypothesis/entry_points.py
--- a/hypothesis-python/src/hypothesis/entry_points.py
+++ b/hypothesis-python/src/hypothesis/entry_points.py
@@ -35,10 +35,26 @@
except ImportError:
# But if we're not on Python >= 3.8 and the importlib_metadata backport
# is not installed, we fall back to pkg_resources anyway.
- import pkg_resources
+ try:
+ import pkg_resources
+ except ImportError:
+ import warnings
- def get_entry_points():
- yield from pkg_resources.iter_entry_points("hypothesis")
+ from hypothesis.errors import HypothesisWarning
+
+ warnings.warn(
+ "Under Python <= 3.7, Hypothesis requires either the importlib_metadata "
+ "or setuptools package in order to load plugins via entrypoints.",
+ HypothesisWarning,
+ )
+
+ def get_entry_points():
+ yield from ()
+
+ else:
+
+ def get_entry_points():
+ yield from pkg_resources.iter_entry_points("hypothesis")
def run():
|
{"golden_diff": "diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py\n--- a/hypothesis-python/setup.py\n+++ b/hypothesis-python/setup.py\n@@ -63,7 +63,7 @@\n \"dateutil\": [\"python-dateutil>=1.4\"],\n \"lark\": [\"lark-parser>=0.6.5\"],\n \"numpy\": [\"numpy>=1.9.0\"],\n- \"pandas\": [\"pandas>=0.19\"],\n+ \"pandas\": [\"pandas>=0.25\"],\n \"pytest\": [\"pytest>=4.3\"],\n \"dpcontracts\": [\"dpcontracts>=0.4\"],\n \"redis\": [\"redis>=3.0.0\"],\n@@ -72,7 +72,9 @@\n \"django\": [\"pytz>=2014.1\", \"django>=2.2\"],\n }\n \n-extras[\"all\"] = sorted(set(sum(extras.values(), [])))\n+extras[\"all\"] = sorted(\n+ set(sum(extras.values(), [\"importlib_metadata ; python_version<'3.8'\"]))\n+)\n \n \n setuptools.setup(\ndiff --git a/hypothesis-python/src/hypothesis/entry_points.py b/hypothesis-python/src/hypothesis/entry_points.py\n--- a/hypothesis-python/src/hypothesis/entry_points.py\n+++ b/hypothesis-python/src/hypothesis/entry_points.py\n@@ -35,10 +35,26 @@\n except ImportError:\n # But if we're not on Python >= 3.8 and the importlib_metadata backport\n # is not installed, we fall back to pkg_resources anyway.\n- import pkg_resources\n+ try:\n+ import pkg_resources\n+ except ImportError:\n+ import warnings\n \n- def get_entry_points():\n- yield from pkg_resources.iter_entry_points(\"hypothesis\")\n+ from hypothesis.errors import HypothesisWarning\n+\n+ warnings.warn(\n+ \"Under Python <= 3.7, Hypothesis requires either the importlib_metadata \"\n+ \"or setuptools package in order to load plugins via entrypoints.\",\n+ HypothesisWarning,\n+ )\n+\n+ def get_entry_points():\n+ yield from ()\n+\n+ else:\n+\n+ def get_entry_points():\n+ yield from pkg_resources.iter_entry_points(\"hypothesis\")\n \n \n def run():\n", "issue": "Missing dependency on setuptools\nWhen importing `hypothesis` in a container that only installs the declared dependencies (i.e. 
`sortedcontainers` and `attrs`) then we see the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py\", line 27, in <module>\r\n from importlib import metadata as importlib_metadata\r\nImportError: cannot import name 'metadata'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py\", line 29, in <module>\r\n import importlib_metadata # type: ignore # mypy thinks this is a redefinition\r\nModuleNotFoundError: No module named 'importlib_metadata'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"common/python/runtime/python3_wrapper.py\", line 36, in <module>\r\n eval(compiled_code, module.__dict__)\r\n File \"/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/__main__/perception/cloud/proto_format_test.py\", line 7, in <module>\r\n import hypothesis as hyp\r\n File \"/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/__init__.py\", line 27, in <module>\r\n from hypothesis.entry_points import run\r\n File \"/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py\", line 38, in <module>\r\n import pkg_resources\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n```\r\nThe `pkg_resources` module is provided by `setuptools`.\r\n\r\nI think tweaking `setup.py` should fix it.\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2020 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport os\nimport sys\nimport warnings\n\nimport setuptools\n\nif sys.version_info[:2] < (3, 6):\n raise Exception(\n \"This version of Python is too old to install new versions of Hypothesis. \"\n \"Update `pip` and `setuptools`, try again, and you will automatically \"\n \"get the latest compatible version of Hypothesis instead. 
\"\n \"See also https://python3statement.org/practicalities/\"\n )\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file(\"src\")\nREADME = local_file(\"README.rst\")\n\nsetuptools_version = tuple(map(int, setuptools.__version__.split(\".\")[:2]))\n\nif setuptools_version < (36, 2):\n # Warning only - very bad if uploading bdist but fine if installing sdist.\n warnings.warn(\n \"This version of setuptools is too old to correctly store \"\n \"conditional dependencies in binary wheels. For more info, see: \"\n \"https://hynek.me/articles/conditional-python-dependencies/\"\n )\n\n\n# Assignment to placate pyflakes. The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file(\"src/hypothesis/version.py\")) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n \"cli\": [\"click>=7.0\", \"black>=19.10b0\"],\n \"ghostwriter\": [\"black>=19.10b0\"],\n \"pytz\": [\"pytz>=2014.1\"],\n \"dateutil\": [\"python-dateutil>=1.4\"],\n \"lark\": [\"lark-parser>=0.6.5\"],\n \"numpy\": [\"numpy>=1.9.0\"],\n \"pandas\": [\"pandas>=0.19\"],\n \"pytest\": [\"pytest>=4.3\"],\n \"dpcontracts\": [\"dpcontracts>=0.4\"],\n \"redis\": [\"redis>=3.0.0\"],\n # We only support Django versions with upstream support - see\n # https://www.djangoproject.com/download/#supported-versions\n \"django\": [\"pytz>=2014.1\", \"django>=2.2\"],\n}\n\nextras[\"all\"] = sorted(set(sum(extras.values(), [])))\n\n\nsetuptools.setup(\n name=\"hypothesis\",\n version=__version__,\n author=\"David R. MacIver\",\n author_email=\"[email protected]\",\n packages=setuptools.find_packages(SOURCE),\n package_dir={\"\": SOURCE},\n package_data={\"hypothesis\": [\"py.typed\", \"vendor/tlds-alpha-by-domain.txt\"]},\n url=\"https://github.com/HypothesisWorks/hypothesis/tree/master/hypothesis-python\",\n project_urls={\n \"Website\": \"https://hypothesis.works\",\n \"Documentation\": \"https://hypothesis.readthedocs.io\",\n \"Issues\": \"https://github.com/HypothesisWorks/hypothesis/issues\",\n },\n license=\"MPL v2\",\n description=\"A library for property-based testing\",\n zip_safe=False,\n extras_require=extras,\n install_requires=[\"attrs>=19.2.0\", \"sortedcontainers>=2.1.0,<3.0.0\"],\n python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Hypothesis\",\n \"Framework :: Pytest\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Operating System :: Unix\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Education :: Testing\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n entry_points={\n \"pytest11\": [\"hypothesispytest = hypothesis.extra.pytestplugin\"],\n \"console_scripts\": [\"hypothesis = hypothesis.extra.cli:main\"],\n },\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n keywords=\"python testing fuzzing property-based-testing\",\n)\n", "path": "hypothesis-python/setup.py"}, {"content": 
"# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2020 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n\"\"\"Run all functions registered for the \"hypothesis\" entry point.\n\nThis can be used with `st.register_type_strategy` to register strategies for your\ncustom types, running the relevant code when *hypothesis* is imported instead of\nyour package.\n\"\"\"\n\ntry:\n # We prefer to use importlib.metadata, or the backport on Python <= 3.7,\n # because it's much faster than pkg_resources (200ms import time speedup).\n try:\n from importlib import metadata as importlib_metadata\n except ImportError:\n import importlib_metadata # type: ignore # mypy thinks this is a redefinition\n\n def get_entry_points():\n yield from importlib_metadata.entry_points().get(\"hypothesis\", [])\n\n\nexcept ImportError:\n # But if we're not on Python >= 3.8 and the importlib_metadata backport\n # is not installed, we fall back to pkg_resources anyway.\n import pkg_resources\n\n def get_entry_points():\n yield from pkg_resources.iter_entry_points(\"hypothesis\")\n\n\ndef run():\n for entry in get_entry_points(): # pragma: no cover\n hook = entry.load()\n if callable(hook):\n hook()\n", "path": "hypothesis-python/src/hypothesis/entry_points.py"}], "after_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2020 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport os\nimport sys\nimport warnings\n\nimport setuptools\n\nif sys.version_info[:2] < (3, 6):\n raise Exception(\n \"This version of Python is too old to install new versions of Hypothesis. \"\n \"Update `pip` and `setuptools`, try again, and you will automatically \"\n \"get the latest compatible version of Hypothesis instead. \"\n \"See also https://python3statement.org/practicalities/\"\n )\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file(\"src\")\nREADME = local_file(\"README.rst\")\n\nsetuptools_version = tuple(map(int, setuptools.__version__.split(\".\")[:2]))\n\nif setuptools_version < (36, 2):\n # Warning only - very bad if uploading bdist but fine if installing sdist.\n warnings.warn(\n \"This version of setuptools is too old to correctly store \"\n \"conditional dependencies in binary wheels. For more info, see: \"\n \"https://hynek.me/articles/conditional-python-dependencies/\"\n )\n\n\n# Assignment to placate pyflakes. 
The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file(\"src/hypothesis/version.py\")) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n \"cli\": [\"click>=7.0\", \"black>=19.10b0\"],\n \"ghostwriter\": [\"black>=19.10b0\"],\n \"pytz\": [\"pytz>=2014.1\"],\n \"dateutil\": [\"python-dateutil>=1.4\"],\n \"lark\": [\"lark-parser>=0.6.5\"],\n \"numpy\": [\"numpy>=1.9.0\"],\n \"pandas\": [\"pandas>=0.25\"],\n \"pytest\": [\"pytest>=4.3\"],\n \"dpcontracts\": [\"dpcontracts>=0.4\"],\n \"redis\": [\"redis>=3.0.0\"],\n # We only support Django versions with upstream support - see\n # https://www.djangoproject.com/download/#supported-versions\n \"django\": [\"pytz>=2014.1\", \"django>=2.2\"],\n}\n\nextras[\"all\"] = sorted(\n set(sum(extras.values(), [\"importlib_metadata ; python_version<'3.8'\"]))\n)\n\n\nsetuptools.setup(\n name=\"hypothesis\",\n version=__version__,\n author=\"David R. MacIver\",\n author_email=\"[email protected]\",\n packages=setuptools.find_packages(SOURCE),\n package_dir={\"\": SOURCE},\n package_data={\"hypothesis\": [\"py.typed\", \"vendor/tlds-alpha-by-domain.txt\"]},\n url=\"https://github.com/HypothesisWorks/hypothesis/tree/master/hypothesis-python\",\n project_urls={\n \"Website\": \"https://hypothesis.works\",\n \"Documentation\": \"https://hypothesis.readthedocs.io\",\n \"Issues\": \"https://github.com/HypothesisWorks/hypothesis/issues\",\n },\n license=\"MPL v2\",\n description=\"A library for property-based testing\",\n zip_safe=False,\n extras_require=extras,\n install_requires=[\"attrs>=19.2.0\", \"sortedcontainers>=2.1.0,<3.0.0\"],\n python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Hypothesis\",\n \"Framework :: Pytest\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Operating System :: Unix\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Education :: Testing\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n entry_points={\n \"pytest11\": [\"hypothesispytest = hypothesis.extra.pytestplugin\"],\n \"console_scripts\": [\"hypothesis = hypothesis.extra.cli:main\"],\n },\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n keywords=\"python testing fuzzing property-based-testing\",\n)\n", "path": "hypothesis-python/setup.py"}, {"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2020 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n\"\"\"Run all functions registered for the \"hypothesis\" entry point.\n\nThis can be used with `st.register_type_strategy` to register strategies for your\ncustom types, running the relevant code when *hypothesis* is imported instead of\nyour package.\n\"\"\"\n\ntry:\n # We prefer to use importlib.metadata, or the backport on Python <= 3.7,\n # because it's much faster than pkg_resources (200ms import time speedup).\n try:\n from importlib import metadata as importlib_metadata\n except ImportError:\n import importlib_metadata # type: ignore # mypy thinks this is a redefinition\n\n def get_entry_points():\n yield from importlib_metadata.entry_points().get(\"hypothesis\", [])\n\n\nexcept ImportError:\n # But if we're not on Python >= 3.8 and the importlib_metadata backport\n # is not installed, we fall back to pkg_resources anyway.\n try:\n import pkg_resources\n except ImportError:\n import warnings\n\n from hypothesis.errors import HypothesisWarning\n\n warnings.warn(\n \"Under Python <= 3.7, Hypothesis requires either the importlib_metadata \"\n \"or setuptools package in order to load plugins via entrypoints.\",\n HypothesisWarning,\n )\n\n def get_entry_points():\n yield from ()\n\n else:\n\n def get_entry_points():\n yield from pkg_resources.iter_entry_points(\"hypothesis\")\n\n\ndef run():\n for entry in get_entry_points(): # pragma: no cover\n hook = entry.load()\n if callable(hook):\n hook()\n", "path": "hypothesis-python/src/hypothesis/entry_points.py"}]}
| 3,115 | 511 |
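The layered fallback that this record's patch settles on can be sketched outside Hypothesis as follows. The function name is illustrative, and the `select`/`get` dispatch at the end is an addition for newer interpreters rather than part of the patched `hypothesis/entry_points.py`:

```python
import warnings


def get_plugin_entry_points(group="hypothesis"):
    """Yield entry points for *group*, degrading in the order the patch above uses."""
    try:
        try:
            from importlib import metadata as importlib_metadata  # Python >= 3.8
        except ImportError:
            import importlib_metadata  # third-party backport, if installed
    except ImportError:
        # Neither metadata module is importable: fall back to pkg_resources, and
        # only warn -- rather than crash at import time -- if setuptools is also
        # missing, which is the failure mode reported in the issue.
        try:
            import pkg_resources
        except ImportError:
            warnings.warn(
                "importlib.metadata, importlib_metadata and setuptools are all "
                "unavailable; %r entry points will not be loaded" % group
            )
            return
        yield from pkg_resources.iter_entry_points(group)
        return

    eps = importlib_metadata.entry_points()
    if hasattr(eps, "select"):    # Python 3.10+ and newer backports
        yield from eps.select(group=group)
    else:                         # Python 3.8/3.9 return a plain dict here
        yield from eps.get(group, [])


print([ep.name for ep in get_plugin_entry_points()])
```

The setup.py half of the patch relies on an environment marker, `"importlib_metadata ; python_version<'3.8'"`, so the backport is only pulled in where the standard library cannot provide `importlib.metadata`.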
gh_patches_debug_766
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-4149
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModalView background size is not updated
Since https://github.com/kivy/kivy/pull/4136 the ModalView background is not resized when the window size changes, run `kivy/uix/modalview.py`, then resize the window.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/modalview.py`
Content:
```
1 '''
2 ModalView
3 =========
4
5 .. versionadded:: 1.4.0
6
7 The :class:`ModalView` widget is used to create modal views. By default, the
8 view will cover the whole "parent" window.
9
10 Remember that the default size of a Widget is size_hint=(1, 1). If you don't
11 want your view to be fullscreen, either use size hints with values lower than
12 1 (for instance size_hint=(.8, .8)) or deactivate the size_hint and use fixed
13 size attributes.
14
15 Examples
16 --------
17
18 Example of a simple 400x400 Hello world view::
19
20 view = ModalView(size_hint=(None, None), size=(400, 400))
21 view.add_widget(Label(text='Hello world'))
22
23 By default, any click outside the view will dismiss it. If you don't
24 want that, you can set :attr:`ModalView.auto_dismiss` to False::
25
26 view = ModalView(auto_dismiss=False)
27 view.add_widget(Label(text='Hello world'))
28 view.open()
29
30 To manually dismiss/close the view, use the :meth:`ModalView.dismiss` method of
31 the ModalView instance::
32
33 view.dismiss()
34
35 Both :meth:`ModalView.open` and :meth:`ModalView.dismiss` are bindable. That
36 means you can directly bind the function to an action, e.g. to a button's
37 on_press ::
38
39 # create content and add it to the view
40 content = Button(text='Close me!')
41 view = ModalView(auto_dismiss=False)
42 view.add_widget(content)
43
44 # bind the on_press event of the button to the dismiss function
45 content.bind(on_press=view.dismiss)
46
47 # open the view
48 view.open()
49
50
51 ModalView Events
52 ----------------
53
54 There are two events available: `on_open` which is raised when the view is
55 opening, and `on_dismiss` which is raised when the view is closed.
56 For `on_dismiss`, you can prevent the view from closing by explictly returning
57 True from your callback. ::
58
59 def my_callback(instance):
60 print('ModalView', instance, 'is being dismissed, but is prevented!')
61 return True
62 view = ModalView()
63 view.add_widget(Label(text='Hello world'))
64 view.bind(on_dismiss=my_callback)
65 view.open()
66
67
68 .. versionchanged:: 1.5.0
69 The ModalView can be closed by hitting the escape key on the
70 keyboard if the :attr:`ModalView.auto_dismiss` property is True (the
71 default).
72
73 '''
74
75 __all__ = ('ModalView', )
76
77 from kivy.logger import Logger
78 from kivy.animation import Animation
79 from kivy.uix.anchorlayout import AnchorLayout
80 from kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \
81 NumericProperty, ListProperty
82
83
84 class ModalView(AnchorLayout):
85 '''ModalView class. See module documentation for more information.
86
87 :Events:
88 `on_open`:
89 Fired when the ModalView is opened.
90 `on_dismiss`:
91 Fired when the ModalView is closed. If the callback returns True,
92 the dismiss will be canceled.
93 '''
94
95 auto_dismiss = BooleanProperty(True)
96 '''This property determines if the view is automatically
97 dismissed when the user clicks outside it.
98
99 :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and
100 defaults to True.
101 '''
102
103 attach_to = ObjectProperty(None)
104 '''If a widget is set on attach_to, the view will attach to the nearest
105 parent window of the widget. If none is found, it will attach to the
106 main/global Window.
107
108 :attr:`attach_to` is an :class:`~kivy.properties.ObjectProperty` and
109 defaults to None.
110 '''
111
112 background_color = ListProperty([0, 0, 0, .7])
113 '''Background color in the format (r, g, b, a).
114
115 :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
116 defaults to [0, 0, 0, .7].
117 '''
118
119 background = StringProperty(
120 'atlas://data/images/defaulttheme/modalview-background')
121 '''Background image of the view used for the view background.
122
123 :attr:`background` is a :class:`~kivy.properties.StringProperty` and
124 defaults to 'atlas://data/images/defaulttheme/modalview-background'.
125 '''
126
127 border = ListProperty([16, 16, 16, 16])
128 '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
129 graphics instruction. Used for the :attr:`background_normal` and the
130 :attr:`background_down` properties. Can be used when using custom
131 backgrounds.
132
133 It must be a list of four values: (top, right, bottom, left). Read the
134 BorderImage instructions for more information about how to use it.
135
136 :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
137 (16, 16, 16, 16).
138 '''
139
140 # Internals properties used for graphical representation.
141
142 _anim_alpha = NumericProperty(0)
143
144 _anim_duration = NumericProperty(.1)
145
146 _window = ObjectProperty(None, allownone=True)
147
148 __events__ = ('on_open', 'on_dismiss')
149
150 def __init__(self, **kwargs):
151 self._parent = None
152 super(ModalView, self).__init__(**kwargs)
153
154 def _search_window(self):
155 # get window to attach to
156 window = None
157 if self.attach_to is not None:
158 window = self.attach_to.get_parent_window()
159 if not window:
160 window = self.attach_to.get_root_window()
161 if not window:
162 from kivy.core.window import Window
163 window = Window
164 return window
165
166 def open(self, *largs):
167 '''Show the view window from the :attr:`attach_to` widget. If set, it
168 will attach to the nearest window. If the widget is not attached to any
169 window, the view will attach to the global
170 :class:`~kivy.core.window.Window`.
171 '''
172 if self._window is not None:
173 Logger.warning('ModalView: you can only open once.')
174 return self
175 # search window
176 self._window = self._search_window()
177 if not self._window:
178 Logger.warning('ModalView: cannot open view, no window found.')
179 return self
180 self._window.add_widget(self)
181 self._window.bind(
182 on_resize=self._align_center,
183 on_keyboard=self._handle_keyboard)
184 self.center = self._window.center
185 self.fbind('center', self._align_center)
186 a = Animation(_anim_alpha=1., d=self._anim_duration)
187 a.bind(on_complete=lambda *x: self.dispatch('on_open'))
188 a.start(self)
189 return self
190
191 def dismiss(self, *largs, **kwargs):
192 '''Close the view if it is open. If you really want to close the
193 view, whatever the on_dismiss event returns, you can use the *force*
194 argument:
195 ::
196
197 view = ModalView(...)
198 view.dismiss(force=True)
199
200 When the view is dismissed, it will be faded out before being
201 removed from the parent. If you don't want animation, use::
202
203 view.dismiss(animation=False)
204
205 '''
206 if self._window is None:
207 return self
208 if self.dispatch('on_dismiss') is True:
209 if kwargs.get('force', False) is not True:
210 return self
211 if kwargs.get('animation', True):
212 Animation(_anim_alpha=0., d=self._anim_duration).start(self)
213 else:
214 self._anim_alpha = 0
215 self._real_remove_widget()
216 return self
217
218 def _align_center(self, *l):
219 if self._window:
220 self.center = self._window.center
221
222 def on_touch_down(self, touch):
223 if not self.collide_point(*touch.pos):
224 if self.auto_dismiss:
225 self.dismiss()
226 return True
227 super(ModalView, self).on_touch_down(touch)
228 return True
229
230 def on_touch_move(self, touch):
231 super(ModalView, self).on_touch_move(touch)
232 return True
233
234 def on_touch_up(self, touch):
235 super(ModalView, self).on_touch_up(touch)
236 return True
237
238 def on__anim_alpha(self, instance, value):
239 if value == 0 and self._window is not None:
240 self._real_remove_widget()
241
242 def _real_remove_widget(self):
243 if self._window is None:
244 return
245 self._window.remove_widget(self)
246 self._window.unbind(
247 on_resize=self._align_center,
248 on_keyboard=self._handle_keyboard)
249 self._window = None
250
251 def on_open(self):
252 pass
253
254 def on_dismiss(self):
255 pass
256
257 def _handle_keyboard(self, window, key, *largs):
258 if key == 27 and self.auto_dismiss:
259 self.dismiss()
260 return True
261
262
263 if __name__ == '__main__':
264 from kivy.base import runTouchApp
265 from kivy.uix.button import Button
266 from kivy.uix.label import Label
267 from kivy.uix.gridlayout import GridLayout
268 from kivy.core.window import Window
269
270 # add view
271 content = GridLayout(cols=1)
272 content.add_widget(Label(text='This is a hello world'))
273 view = ModalView(size_hint=(None, None), size=(256, 256),
274 auto_dismiss=True)
275 view.add_widget(content)
276
277 def open_view(btn):
278 view.open()
279
280 layout = GridLayout(cols=3)
281 for x in range(9):
282 btn = Button(text='click me %s' % x)
283 btn.bind(on_release=view.open)
284 layout.add_widget(btn)
285 Window.add_widget(layout)
286
287 view.open()
288
289 runTouchApp()
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/uix/modalview.py b/kivy/uix/modalview.py
--- a/kivy/uix/modalview.py
+++ b/kivy/uix/modalview.py
@@ -143,7 +143,7 @@
_anim_duration = NumericProperty(.1)
- _window = ObjectProperty(None, allownone=True)
+ _window = ObjectProperty(None, allownone=True, rebind=True)
__events__ = ('on_open', 'on_dismiss')
|
{"golden_diff": "diff --git a/kivy/uix/modalview.py b/kivy/uix/modalview.py\n--- a/kivy/uix/modalview.py\n+++ b/kivy/uix/modalview.py\n@@ -143,7 +143,7 @@\n \n _anim_duration = NumericProperty(.1)\n \n- _window = ObjectProperty(None, allownone=True)\n+ _window = ObjectProperty(None, allownone=True, rebind=True)\n \n __events__ = ('on_open', 'on_dismiss')\n", "issue": "ModalView background size is not updated\nSince https://github.com/kivy/kivy/pull/4136 the ModalView background is not resized when the window size changes, run `kivy/uix/modalview.py`, then resize the window.\n\n\n\n", "before_files": [{"content": "'''\nModalView\n=========\n\n.. versionadded:: 1.4.0\n\nThe :class:`ModalView` widget is used to create modal views. By default, the\nview will cover the whole \"parent\" window.\n\nRemember that the default size of a Widget is size_hint=(1, 1). If you don't\nwant your view to be fullscreen, either use size hints with values lower than\n1 (for instance size_hint=(.8, .8)) or deactivate the size_hint and use fixed\nsize attributes.\n\nExamples\n--------\n\nExample of a simple 400x400 Hello world view::\n\n view = ModalView(size_hint=(None, None), size=(400, 400))\n view.add_widget(Label(text='Hello world'))\n\nBy default, any click outside the view will dismiss it. If you don't\nwant that, you can set :attr:`ModalView.auto_dismiss` to False::\n\n view = ModalView(auto_dismiss=False)\n view.add_widget(Label(text='Hello world'))\n view.open()\n\nTo manually dismiss/close the view, use the :meth:`ModalView.dismiss` method of\nthe ModalView instance::\n\n view.dismiss()\n\nBoth :meth:`ModalView.open` and :meth:`ModalView.dismiss` are bindable. That\nmeans you can directly bind the function to an action, e.g. to a button's\non_press ::\n\n # create content and add it to the view\n content = Button(text='Close me!')\n view = ModalView(auto_dismiss=False)\n view.add_widget(content)\n\n # bind the on_press event of the button to the dismiss function\n content.bind(on_press=view.dismiss)\n\n # open the view\n view.open()\n\n\nModalView Events\n----------------\n\nThere are two events available: `on_open` which is raised when the view is\nopening, and `on_dismiss` which is raised when the view is closed.\nFor `on_dismiss`, you can prevent the view from closing by explictly returning\nTrue from your callback. ::\n\n def my_callback(instance):\n print('ModalView', instance, 'is being dismissed, but is prevented!')\n return True\n view = ModalView()\n view.add_widget(Label(text='Hello world'))\n view.bind(on_dismiss=my_callback)\n view.open()\n\n\n.. versionchanged:: 1.5.0\n The ModalView can be closed by hitting the escape key on the\n keyboard if the :attr:`ModalView.auto_dismiss` property is True (the\n default).\n\n'''\n\n__all__ = ('ModalView', )\n\nfrom kivy.logger import Logger\nfrom kivy.animation import Animation\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \\\n NumericProperty, ListProperty\n\n\nclass ModalView(AnchorLayout):\n '''ModalView class. See module documentation for more information.\n\n :Events:\n `on_open`:\n Fired when the ModalView is opened.\n `on_dismiss`:\n Fired when the ModalView is closed. 
If the callback returns True,\n the dismiss will be canceled.\n '''\n\n auto_dismiss = BooleanProperty(True)\n '''This property determines if the view is automatically\n dismissed when the user clicks outside it.\n\n :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n attach_to = ObjectProperty(None)\n '''If a widget is set on attach_to, the view will attach to the nearest\n parent window of the widget. If none is found, it will attach to the\n main/global Window.\n\n :attr:`attach_to` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n background_color = ListProperty([0, 0, 0, .7])\n '''Background color in the format (r, g, b, a).\n\n :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and\n defaults to [0, 0, 0, .7].\n '''\n\n background = StringProperty(\n 'atlas://data/images/defaulttheme/modalview-background')\n '''Background image of the view used for the view background.\n\n :attr:`background` is a :class:`~kivy.properties.StringProperty` and\n defaults to 'atlas://data/images/defaulttheme/modalview-background'.\n '''\n\n border = ListProperty([16, 16, 16, 16])\n '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`\n graphics instruction. Used for the :attr:`background_normal` and the\n :attr:`background_down` properties. Can be used when using custom\n backgrounds.\n\n It must be a list of four values: (top, right, bottom, left). Read the\n BorderImage instructions for more information about how to use it.\n\n :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to\n (16, 16, 16, 16).\n '''\n\n # Internals properties used for graphical representation.\n\n _anim_alpha = NumericProperty(0)\n\n _anim_duration = NumericProperty(.1)\n\n _window = ObjectProperty(None, allownone=True)\n\n __events__ = ('on_open', 'on_dismiss')\n\n def __init__(self, **kwargs):\n self._parent = None\n super(ModalView, self).__init__(**kwargs)\n\n def _search_window(self):\n # get window to attach to\n window = None\n if self.attach_to is not None:\n window = self.attach_to.get_parent_window()\n if not window:\n window = self.attach_to.get_root_window()\n if not window:\n from kivy.core.window import Window\n window = Window\n return window\n\n def open(self, *largs):\n '''Show the view window from the :attr:`attach_to` widget. If set, it\n will attach to the nearest window. If the widget is not attached to any\n window, the view will attach to the global\n :class:`~kivy.core.window.Window`.\n '''\n if self._window is not None:\n Logger.warning('ModalView: you can only open once.')\n return self\n # search window\n self._window = self._search_window()\n if not self._window:\n Logger.warning('ModalView: cannot open view, no window found.')\n return self\n self._window.add_widget(self)\n self._window.bind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self.center = self._window.center\n self.fbind('center', self._align_center)\n a = Animation(_anim_alpha=1., d=self._anim_duration)\n a.bind(on_complete=lambda *x: self.dispatch('on_open'))\n a.start(self)\n return self\n\n def dismiss(self, *largs, **kwargs):\n '''Close the view if it is open. If you really want to close the\n view, whatever the on_dismiss event returns, you can use the *force*\n argument:\n ::\n\n view = ModalView(...)\n view.dismiss(force=True)\n\n When the view is dismissed, it will be faded out before being\n removed from the parent. 
If you don't want animation, use::\n\n view.dismiss(animation=False)\n\n '''\n if self._window is None:\n return self\n if self.dispatch('on_dismiss') is True:\n if kwargs.get('force', False) is not True:\n return self\n if kwargs.get('animation', True):\n Animation(_anim_alpha=0., d=self._anim_duration).start(self)\n else:\n self._anim_alpha = 0\n self._real_remove_widget()\n return self\n\n def _align_center(self, *l):\n if self._window:\n self.center = self._window.center\n\n def on_touch_down(self, touch):\n if not self.collide_point(*touch.pos):\n if self.auto_dismiss:\n self.dismiss()\n return True\n super(ModalView, self).on_touch_down(touch)\n return True\n\n def on_touch_move(self, touch):\n super(ModalView, self).on_touch_move(touch)\n return True\n\n def on_touch_up(self, touch):\n super(ModalView, self).on_touch_up(touch)\n return True\n\n def on__anim_alpha(self, instance, value):\n if value == 0 and self._window is not None:\n self._real_remove_widget()\n\n def _real_remove_widget(self):\n if self._window is None:\n return\n self._window.remove_widget(self)\n self._window.unbind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self._window = None\n\n def on_open(self):\n pass\n\n def on_dismiss(self):\n pass\n\n def _handle_keyboard(self, window, key, *largs):\n if key == 27 and self.auto_dismiss:\n self.dismiss()\n return True\n\n\nif __name__ == '__main__':\n from kivy.base import runTouchApp\n from kivy.uix.button import Button\n from kivy.uix.label import Label\n from kivy.uix.gridlayout import GridLayout\n from kivy.core.window import Window\n\n # add view\n content = GridLayout(cols=1)\n content.add_widget(Label(text='This is a hello world'))\n view = ModalView(size_hint=(None, None), size=(256, 256),\n auto_dismiss=True)\n view.add_widget(content)\n\n def open_view(btn):\n view.open()\n\n layout = GridLayout(cols=3)\n for x in range(9):\n btn = Button(text='click me %s' % x)\n btn.bind(on_release=view.open)\n layout.add_widget(btn)\n Window.add_widget(layout)\n\n view.open()\n\n runTouchApp()\n", "path": "kivy/uix/modalview.py"}], "after_files": [{"content": "'''\nModalView\n=========\n\n.. versionadded:: 1.4.0\n\nThe :class:`ModalView` widget is used to create modal views. By default, the\nview will cover the whole \"parent\" window.\n\nRemember that the default size of a Widget is size_hint=(1, 1). If you don't\nwant your view to be fullscreen, either use size hints with values lower than\n1 (for instance size_hint=(.8, .8)) or deactivate the size_hint and use fixed\nsize attributes.\n\nExamples\n--------\n\nExample of a simple 400x400 Hello world view::\n\n view = ModalView(size_hint=(None, None), size=(400, 400))\n view.add_widget(Label(text='Hello world'))\n\nBy default, any click outside the view will dismiss it. If you don't\nwant that, you can set :attr:`ModalView.auto_dismiss` to False::\n\n view = ModalView(auto_dismiss=False)\n view.add_widget(Label(text='Hello world'))\n view.open()\n\nTo manually dismiss/close the view, use the :meth:`ModalView.dismiss` method of\nthe ModalView instance::\n\n view.dismiss()\n\nBoth :meth:`ModalView.open` and :meth:`ModalView.dismiss` are bindable. That\nmeans you can directly bind the function to an action, e.g. 
to a button's\non_press ::\n\n # create content and add it to the view\n content = Button(text='Close me!')\n view = ModalView(auto_dismiss=False)\n view.add_widget(content)\n\n # bind the on_press event of the button to the dismiss function\n content.bind(on_press=view.dismiss)\n\n # open the view\n view.open()\n\n\nModalView Events\n----------------\n\nThere are two events available: `on_open` which is raised when the view is\nopening, and `on_dismiss` which is raised when the view is closed.\nFor `on_dismiss`, you can prevent the view from closing by explictly returning\nTrue from your callback. ::\n\n def my_callback(instance):\n print('ModalView', instance, 'is being dismissed, but is prevented!')\n return True\n view = ModalView()\n view.add_widget(Label(text='Hello world'))\n view.bind(on_dismiss=my_callback)\n view.open()\n\n\n.. versionchanged:: 1.5.0\n The ModalView can be closed by hitting the escape key on the\n keyboard if the :attr:`ModalView.auto_dismiss` property is True (the\n default).\n\n'''\n\n__all__ = ('ModalView', )\n\nfrom kivy.logger import Logger\nfrom kivy.animation import Animation\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \\\n NumericProperty, ListProperty\n\n\nclass ModalView(AnchorLayout):\n '''ModalView class. See module documentation for more information.\n\n :Events:\n `on_open`:\n Fired when the ModalView is opened.\n `on_dismiss`:\n Fired when the ModalView is closed. If the callback returns True,\n the dismiss will be canceled.\n '''\n\n auto_dismiss = BooleanProperty(True)\n '''This property determines if the view is automatically\n dismissed when the user clicks outside it.\n\n :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n attach_to = ObjectProperty(None)\n '''If a widget is set on attach_to, the view will attach to the nearest\n parent window of the widget. If none is found, it will attach to the\n main/global Window.\n\n :attr:`attach_to` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n background_color = ListProperty([0, 0, 0, .7])\n '''Background color in the format (r, g, b, a).\n\n :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and\n defaults to [0, 0, 0, .7].\n '''\n\n background = StringProperty(\n 'atlas://data/images/defaulttheme/modalview-background')\n '''Background image of the view used for the view background.\n\n :attr:`background` is a :class:`~kivy.properties.StringProperty` and\n defaults to 'atlas://data/images/defaulttheme/modalview-background'.\n '''\n\n border = ListProperty([16, 16, 16, 16])\n '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`\n graphics instruction. Used for the :attr:`background_normal` and the\n :attr:`background_down` properties. Can be used when using custom\n backgrounds.\n\n It must be a list of four values: (top, right, bottom, left). 
Read the\n BorderImage instructions for more information about how to use it.\n\n :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to\n (16, 16, 16, 16).\n '''\n\n # Internals properties used for graphical representation.\n\n _anim_alpha = NumericProperty(0)\n\n _anim_duration = NumericProperty(.1)\n\n _window = ObjectProperty(None, allownone=True, rebind=True)\n\n __events__ = ('on_open', 'on_dismiss')\n\n def __init__(self, **kwargs):\n self._parent = None\n super(ModalView, self).__init__(**kwargs)\n\n def _search_window(self):\n # get window to attach to\n window = None\n if self.attach_to is not None:\n window = self.attach_to.get_parent_window()\n if not window:\n window = self.attach_to.get_root_window()\n if not window:\n from kivy.core.window import Window\n window = Window\n return window\n\n def open(self, *largs):\n '''Show the view window from the :attr:`attach_to` widget. If set, it\n will attach to the nearest window. If the widget is not attached to any\n window, the view will attach to the global\n :class:`~kivy.core.window.Window`.\n '''\n if self._window is not None:\n Logger.warning('ModalView: you can only open once.')\n return self\n # search window\n self._window = self._search_window()\n if not self._window:\n Logger.warning('ModalView: cannot open view, no window found.')\n return self\n self._window.add_widget(self)\n self._window.bind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self.center = self._window.center\n self.fbind('center', self._align_center)\n a = Animation(_anim_alpha=1., d=self._anim_duration)\n a.bind(on_complete=lambda *x: self.dispatch('on_open'))\n a.start(self)\n return self\n\n def dismiss(self, *largs, **kwargs):\n '''Close the view if it is open. If you really want to close the\n view, whatever the on_dismiss event returns, you can use the *force*\n argument:\n ::\n\n view = ModalView(...)\n view.dismiss(force=True)\n\n When the view is dismissed, it will be faded out before being\n removed from the parent. 
If you don't want animation, use::\n\n view.dismiss(animation=False)\n\n '''\n if self._window is None:\n return self\n if self.dispatch('on_dismiss') is True:\n if kwargs.get('force', False) is not True:\n return self\n if kwargs.get('animation', True):\n Animation(_anim_alpha=0., d=self._anim_duration).start(self)\n else:\n self._anim_alpha = 0\n self._real_remove_widget()\n return self\n\n def _align_center(self, *l):\n if self._window:\n self.center = self._window.center\n\n def on_touch_down(self, touch):\n if not self.collide_point(*touch.pos):\n if self.auto_dismiss:\n self.dismiss()\n return True\n super(ModalView, self).on_touch_down(touch)\n return True\n\n def on_touch_move(self, touch):\n super(ModalView, self).on_touch_move(touch)\n return True\n\n def on_touch_up(self, touch):\n super(ModalView, self).on_touch_up(touch)\n return True\n\n def on__anim_alpha(self, instance, value):\n if value == 0 and self._window is not None:\n self._real_remove_widget()\n\n def _real_remove_widget(self):\n if self._window is None:\n return\n self._window.remove_widget(self)\n self._window.unbind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self._window = None\n\n def on_open(self):\n pass\n\n def on_dismiss(self):\n pass\n\n def _handle_keyboard(self, window, key, *largs):\n if key == 27 and self.auto_dismiss:\n self.dismiss()\n return True\n\n\nif __name__ == '__main__':\n from kivy.base import runTouchApp\n from kivy.uix.button import Button\n from kivy.uix.label import Label\n from kivy.uix.gridlayout import GridLayout\n from kivy.core.window import Window\n\n # add view\n content = GridLayout(cols=1)\n content.add_widget(Label(text='This is a hello world'))\n view = ModalView(size_hint=(None, None), size=(256, 256),\n auto_dismiss=True)\n view.add_widget(content)\n\n def open_view(btn):\n view.open()\n\n layout = GridLayout(cols=3)\n for x in range(9):\n btn = Button(text='click me %s' % x)\n btn.bind(on_release=view.open)\n layout.add_widget(btn)\n Window.add_widget(layout)\n\n view.open()\n\n runTouchApp()\n", "path": "kivy/uix/modalview.py"}]}
| 3,327 | 113 |
gh_patches_debug_13146 | rasdani/github-patches | git_diff | uccser__cs-unplugged-1415 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Django's JavaScript Catalog
We should use Django's [JavaScript translation catalog](https://docs.djangoproject.com/en/1.11/topics/i18n/translation/#internationalization-in-javascript-code) to allow us to translate text in JavaScript files.
Refer to https://github.com/uccser/cs-field-guide/pull/713 and https://github.com/uccser/cs-field-guide/pull/731
--- END ISSUE ---
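The issue above asks for Django's JavaScript translation catalog. A hedged sketch of the built-in wiring it refers to — the URL pattern and name are illustrative, not taken from the cs-unplugged codebase, and `JavaScriptCatalog` exists from Django 1.10 onwards:

```python
# Hypothetical urls.py entry exposing Django's JS translation catalog.
from django.conf.urls import url
from django.views.i18n import JavaScriptCatalog

urlpatterns = [
    url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
]
```

Templates then load the catalog with a `<script>` tag pointing at that URL, after which `gettext()`/`ngettext()` become available to JavaScript. The golden diff further down shows the route the project actually took.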
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/config/settings/base.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Base Django settings for CS Unplugged project.
4
5 For more information on this file, see
6 https://docs.djangoproject.com/en/dev/topics/settings/
7
8 For the full list of settings and their values, see
9 https://docs.djangoproject.com/en/dev/ref/settings/
10 """
11
12 import environ
13 import os.path
14 import logging.config
15
16 # Add custom languages not provided by Django
17 import django.conf.locale
18 from django.conf import global_settings
19 from django.utils.translation import ugettext_lazy as _
20
21 # cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/
22 ROOT_DIR = environ.Path(__file__) - 3
23
24 # Load operating system environment variables and then prepare to use them
25 env = environ.Env()
26
27 # Wipe default Django logging
28 LOGGING_CONFIG = None
29
30 # APP CONFIGURATION
31 # ----------------------------------------------------------------------------
32 DJANGO_APPS = [
33 # Default Django apps:
34 "django.contrib.auth",
35 "django.contrib.contenttypes",
36 "django.contrib.sessions",
37 "django.contrib.messages",
38 "django.contrib.staticfiles",
39 "django.contrib.postgres",
40
41 # Useful template tags
42 "django.contrib.humanize",
43
44 # Admin
45 "django.contrib.admin",
46 ]
47
48 THIRD_PARTY_APPS = [
49 "django_bootstrap_breadcrumbs",
50 "haystack",
51 "widget_tweaks",
52 "modeltranslation",
53 "bidiutils",
54 ]
55
56 # Apps specific for this project go here.
57 LOCAL_APPS = [
58 "general.apps.GeneralConfig",
59 "topics.apps.TopicsConfig",
60 "plugging_it_in.apps.PluggingitinConfig",
61 "resources.apps.ResourcesConfig",
62 "search.apps.SearchConfig",
63 "classic.apps.ClassicConfig",
64 "at_home.apps.AtHomeConfig",
65 ]
66
67 # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
68 INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS
69
70 # MIDDLEWARE CONFIGURATION
71 # ----------------------------------------------------------------------------
72 MIDDLEWARE = [
73 "django.middleware.security.SecurityMiddleware",
74 "django.contrib.sessions.middleware.SessionMiddleware",
75 "django.middleware.locale.LocaleMiddleware",
76 "django.middleware.common.CommonMiddleware",
77 "django.middleware.csrf.CsrfViewMiddleware",
78 "django.contrib.auth.middleware.AuthenticationMiddleware",
79 "django.contrib.messages.middleware.MessageMiddleware",
80 "django.middleware.clickjacking.XFrameOptionsMiddleware",
81 ]
82
83 # DEBUG
84 # ----------------------------------------------------------------------------
85 # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
86 DEBUG = env.bool("DJANGO_DEBUG", False)
87
88 # FIXTURE CONFIGURATION
89 # ----------------------------------------------------------------------------
90 # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
91 FIXTURE_DIRS = (
92 str(ROOT_DIR.path("fixtures")),
93 )
94
95 # EMAIL CONFIGURATION
96 # -----------------------------------------------------------------------------
97 # EMAIL_BACKEND = env("DJANGO_EMAIL_BACKEND",
98 # default="django.core.mail.backends.smtp.EmailBackend")
99
100 # MANAGER CONFIGURATION
101 # ----------------------------------------------------------------------------
102 # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
103 # ADMINS = [
104 # ("University of Canterbury Computer Science Research Group",
105 # "[email protected]"),
106 # ]
107
108 # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
109 # MANAGERS = ADMINS
110
111 # GENERAL CONFIGURATION
112 # ----------------------------------------------------------------------------
113 # Local time zone for this installation. Choices can be found here:
114 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
115 # although not all choices may be available on all operating systems.
116 # In a Windows environment this must be set to your system time zone.
117 TIME_ZONE = "UTC"
118
119 # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
120 LANGUAGE_CODE = "en"
121
122 INCONTEXT_L10N_PSEUDOLANGUAGE = "xx-lr"
123 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = "yy-rl"
124 INCONTEXT_L10N_PSEUDOLANGUAGES = (
125 INCONTEXT_L10N_PSEUDOLANGUAGE,
126 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI
127 )
128
129 DEFAULT_LANGUAGES = (
130 ("en", "English"),
131 ("de", "Deutsche"),
132 ("es", "Español"),
133 ("mi", "Te Reo Māori"),
134 ("zh-hans", "简体中文"),
135 )
136 # Keep original values of languages for resource generation
137 LANGUAGES = DEFAULT_LANGUAGES
138
139 EXTRA_LANG_INFO = {
140 'mi': {
141 'bidi': False,
142 'code': 'mi',
143 'name': "Te Reo Māori",
144 'name_local': "Te Reo Māori",
145 }
146 }
147
148 if env.bool("INCLUDE_INCONTEXT_L10N", False):
149 EXTRA_LANGUAGES = [
150 (INCONTEXT_L10N_PSEUDOLANGUAGE, "Translation mode"),
151 (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, "Translation mode (Bi-directional)"),
152 ]
153
154 EXTRA_LANG_INFO.update({
155 INCONTEXT_L10N_PSEUDOLANGUAGE: {
156 'bidi': False,
157 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,
158 'name': "Translation mode",
159 'name_local': _("Translation mode"),
160 },
161 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {
162 'bidi': True,
163 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,
164 'name': "Translation mode (Bi-directional)",
165 'name_local': _("Translation mode (Bi-directional)"),
166 }
167 })
168
169 # Add new languages to the list of all django languages
170 global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES
171 global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +
172 [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])
173 # Add new languages to the list of languages used for this project
174 LANGUAGES += tuple(EXTRA_LANGUAGES)
175 LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI
176
177 django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)
178
179 # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
180 SITE_ID = 1
181
182 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
183 USE_I18N = True
184
185 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
186 USE_L10N = True
187
188 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
189 USE_TZ = True
190
191 # See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
192 LOCALE_PATHS = ["locale"]
193
194 # TEMPLATE CONFIGURATION
195 # ----------------------------------------------------------------------------
196 # See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
197 TEMPLATES = [
198 {
199 # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
200 "BACKEND": "django.template.backends.django.DjangoTemplates",
201 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
202 "DIRS": [
203 str(ROOT_DIR.path("templates")),
204 ],
205 "OPTIONS": {
206 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
207 "debug": DEBUG,
208 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
209 # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
210 "loaders": [
211 "django.template.loaders.filesystem.Loader",
212 "django.template.loaders.app_directories.Loader",
213 ],
214 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
215 "context_processors": [
216 "django.template.context_processors.debug",
217 "django.template.context_processors.request",
218 "django.contrib.auth.context_processors.auth",
219 "django.template.context_processors.i18n",
220 "django.template.context_processors.media",
221 "django.template.context_processors.static",
222 "django.template.context_processors.tz",
223 "django.contrib.messages.context_processors.messages",
224 "config.context_processors.version_number.version_number",
225 "config.context_processors.deployed.deployed",
226 "bidiutils.context_processors.bidi",
227 ],
228 "libraries": {
229 "render_html_field": "config.templatetags.render_html_field",
230 "translate_url": "config.templatetags.translate_url",
231 "query_replace": "config.templatetags.query_replace",
232 'custom_tags': 'config.templatetags.custom_tags'
233 },
234 },
235 },
236 ]
237
238 # LOGGING
239 # ------------------------------------------------------------------------------
240 logging.config.dictConfig({
241 'version': 1,
242 'disable_existing_loggers': False,
243 'formatters': {
244 'console': {
245 # exact format is not important, this is the minimum information
246 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
247 },
248 },
249 'handlers': {
250 'console': {
251 'class': 'logging.StreamHandler',
252 'formatter': 'console',
253 },
254 },
255 'loggers': {
256 '': {
257 'level': 'INFO',
258 'handlers': ['console', ],
259 },
260 'csunplugged': {
261 'level': 'INFO',
262 'handlers': ['console', ],
263 # required to avoid double logging with root logger
264 'propagate': False,
265 },
266 },
267 })
268
269 # STATIC FILE CONFIGURATION
270 # ------------------------------------------------------------------------------
271 # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
272 STATIC_ROOT = os.path.join(str(ROOT_DIR.path("staticfiles")), "")
273
274 # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
275 BUILD_ROOT = os.path.join(str(ROOT_DIR.path("build")), "")
276 STATIC_URL = "/staticfiles/"
277
278 # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
279 STATICFILES_DIRS = [
280 BUILD_ROOT,
281 ]
282
283 # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
284 STATICFILES_FINDERS = [
285 "django.contrib.staticfiles.finders.FileSystemFinder",
286 "django.contrib.staticfiles.finders.AppDirectoriesFinder",
287 ]
288
289 # MEDIA CONFIGURATION
290 # ------------------------------------------------------------------------------
291 # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
292 MEDIA_ROOT = str(ROOT_DIR("media"))
293
294 # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
295 MEDIA_URL = "/media/"
296
297 # URL Configuration
298 # ------------------------------------------------------------------------------
299 ROOT_URLCONF = "config.urls"
300
301 # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
302 WSGI_APPLICATION = "config.wsgi.application"
303
304 # PASSWORD VALIDATION
305 # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
306 # ------------------------------------------------------------------------------
307
308 AUTH_PASSWORD_VALIDATORS = [
309 {
310 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
311 },
312 {
313 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
314 },
315 {
316 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
317 },
318 {
319 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
320 },
321 ]
322
323 # SEARCH CONFIGURATION
324 # ------------------------------------------------------------------------------
325 # See: http://django-haystack.readthedocs.io/en/v2.6.0/settings.html
326 HAYSTACK_CONNECTIONS = {
327 "default": {
328 "ENGINE": "haystack.backends.whoosh_backend.WhooshEngine",
329 "PATH": str(ROOT_DIR.path("whoosh_index")),
330 },
331 }
332 HAYSTACK_SEARCH_RESULTS_PER_PAGE = 10
333
334 # OTHER SETTINGS
335 # ------------------------------------------------------------------------------
336 DJANGO_PRODUCTION = env.bool("DJANGO_PRODUCTION")
337 TOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("topics")), "content")
338 RESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("resources")), "content")
339 RESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path("staticfiles")), "resources")
340 RESOURCE_GENERATORS_PACKAGE = "resources.generators"
341 RESOURCE_COPY_AMOUNT = 20
342 SCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path("temp"))
343 CUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path("utils")), "custom_converter_templates", "")
344 MODELTRANSLATION_CUSTOM_FIELDS = ("JSONField",)
345 CLASSIC_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("classic")), "content")
346 GENERAL_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("general")), "content")
347 ACTIVITIES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("at_home")), "content")
348 BREADCRUMBS_TEMPLATE = "django_bootstrap_breadcrumbs/bootstrap4.html"
349
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/csunplugged/config/settings/base.py b/csunplugged/config/settings/base.py
--- a/csunplugged/config/settings/base.py
+++ b/csunplugged/config/settings/base.py
@@ -51,6 +51,7 @@
"widget_tweaks",
"modeltranslation",
"bidiutils",
+ "statici18n",
]
# Apps specific for this project go here.
@@ -346,3 +347,4 @@
GENERAL_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("general")), "content")
ACTIVITIES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("at_home")), "content")
BREADCRUMBS_TEMPLATE = "django_bootstrap_breadcrumbs/bootstrap4.html"
+STATICI18N_ROOT = BUILD_ROOT
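Registering `statici18n` and pointing `STATICI18N_ROOT` at `BUILD_ROOT` suggests the JavaScript catalog is pre-compiled into the static build directory rather than served per-request. A hedged sketch of how such a catalog is typically generated with django-statici18n; the settings module path is a guess based on the file layout above:

```python
# Sketch: pre-compiling the JS translation catalog so it lands under
# STATICI18N_ROOT, using django-statici18n's management command.
import os

import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")  # assumed path
django.setup()
call_command("compilejsi18n")  # command provided by the statici18n app
```

In templates the generated file is usually included via statici18n's template tag, and JavaScript then calls `gettext()` just as with the view-based catalog.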
|
{"golden_diff": "diff --git a/csunplugged/config/settings/base.py b/csunplugged/config/settings/base.py\n--- a/csunplugged/config/settings/base.py\n+++ b/csunplugged/config/settings/base.py\n@@ -51,6 +51,7 @@\n \"widget_tweaks\",\n \"modeltranslation\",\n \"bidiutils\",\n+ \"statici18n\",\n ]\n \n # Apps specific for this project go here.\n@@ -346,3 +347,4 @@\n GENERAL_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"general\")), \"content\")\n ACTIVITIES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"at_home\")), \"content\")\n BREADCRUMBS_TEMPLATE = \"django_bootstrap_breadcrumbs/bootstrap4.html\"\n+STATICI18N_ROOT = BUILD_ROOT\n", "issue": "Use Django's JavaScript Catalog\nWe should use Django's [JavaScript translation catalog](https://docs.djangoproject.com/en/1.11/topics/i18n/translation/#internationalization-in-javascript-code) to allow us to translate text in JavaScript files.\r\n\r\nRefer to https://github.com/uccser/cs-field-guide/pull/713 and https://github.com/uccser/cs-field-guide/pull/731\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nBase Django settings for CS Unplugged project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\nimport environ\nimport os.path\nimport logging.config\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nfrom django.conf import global_settings\nfrom django.utils.translation import ugettext_lazy as _\n\n# cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/\nROOT_DIR = environ.Path(__file__) - 3\n\n# Load operating system environment variables and then prepare to use them\nenv = environ.Env()\n\n# Wipe default Django logging\nLOGGING_CONFIG = None\n\n# APP CONFIGURATION\n# ----------------------------------------------------------------------------\nDJANGO_APPS = [\n # Default Django apps:\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.postgres\",\n\n # Useful template tags\n \"django.contrib.humanize\",\n\n # Admin\n \"django.contrib.admin\",\n]\n\nTHIRD_PARTY_APPS = [\n \"django_bootstrap_breadcrumbs\",\n \"haystack\",\n \"widget_tweaks\",\n \"modeltranslation\",\n \"bidiutils\",\n]\n\n# Apps specific for this project go here.\nLOCAL_APPS = [\n \"general.apps.GeneralConfig\",\n \"topics.apps.TopicsConfig\",\n \"plugging_it_in.apps.PluggingitinConfig\",\n \"resources.apps.ResourcesConfig\",\n \"search.apps.SearchConfig\",\n \"classic.apps.ClassicConfig\",\n \"at_home.apps.AtHomeConfig\",\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\n# MIDDLEWARE CONFIGURATION\n# ----------------------------------------------------------------------------\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\n# DEBUG\n# ----------------------------------------------------------------------------\n# 
See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool(\"DJANGO_DEBUG\", False)\n\n# FIXTURE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n str(ROOT_DIR.path(\"fixtures\")),\n)\n\n# EMAIL CONFIGURATION\n# -----------------------------------------------------------------------------\n# EMAIL_BACKEND = env(\"DJANGO_EMAIL_BACKEND\",\n# default=\"django.core.mail.backends.smtp.EmailBackend\")\n\n# MANAGER CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# ADMINS = [\n# (\"University of Canterbury Computer Science Research Group\",\n# \"[email protected]\"),\n# ]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\n# MANAGERS = ADMINS\n\n# GENERAL CONFIGURATION\n# ----------------------------------------------------------------------------\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = \"UTC\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = \"en\"\n\nINCONTEXT_L10N_PSEUDOLANGUAGE = \"xx-lr\"\nINCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = \"yy-rl\"\nINCONTEXT_L10N_PSEUDOLANGUAGES = (\n INCONTEXT_L10N_PSEUDOLANGUAGE,\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI\n)\n\nDEFAULT_LANGUAGES = (\n (\"en\", \"English\"),\n (\"de\", \"Deutsche\"),\n (\"es\", \"Espa\u00f1ol\"),\n (\"mi\", \"Te Reo M\u0101ori\"),\n (\"zh-hans\", \"\u7b80\u4f53\u4e2d\u6587\"),\n)\n# Keep original values of languages for resource generation\nLANGUAGES = DEFAULT_LANGUAGES\n\nEXTRA_LANG_INFO = {\n 'mi': {\n 'bidi': False,\n 'code': 'mi',\n 'name': \"Te Reo M\u0101ori\",\n 'name_local': \"Te Reo M\u0101ori\",\n }\n}\n\nif env.bool(\"INCLUDE_INCONTEXT_L10N\", False):\n EXTRA_LANGUAGES = [\n (INCONTEXT_L10N_PSEUDOLANGUAGE, \"Translation mode\"),\n (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, \"Translation mode (Bi-directional)\"),\n ]\n\n EXTRA_LANG_INFO.update({\n INCONTEXT_L10N_PSEUDOLANGUAGE: {\n 'bidi': False,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,\n 'name': \"Translation mode\",\n 'name_local': _(\"Translation mode\"),\n },\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {\n 'bidi': True,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,\n 'name': \"Translation mode (Bi-directional)\",\n 'name_local': _(\"Translation mode (Bi-directional)\"),\n }\n })\n\n # Add new languages to the list of all django languages\n global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES\n global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +\n [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])\n # Add new languages to the list of languages used for this project\n LANGUAGES += tuple(EXTRA_LANGUAGES)\n LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI\n\ndjango.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n\n# See: 
https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths\nLOCALE_PATHS = [\"locale\"]\n\n# TEMPLATE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n \"DIRS\": [\n str(ROOT_DIR.path(\"templates\")),\n ],\n \"OPTIONS\": {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n \"debug\": DEBUG,\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"config.context_processors.version_number.version_number\",\n \"config.context_processors.deployed.deployed\",\n \"bidiutils.context_processors.bidi\",\n ],\n \"libraries\": {\n \"render_html_field\": \"config.templatetags.render_html_field\",\n \"translate_url\": \"config.templatetags.translate_url\",\n \"query_replace\": \"config.templatetags.query_replace\",\n 'custom_tags': 'config.templatetags.custom_tags'\n },\n },\n },\n]\n\n# LOGGING\n# ------------------------------------------------------------------------------\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n # exact format is not important, this is the minimum information\n 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'console',\n },\n },\n 'loggers': {\n '': {\n 'level': 'INFO',\n 'handlers': ['console', ],\n },\n 'csunplugged': {\n 'level': 'INFO',\n 'handlers': ['console', ],\n # required to avoid double logging with root logger\n 'propagate': False,\n },\n },\n})\n\n# STATIC FILE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"\")\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nBUILD_ROOT = os.path.join(str(ROOT_DIR.path(\"build\")), \"\")\nSTATIC_URL = \"/staticfiles/\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = [\n BUILD_ROOT,\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: 
https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(ROOT_DIR(\"media\"))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nROOT_URLCONF = \"config.urls\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# PASSWORD VALIDATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators\n# ------------------------------------------------------------------------------\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# SEARCH CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: http://django-haystack.readthedocs.io/en/v2.6.0/settings.html\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": str(ROOT_DIR.path(\"whoosh_index\")),\n },\n}\nHAYSTACK_SEARCH_RESULTS_PER_PAGE = 10\n\n# OTHER SETTINGS\n# ------------------------------------------------------------------------------\nDJANGO_PRODUCTION = env.bool(\"DJANGO_PRODUCTION\")\nTOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"topics\")), \"content\")\nRESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"resources\")), \"content\")\nRESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"resources\")\nRESOURCE_GENERATORS_PACKAGE = \"resources.generators\"\nRESOURCE_COPY_AMOUNT = 20\nSCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path(\"temp\"))\nCUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path(\"utils\")), \"custom_converter_templates\", \"\")\nMODELTRANSLATION_CUSTOM_FIELDS = (\"JSONField\",)\nCLASSIC_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"classic\")), \"content\")\nGENERAL_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"general\")), \"content\")\nACTIVITIES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"at_home\")), \"content\")\nBREADCRUMBS_TEMPLATE = \"django_bootstrap_breadcrumbs/bootstrap4.html\"\n", "path": "csunplugged/config/settings/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nBase Django settings for CS Unplugged project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\nimport environ\nimport os.path\nimport logging.config\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nfrom django.conf import global_settings\nfrom django.utils.translation import ugettext_lazy as _\n\n# cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/\nROOT_DIR = environ.Path(__file__) - 3\n\n# Load operating system environment variables and then prepare to use them\nenv = environ.Env()\n\n# Wipe default Django logging\nLOGGING_CONFIG = None\n\n# APP CONFIGURATION\n# ----------------------------------------------------------------------------\nDJANGO_APPS = [\n # Default Django apps:\n 
\"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.postgres\",\n\n # Useful template tags\n \"django.contrib.humanize\",\n\n # Admin\n \"django.contrib.admin\",\n]\n\nTHIRD_PARTY_APPS = [\n \"django_bootstrap_breadcrumbs\",\n \"haystack\",\n \"widget_tweaks\",\n \"modeltranslation\",\n \"bidiutils\",\n \"statici18n\",\n]\n\n# Apps specific for this project go here.\nLOCAL_APPS = [\n \"general.apps.GeneralConfig\",\n \"topics.apps.TopicsConfig\",\n \"plugging_it_in.apps.PluggingitinConfig\",\n \"resources.apps.ResourcesConfig\",\n \"search.apps.SearchConfig\",\n \"classic.apps.ClassicConfig\",\n \"at_home.apps.AtHomeConfig\",\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\n# MIDDLEWARE CONFIGURATION\n# ----------------------------------------------------------------------------\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\n# DEBUG\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool(\"DJANGO_DEBUG\", False)\n\n# FIXTURE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n str(ROOT_DIR.path(\"fixtures\")),\n)\n\n# EMAIL CONFIGURATION\n# -----------------------------------------------------------------------------\n# EMAIL_BACKEND = env(\"DJANGO_EMAIL_BACKEND\",\n# default=\"django.core.mail.backends.smtp.EmailBackend\")\n\n# MANAGER CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# ADMINS = [\n# (\"University of Canterbury Computer Science Research Group\",\n# \"[email protected]\"),\n# ]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\n# MANAGERS = ADMINS\n\n# GENERAL CONFIGURATION\n# ----------------------------------------------------------------------------\n# Local time zone for this installation. 
Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = \"UTC\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = \"en\"\n\nINCONTEXT_L10N_PSEUDOLANGUAGE = \"xx-lr\"\nINCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = \"yy-rl\"\nINCONTEXT_L10N_PSEUDOLANGUAGES = (\n INCONTEXT_L10N_PSEUDOLANGUAGE,\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI\n)\n\nDEFAULT_LANGUAGES = (\n (\"en\", \"English\"),\n (\"de\", \"Deutsche\"),\n (\"es\", \"Espa\u00f1ol\"),\n (\"mi\", \"Te Reo M\u0101ori\"),\n (\"zh-hans\", \"\u7b80\u4f53\u4e2d\u6587\"),\n)\n# Keep original values of languages for resource generation\nLANGUAGES = DEFAULT_LANGUAGES\n\nEXTRA_LANG_INFO = {\n 'mi': {\n 'bidi': False,\n 'code': 'mi',\n 'name': \"Te Reo M\u0101ori\",\n 'name_local': \"Te Reo M\u0101ori\",\n }\n}\n\nif env.bool(\"INCLUDE_INCONTEXT_L10N\", False):\n EXTRA_LANGUAGES = [\n (INCONTEXT_L10N_PSEUDOLANGUAGE, \"Translation mode\"),\n (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, \"Translation mode (Bi-directional)\"),\n ]\n\n EXTRA_LANG_INFO.update({\n INCONTEXT_L10N_PSEUDOLANGUAGE: {\n 'bidi': False,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,\n 'name': \"Translation mode\",\n 'name_local': _(\"Translation mode\"),\n },\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {\n 'bidi': True,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,\n 'name': \"Translation mode (Bi-directional)\",\n 'name_local': _(\"Translation mode (Bi-directional)\"),\n }\n })\n\n # Add new languages to the list of all django languages\n global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES\n global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +\n [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])\n # Add new languages to the list of languages used for this project\n LANGUAGES += tuple(EXTRA_LANGUAGES)\n LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI\n\ndjango.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths\nLOCALE_PATHS = [\"locale\"]\n\n# TEMPLATE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n \"DIRS\": [\n str(ROOT_DIR.path(\"templates\")),\n ],\n \"OPTIONS\": {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n \"debug\": DEBUG,\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n \"context_processors\": [\n 
\"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"config.context_processors.version_number.version_number\",\n \"config.context_processors.deployed.deployed\",\n \"bidiutils.context_processors.bidi\",\n ],\n \"libraries\": {\n \"render_html_field\": \"config.templatetags.render_html_field\",\n \"translate_url\": \"config.templatetags.translate_url\",\n \"query_replace\": \"config.templatetags.query_replace\",\n 'custom_tags': 'config.templatetags.custom_tags'\n },\n },\n },\n]\n\n# LOGGING\n# ------------------------------------------------------------------------------\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n # exact format is not important, this is the minimum information\n 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'console',\n },\n },\n 'loggers': {\n '': {\n 'level': 'INFO',\n 'handlers': ['console', ],\n },\n 'csunplugged': {\n 'level': 'INFO',\n 'handlers': ['console', ],\n # required to avoid double logging with root logger\n 'propagate': False,\n },\n },\n})\n\n# STATIC FILE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"\")\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nBUILD_ROOT = os.path.join(str(ROOT_DIR.path(\"build\")), \"\")\nSTATIC_URL = \"/staticfiles/\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = [\n BUILD_ROOT,\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(ROOT_DIR(\"media\"))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nROOT_URLCONF = \"config.urls\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# PASSWORD VALIDATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators\n# ------------------------------------------------------------------------------\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# SEARCH CONFIGURATION\n# 
------------------------------------------------------------------------------\n# See: http://django-haystack.readthedocs.io/en/v2.6.0/settings.html\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": str(ROOT_DIR.path(\"whoosh_index\")),\n },\n}\nHAYSTACK_SEARCH_RESULTS_PER_PAGE = 10\n\n# OTHER SETTINGS\n# ------------------------------------------------------------------------------\nDJANGO_PRODUCTION = env.bool(\"DJANGO_PRODUCTION\")\nTOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"topics\")), \"content\")\nRESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"resources\")), \"content\")\nRESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"resources\")\nRESOURCE_GENERATORS_PACKAGE = \"resources.generators\"\nRESOURCE_COPY_AMOUNT = 20\nSCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path(\"temp\"))\nCUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path(\"utils\")), \"custom_converter_templates\", \"\")\nMODELTRANSLATION_CUSTOM_FIELDS = (\"JSONField\",)\nCLASSIC_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"classic\")), \"content\")\nGENERAL_PAGES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"general\")), \"content\")\nACTIVITIES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"at_home\")), \"content\")\nBREADCRUMBS_TEMPLATE = \"django_bootstrap_breadcrumbs/bootstrap4.html\"\nSTATICI18N_ROOT = BUILD_ROOT\n", "path": "csunplugged/config/settings/base.py"}]}
| 3,999 | 175 |
gh_patches_debug_29474 | rasdani/github-patches | git_diff | borgbackup__borg-1193 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
borgbackup build failure when using OpenSSL 1.1.0
https://groups.google.com/d/msg/linux.debian.devel/53fq9S-Qpp4/V_0pPtdzBQAJ
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `borg/testsuite/crypto.py`
Content:
```
1 from binascii import hexlify
2
3 from ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes
4 from . import BaseTestCase
5
6
7 class CryptoTestCase(BaseTestCase):
8
9 def test_bytes_to_int(self):
10 self.assert_equal(bytes_to_int(b'\0\0\0\1'), 1)
11
12 def test_bytes_to_long(self):
13 self.assert_equal(bytes_to_long(b'\0\0\0\0\0\0\0\1'), 1)
14 self.assert_equal(long_to_bytes(1), b'\0\0\0\0\0\0\0\1')
15
16 def test_aes(self):
17 key = b'X' * 32
18 data = b'foo' * 10
19 # encrypt
20 aes = AES(is_encrypt=True, key=key)
21 self.assert_equal(bytes_to_long(aes.iv, 8), 0)
22 cdata = aes.encrypt(data)
23 self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')
24 self.assert_equal(bytes_to_long(aes.iv, 8), 2)
25 # decrypt
26 aes = AES(is_encrypt=False, key=key)
27 self.assert_equal(bytes_to_long(aes.iv, 8), 0)
28 pdata = aes.decrypt(cdata)
29 self.assert_equal(data, pdata)
30 self.assert_equal(bytes_to_long(aes.iv, 8), 2)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/borg/testsuite/crypto.py b/borg/testsuite/crypto.py
--- a/borg/testsuite/crypto.py
+++ b/borg/testsuite/crypto.py
@@ -1,6 +1,7 @@
from binascii import hexlify
from ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes
+from ..crypto import increment_iv, bytes16_to_int, int_to_bytes16
from . import BaseTestCase
@@ -13,6 +14,27 @@
self.assert_equal(bytes_to_long(b'\0\0\0\0\0\0\0\1'), 1)
self.assert_equal(long_to_bytes(1), b'\0\0\0\0\0\0\0\1')
+ def test_bytes16_to_int(self):
+ self.assert_equal(bytes16_to_int(b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1'), 1)
+ self.assert_equal(int_to_bytes16(1), b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1')
+ self.assert_equal(bytes16_to_int(b'\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0\0'), 2 ** 64)
+ self.assert_equal(int_to_bytes16(2 ** 64), b'\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0\0')
+
+ def test_increment_iv(self):
+ iv0 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'
+ iv1 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1'
+ iv2 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\2'
+ self.assert_equal(increment_iv(iv0, 0), iv0)
+ self.assert_equal(increment_iv(iv0, 1), iv1)
+ self.assert_equal(increment_iv(iv0, 2), iv2)
+ iva = b'\0\0\0\0\0\0\0\0\xff\xff\xff\xff\xff\xff\xff\xff'
+ ivb = b'\0\0\0\0\0\0\0\1\x00\x00\x00\x00\x00\x00\x00\x00'
+ ivc = b'\0\0\0\0\0\0\0\1\x00\x00\x00\x00\x00\x00\x00\x01'
+ self.assert_equal(increment_iv(iva, 0), iva)
+ self.assert_equal(increment_iv(iva, 1), ivb)
+ self.assert_equal(increment_iv(iva, 2), ivc)
+ self.assert_equal(increment_iv(iv0, 2**64), ivb)
+
def test_aes(self):
key = b'X' * 32
data = b'foo' * 10
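The new test vectors pin down the IV helpers' behaviour: a 16-byte IV is treated as a big-endian 128-bit counter. A rough pure-Python model, consistent with these vectors but only an illustration (it is not borg's actual implementation), could look like this:

```python
def bytes16_to_int(b: bytes) -> int:
    # Interpret 16 bytes as an unsigned big-endian integer.
    return int.from_bytes(b, "big")


def int_to_bytes16(i: int) -> bytes:
    # Inverse of bytes16_to_int; the value must fit into 128 bits.
    return i.to_bytes(16, "big")


def increment_iv(iv: bytes, amount: int = 1) -> bytes:
    # Add `amount` to the IV, carrying across all 16 bytes, which is what the
    # 0xff...ff -> 0x01 00...00 test vectors above exercise.
    return int_to_bytes16(bytes16_to_int(iv) + amount)


assert increment_iv(b"\0" * 16, 2 ** 64) == b"\0" * 7 + b"\1" + b"\0" * 8
```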
|
{"golden_diff": "diff --git a/borg/testsuite/crypto.py b/borg/testsuite/crypto.py\n--- a/borg/testsuite/crypto.py\n+++ b/borg/testsuite/crypto.py\n@@ -1,6 +1,7 @@\n from binascii import hexlify\n \n from ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes\n+from ..crypto import increment_iv, bytes16_to_int, int_to_bytes16\n from . import BaseTestCase\n \n \n@@ -13,6 +14,27 @@\n self.assert_equal(bytes_to_long(b'\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(long_to_bytes(1), b'\\0\\0\\0\\0\\0\\0\\0\\1')\n \n+ def test_bytes16_to_int(self):\n+ self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n+ self.assert_equal(int_to_bytes16(1), b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1')\n+ self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0'), 2 ** 64)\n+ self.assert_equal(int_to_bytes16(2 ** 64), b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0')\n+\n+ def test_increment_iv(self):\n+ iv0 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0'\n+ iv1 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'\n+ iv2 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\2'\n+ self.assert_equal(increment_iv(iv0, 0), iv0)\n+ self.assert_equal(increment_iv(iv0, 1), iv1)\n+ self.assert_equal(increment_iv(iv0, 2), iv2)\n+ iva = b'\\0\\0\\0\\0\\0\\0\\0\\0\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff'\n+ ivb = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n+ ivc = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'\n+ self.assert_equal(increment_iv(iva, 0), iva)\n+ self.assert_equal(increment_iv(iva, 1), ivb)\n+ self.assert_equal(increment_iv(iva, 2), ivc)\n+ self.assert_equal(increment_iv(iv0, 2**64), ivb)\n+\n def test_aes(self):\n key = b'X' * 32\n data = b'foo' * 10\n", "issue": "borgbackup build failure when using OpenSSL 1.1.0\nhttps://groups.google.com/d/msg/linux.debian.devel/53fq9S-Qpp4/V_0pPtdzBQAJ\n\n", "before_files": [{"content": "from binascii import hexlify\n\nfrom ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes\nfrom . import BaseTestCase\n\n\nclass CryptoTestCase(BaseTestCase):\n\n def test_bytes_to_int(self):\n self.assert_equal(bytes_to_int(b'\\0\\0\\0\\1'), 1)\n\n def test_bytes_to_long(self):\n self.assert_equal(bytes_to_long(b'\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(long_to_bytes(1), b'\\0\\0\\0\\0\\0\\0\\0\\1')\n\n def test_aes(self):\n key = b'X' * 32\n data = b'foo' * 10\n # encrypt\n aes = AES(is_encrypt=True, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n cdata = aes.encrypt(data)\n self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n # decrypt\n aes = AES(is_encrypt=False, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n pdata = aes.decrypt(cdata)\n self.assert_equal(data, pdata)\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n", "path": "borg/testsuite/crypto.py"}], "after_files": [{"content": "from binascii import hexlify\n\nfrom ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes\nfrom ..crypto import increment_iv, bytes16_to_int, int_to_bytes16\nfrom . 
import BaseTestCase\n\n\nclass CryptoTestCase(BaseTestCase):\n\n def test_bytes_to_int(self):\n self.assert_equal(bytes_to_int(b'\\0\\0\\0\\1'), 1)\n\n def test_bytes_to_long(self):\n self.assert_equal(bytes_to_long(b'\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(long_to_bytes(1), b'\\0\\0\\0\\0\\0\\0\\0\\1')\n\n def test_bytes16_to_int(self):\n self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(int_to_bytes16(1), b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1')\n self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0'), 2 ** 64)\n self.assert_equal(int_to_bytes16(2 ** 64), b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0')\n\n def test_increment_iv(self):\n iv0 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0'\n iv1 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'\n iv2 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\2'\n self.assert_equal(increment_iv(iv0, 0), iv0)\n self.assert_equal(increment_iv(iv0, 1), iv1)\n self.assert_equal(increment_iv(iv0, 2), iv2)\n iva = b'\\0\\0\\0\\0\\0\\0\\0\\0\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff'\n ivb = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n ivc = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'\n self.assert_equal(increment_iv(iva, 0), iva)\n self.assert_equal(increment_iv(iva, 1), ivb)\n self.assert_equal(increment_iv(iva, 2), ivc)\n self.assert_equal(increment_iv(iv0, 2**64), ivb)\n\n def test_aes(self):\n key = b'X' * 32\n data = b'foo' * 10\n # encrypt\n aes = AES(is_encrypt=True, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n cdata = aes.encrypt(data)\n self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n # decrypt\n aes = AES(is_encrypt=False, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n pdata = aes.decrypt(cdata)\n self.assert_equal(data, pdata)\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n", "path": "borg/testsuite/crypto.py"}]}
| 709 | 779 |
gh_patches_debug_29939
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-789
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VFS tests fail on Windows due to path strings
Running `py.test -v tests\test_rio_options.py` on Windows fails because of UNIX-style paths in tests. Specifically, two tests fail:
```
tests\test_rio_options.py::test_file_in_handler_with_vfs FAILED
tests\test_rio_options.py::test_file_in_handler_with_vfs_file FAILED
```
For `test_file_in_handler_with_vfs_file`, I think it can be fixed with:
```
assert retval.endswith(os.path.normpath('tests/data/RGB.byte.tif'))
```
but not sure what to do about `test_file_in_handler_with_vfs`. My `retval` from line 54 is:
```
'zip://D:\\matt\\rasterio\\tests\\data\\files.zip!/inputs/RGB.byte.tif'
```
so the path to the zip file is Windows-style, while the path inside the zip is UNIX-style. Both asserts fail here. Hopefully an easy one ...
--- END ISSUE ---
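To make the mismatch described in the issue above concrete: on Windows `os.path.abspath` produces backslashes, while the member path after the `!` keeps forward slashes. A small, hypothetical sketch (the helper name and paths are illustrative, not taken from the repository) of comparing forward-slashed forms so the same assertion works on both platforms:

```python
import os


def as_forward_slashes(path: str) -> str:
    # Normalise an absolute filesystem path to forward slashes so one
    # assertion can be used on POSIX and on Windows alike.
    return os.path.abspath(path).replace(os.path.sep, "/")


# Mirrors the failing assertion style from the issue, in a platform-agnostic way.
retval = "zip://" + as_forward_slashes("tests/data/files.zip") + "!/inputs/RGB.byte.tif"
assert retval.endswith("tests/data/files.zip!/inputs/RGB.byte.tif")
```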
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/vfs.py`
Content:
```
1 """Implementation of Apache VFS schemes and URLs."""
2
3 import os
4
5 from rasterio.compat import urlparse
6
7
8 # NB: As not to propagate fallacies of distributed computing, Rasterio
9 # does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only
10 # the following local filesystem schemes are supported.
11 SCHEMES = {'gzip': 'gzip', 'zip': 'zip', 'tar': 'tar', 'https': 'curl',
12 'http': 'curl', 's3': 's3'}
13
14
15 def parse_path(uri, vfs=None):
16 """Parse a URI or Apache VFS URL into its parts
17
18 Returns: tuple
19 (path, archive, scheme)
20 """
21 archive = scheme = None
22 path = uri
23 if vfs:
24 parts = urlparse(vfs)
25 scheme = parts.scheme
26 archive = parts.path
27 if parts.netloc and parts.netloc != 'localhost': # pragma: no cover
28 archive = parts.netloc + archive
29 else:
30 parts = urlparse(path)
31 scheme = parts.scheme
32 path = parts.path
33 if parts.netloc and parts.netloc != 'localhost':
34 path = parts.netloc + path
35 # There are certain URI schemes we favor over GDAL's names.
36 if scheme in SCHEMES:
37 parts = path.split('!')
38 path = parts.pop() if parts else None
39 archive = parts.pop() if parts else None
40 # For filesystem paths.
41 elif scheme in (None, '', 'file'):
42 pass
43 # We permit GDAL's idiosyncratic URI-like dataset paths such as
44 # 'NETCDF:...' to fall right through with no parsed archive
45 # or scheme.
46 else:
47 archive = scheme = None
48 path = uri
49
50 return path, archive, scheme
51
52
53 def vsi_path(path, archive=None, scheme=None):
54 """Convert a parsed path to a GDAL VSI path."""
55 # If a VSF and archive file are specified, we convert the path to
56 # a GDAL VSI path (see cpl_vsi.h).
57 if scheme and scheme.startswith('http'):
58 result = "/vsicurl/{0}://{1}".format(scheme, path)
59 elif scheme and scheme == 's3':
60 result = "/vsis3/{0}".format(path)
61 elif scheme and scheme != 'file':
62 path = path.strip(os.path.sep)
63 result = os.path.sep.join(
64 ['/vsi{0}'.format(scheme), archive, path])
65 else:
66 result = path
67 return result
68
```
Path: `rasterio/rio/options.py`
Content:
```
1 """
2 Registry of common rio CLI options. See cligj for more options.
3
4 -a, --all: Use all pixels touched by features. In rio-mask, rio-rasterize
5 --as-mask/--not-as-mask: interpret band as mask or not. In rio-shapes
6 --band/--mask: use band or mask. In rio-shapes
7 --bbox:
8 -b, --bidx: band index(es) (singular or multiple value versions).
9 In rio-info, rio-sample, rio-shapes, rio-stack (different usages)
10 --bounds: bounds in world coordinates.
11 In rio-info, rio-rasterize (different usages)
12 --count: count of bands. In rio-info
13 --crop: Crop raster to extent of features. In rio-mask
14 --crs: CRS of input raster. In rio-info
15 --default-value: default for rasterized pixels. In rio-rasterize
16 --dimensions: Output width, height. In rio-rasterize
17 --dst-crs: destination CRS. In rio-transform
18 --fill: fill value for pixels not covered by features. In rio-rasterize
19 --formats: list available formats. In rio-info
20 --height: height of raster. In rio-info
21 -i, --invert: Invert mask created from features: In rio-mask
22 -j, --geojson-mask: GeoJSON for masking raster. In rio-mask
23 --lnglat: geograhpic coordinates of center of raster. In rio-info
24 --masked/--not-masked: read masked data from source file.
25 In rio-calc, rio-info
26 -m, --mode: output file mode (r, r+). In rio-insp
27 --name: input file name alias. In rio-calc
28 --nodata: nodata value. In rio-info, rio-merge (different usages)
29 --photometric: photometric interpretation. In rio-stack
30 --property: GeoJSON property to use as values for rasterize. In rio-rasterize
31 -r, --res: output resolution.
32 In rio-info, rio-rasterize (different usages. TODO: try to combine
33 usages, prefer rio-rasterize version)
34 --sampling: Inverse of sampling fraction. In rio-shapes
35 --shape: shape (width, height) of band. In rio-info
36 --src-crs: source CRS.
37 In rio-insp, rio-rasterize (different usages. TODO: consolidate usages)
38 --stats: print raster stats. In rio-inf
39 -t, --dtype: data type. In rio-calc, rio-info (different usages)
40 --width: width of raster. In rio-info
41 --with-nodata/--without-nodata: include nodata regions or not. In rio-shapes.
42 -v, --tell-me-more, --verbose
43 --vfs: virtual file system.
44 """
45
46
47 # TODO: move file_in_arg and file_out_arg to cligj
48
49 import os.path
50
51 import click
52
53 import rasterio
54 from rasterio.vfs import parse_path
55
56
57 def _cb_key_val(ctx, param, value):
58
59 """
60 click callback to validate `--opt KEY1=VAL1 --opt KEY2=VAL2` and collect
61 in a dictionary like the one below, which is what the CLI function receives.
62 If no value or `None` is received then an empty dictionary is returned.
63
64 {
65 'KEY1': 'VAL1',
66 'KEY2': 'VAL2'
67 }
68
69 Note: `==VAL` breaks this as `str.split('=', 1)` is used.
70 """
71
72 if not value:
73 return {}
74 else:
75 out = {}
76 for pair in value:
77 if '=' not in pair:
78 raise click.BadParameter("Invalid syntax for KEY=VAL arg: {}".format(pair))
79 else:
80 k, v = pair.split('=', 1)
81 k = k.lower()
82 v = v.lower()
83 out[k] = v
84 return out
85
86
87 def file_in_handler(ctx, param, value):
88 """Normalize ordinary filesystem and VFS paths"""
89 try:
90 path, archive, scheme = parse_path(value)
91 except ValueError as exc:
92 raise click.BadParameter(str(exc))
93 path_to_check = archive or path
94 if not scheme in ['http', 'https', 's3'] and not os.path.exists(path_to_check):
95 raise click.BadParameter(
96 "Input file {0} does not exist".format(path_to_check))
97 if archive and scheme:
98 archive = os.path.abspath(archive)
99 path = "{0}://{1}!{2}".format(scheme, archive, path)
100 elif scheme and scheme.startswith('http'):
101 path = "{0}://{1}".format(scheme, path)
102 elif scheme == 's3':
103 path = "{0}://{1}".format(scheme, path)
104 else:
105 path = os.path.abspath(path)
106 return path
107
108
109 def from_like_context(ctx, param, value):
110 """Return the value for an option from the context if the option
111 or `--all` is given, else return None."""
112 if ctx.obj and ctx.obj.get('like') and (
113 value == 'like' or ctx.obj.get('all_like')):
114 return ctx.obj['like'][param.name]
115 else:
116 return None
117
118
119 def like_handler(ctx, param, value):
120 """Copy a dataset's meta property to the command context for access
121 from other callbacks."""
122 if ctx.obj is None:
123 ctx.obj = {}
124 if value:
125 with rasterio.open(value) as src:
126 metadata = src.meta
127 ctx.obj['like'] = metadata
128 ctx.obj['like']['transform'] = metadata['affine']
129 ctx.obj['like']['tags'] = src.tags()
130
131
132 def nodata_handler(ctx, param, value):
133 """Get nodata value from a template file or command line."""
134 retval = from_like_context(ctx, param, value)
135 if retval is None and value is not None:
136 try:
137 retval = float(value)
138 except:
139 raise click.BadParameter(
140 "%s is not a number." % repr(value),
141 param=param, param_hint='nodata')
142 return retval
143
144
145 # Singular input file
146 file_in_arg = click.argument('INPUT', callback=file_in_handler)
147
148 # Singular output file
149 file_out_arg = click.argument(
150 'OUTPUT',
151 type=click.Path(resolve_path=True))
152
153 bidx_opt = click.option(
154 '-b', '--bidx',
155 type=int,
156 default=1,
157 help="Input file band index (default: 1).")
158
159 bidx_mult_opt = click.option(
160 '-b', '--bidx',
161 multiple=True,
162 help="Indexes of input file bands.")
163
164 # TODO: may be better suited to cligj
165 bounds_opt = click.option(
166 '--bounds',
167 nargs=4, type=float, default=None,
168 help='Output bounds: left bottom right top.')
169
170 dimensions_opt = click.option(
171 '--dimensions',
172 nargs=2, type=int, default=None,
173 help='Output dataset width, height in number of pixels.')
174
175 dtype_opt = click.option(
176 '-t', '--dtype',
177 type=click.Choice([
178 'ubyte', 'uint8', 'uint16', 'int16', 'uint32', 'int32',
179 'float32', 'float64']),
180 default=None,
181 help="Output data type.")
182
183 like_file_opt = click.option(
184 '--like',
185 type=click.Path(exists=True),
186 help='Raster dataset to use as a template for obtaining affine '
187 'transform (bounds and resolution), crs, data type, and driver '
188 'used to create the output.')
189
190 masked_opt = click.option(
191 '--masked/--not-masked',
192 default=True,
193 help="Evaluate expressions using masked arrays (the default) or ordinary "
194 "numpy arrays.")
195
196 output_opt = click.option(
197 '-o', '--output',
198 default=None,
199 type=click.Path(resolve_path=True),
200 help="Path to output file (optional alternative to a positional arg). "
201 "Existing files will be overwritten (`--force-overwrite` is "
202 "implied).")
203
204 resolution_opt = click.option(
205 '-r', '--res',
206 multiple=True, type=float, default=None,
207 help='Output dataset resolution in units of coordinate '
208 'reference system. Pixels assumed to be square if this option '
209 'is used once, otherwise use: '
210 '--res pixel_width --res pixel_height.')
211
212 creation_options = click.option(
213 '--co', 'creation_options',
214 metavar='NAME=VALUE',
215 multiple=True,
216 callback=_cb_key_val,
217 help="Driver specific creation options."
218 "See the documentation for the selected output driver for "
219 "more information.")
220
221 rgb_opt = click.option(
222 '--rgb', 'photometric',
223 flag_value='rgb',
224 default=False,
225 help="Set RGB photometric interpretation.")
226
227 force_overwrite_opt = click.option(
228 '--force-overwrite', 'force_overwrite',
229 is_flag=True, type=bool, default=False,
230 help="Always overwrite an existing output file.")
231
232 nodata_opt = click.option(
233 '--nodata', callback=nodata_handler, default=None,
234 help="New nodata value.")
235
236 like_opt = click.option(
237 '--like',
238 type=click.Path(exists=True),
239 callback=like_handler,
240 is_eager=True,
241 help="Raster dataset to use as a template for obtaining affine "
242 "transform (bounds and resolution), crs, and nodata values.")
243
244 all_touched_opt = click.option(
245 '-a', '--all', '--all_touched', 'all_touched',
246 is_flag=True,
247 default=False,
248 help='Use all pixels touched by features, otherwise (default) use only '
249 'pixels whose center is within the polygon or that are selected by '
250 'Bresenhams line algorithm')
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/rio/options.py b/rasterio/rio/options.py
--- a/rasterio/rio/options.py
+++ b/rasterio/rio/options.py
@@ -83,6 +83,9 @@
out[k] = v
return out
+def abspath_forward_slashes(path):
+ """Return forward-slashed version of os.path.abspath"""
+ return '/'.join(os.path.abspath(path).split(os.path.sep))
def file_in_handler(ctx, param, value):
"""Normalize ordinary filesystem and VFS paths"""
@@ -95,14 +98,14 @@
raise click.BadParameter(
"Input file {0} does not exist".format(path_to_check))
if archive and scheme:
- archive = os.path.abspath(archive)
+ archive = abspath_forward_slashes(archive)
path = "{0}://{1}!{2}".format(scheme, archive, path)
elif scheme and scheme.startswith('http'):
path = "{0}://{1}".format(scheme, path)
elif scheme == 's3':
path = "{0}://{1}".format(scheme, path)
else:
- path = os.path.abspath(path)
+ path = abspath_forward_slashes(path)
return path
diff --git a/rasterio/vfs.py b/rasterio/vfs.py
--- a/rasterio/vfs.py
+++ b/rasterio/vfs.py
@@ -59,8 +59,8 @@
elif scheme and scheme == 's3':
result = "/vsis3/{0}".format(path)
elif scheme and scheme != 'file':
- path = path.strip(os.path.sep)
- result = os.path.sep.join(
+ path = path.strip('/')
+ result = '/'.join(
['/vsi{0}'.format(scheme), archive, path])
else:
result = path
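A brief note on the design choice in this patch: GDAL's `/vsi*/` handlers generally accept forward slashes on Windows as well, so joining the prefix, archive and member with `'/'` (rather than `os.path.sep`) keeps the whole VSI string consistent once the archive path has been converted by `abspath_forward_slashes`. A rough illustration, with a made-up Windows archive path:

```python
# Illustrative only: expected shape of the VSI path after the change, assuming
# a zip archive at C:\data\files.zip already converted to forward slashes.
archive = "C:/data/files.zip"
member = "inputs/RGB.byte.tif"
vsi = "/".join(["/vsizip", archive, member])
assert vsi == "/vsizip/C:/data/files.zip/inputs/RGB.byte.tif"
```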
|
{"golden_diff": "diff --git a/rasterio/rio/options.py b/rasterio/rio/options.py\n--- a/rasterio/rio/options.py\n+++ b/rasterio/rio/options.py\n@@ -83,6 +83,9 @@\n out[k] = v\n return out\n \n+def abspath_forward_slashes(path):\n+ \"\"\"Return forward-slashed version of os.path.abspath\"\"\"\n+ return '/'.join(os.path.abspath(path).split(os.path.sep))\n \n def file_in_handler(ctx, param, value):\n \"\"\"Normalize ordinary filesystem and VFS paths\"\"\"\n@@ -95,14 +98,14 @@\n raise click.BadParameter(\n \"Input file {0} does not exist\".format(path_to_check))\n if archive and scheme:\n- archive = os.path.abspath(archive)\n+ archive = abspath_forward_slashes(archive)\n path = \"{0}://{1}!{2}\".format(scheme, archive, path)\n elif scheme and scheme.startswith('http'):\n path = \"{0}://{1}\".format(scheme, path)\n elif scheme == 's3':\n path = \"{0}://{1}\".format(scheme, path)\n else:\n- path = os.path.abspath(path)\n+ path = abspath_forward_slashes(path)\n return path\n \n \ndiff --git a/rasterio/vfs.py b/rasterio/vfs.py\n--- a/rasterio/vfs.py\n+++ b/rasterio/vfs.py\n@@ -59,8 +59,8 @@\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n- path = path.strip(os.path.sep)\n- result = os.path.sep.join(\n+ path = path.strip('/')\n+ result = '/'.join(\n ['/vsi{0}'.format(scheme), archive, path])\n else:\n result = path\n", "issue": "VFS tests fail on Windows due to path strings\nRunning `py.test -v tests\\test_rio_options.py` on Windows fails because of UNIX-style paths in tests. Specifically, two tests fail:\n\n```\ntests\\test_rio_options.py::test_file_in_handler_with_vfs FAILED\ntests\\test_rio_options.py::test_file_in_handler_with_vfs_file FAILED\n```\n\nFor `test_file_in_handler_with_vfs_file`, I think it can be fixed with:\n\n```\nassert retval.endswith(os.path.normpath('tests/data/RGB.byte.tif'))\n```\n\nbut not sure what to do about `test_file_in_handler_with_vfs`. My `retval` from line 54 is:\n\n```\n'zip://D:\\\\matt\\\\rasterio\\\\tests\\\\data\\\\files.zip!/inputs/RGB.byte.tif'\n```\n\nso the path to the zip file is Windows, path inside the zip is UNIX. Both asserts fail here. Hopefully an easy one ...\n\n", "before_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only\n# the following local filesystem schemes are supported.\nSCHEMES = {'gzip': 'gzip', 'zip': 'zip', 'tar': 'tar', 'https': 'curl',\n 'http': 'curl', 's3': 's3'}\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme in (None, '', 'file'):\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'NETCDF:...' 
to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n path = path.strip(os.path.sep)\n result = os.path.sep.join(\n ['/vsi{0}'.format(scheme), archive, path])\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}, {"content": "\"\"\"\nRegistry of common rio CLI options. See cligj for more options.\n\n-a, --all: Use all pixels touched by features. In rio-mask, rio-rasterize\n--as-mask/--not-as-mask: interpret band as mask or not. In rio-shapes\n--band/--mask: use band or mask. In rio-shapes\n--bbox:\n-b, --bidx: band index(es) (singular or multiple value versions).\n In rio-info, rio-sample, rio-shapes, rio-stack (different usages)\n--bounds: bounds in world coordinates.\n In rio-info, rio-rasterize (different usages)\n--count: count of bands. In rio-info\n--crop: Crop raster to extent of features. In rio-mask\n--crs: CRS of input raster. In rio-info\n--default-value: default for rasterized pixels. In rio-rasterize\n--dimensions: Output width, height. In rio-rasterize\n--dst-crs: destination CRS. In rio-transform\n--fill: fill value for pixels not covered by features. In rio-rasterize\n--formats: list available formats. In rio-info\n--height: height of raster. In rio-info\n-i, --invert: Invert mask created from features: In rio-mask\n-j, --geojson-mask: GeoJSON for masking raster. In rio-mask\n--lnglat: geograhpic coordinates of center of raster. In rio-info\n--masked/--not-masked: read masked data from source file.\n In rio-calc, rio-info\n-m, --mode: output file mode (r, r+). In rio-insp\n--name: input file name alias. In rio-calc\n--nodata: nodata value. In rio-info, rio-merge (different usages)\n--photometric: photometric interpretation. In rio-stack\n--property: GeoJSON property to use as values for rasterize. In rio-rasterize\n-r, --res: output resolution.\n In rio-info, rio-rasterize (different usages. TODO: try to combine\n usages, prefer rio-rasterize version)\n--sampling: Inverse of sampling fraction. In rio-shapes\n--shape: shape (width, height) of band. In rio-info\n--src-crs: source CRS.\n In rio-insp, rio-rasterize (different usages. TODO: consolidate usages)\n--stats: print raster stats. In rio-inf\n-t, --dtype: data type. In rio-calc, rio-info (different usages)\n--width: width of raster. In rio-info\n--with-nodata/--without-nodata: include nodata regions or not. 
In rio-shapes.\n-v, --tell-me-more, --verbose\n--vfs: virtual file system.\n\"\"\"\n\n\n# TODO: move file_in_arg and file_out_arg to cligj\n\nimport os.path\n\nimport click\n\nimport rasterio\nfrom rasterio.vfs import parse_path\n\n\ndef _cb_key_val(ctx, param, value):\n\n \"\"\"\n click callback to validate `--opt KEY1=VAL1 --opt KEY2=VAL2` and collect\n in a dictionary like the one below, which is what the CLI function receives.\n If no value or `None` is received then an empty dictionary is returned.\n\n {\n 'KEY1': 'VAL1',\n 'KEY2': 'VAL2'\n }\n\n Note: `==VAL` breaks this as `str.split('=', 1)` is used.\n \"\"\"\n\n if not value:\n return {}\n else:\n out = {}\n for pair in value:\n if '=' not in pair:\n raise click.BadParameter(\"Invalid syntax for KEY=VAL arg: {}\".format(pair))\n else:\n k, v = pair.split('=', 1)\n k = k.lower()\n v = v.lower()\n out[k] = v\n return out\n\n\ndef file_in_handler(ctx, param, value):\n \"\"\"Normalize ordinary filesystem and VFS paths\"\"\"\n try:\n path, archive, scheme = parse_path(value)\n except ValueError as exc:\n raise click.BadParameter(str(exc))\n path_to_check = archive or path\n if not scheme in ['http', 'https', 's3'] and not os.path.exists(path_to_check):\n raise click.BadParameter(\n \"Input file {0} does not exist\".format(path_to_check))\n if archive and scheme:\n archive = os.path.abspath(archive)\n path = \"{0}://{1}!{2}\".format(scheme, archive, path)\n elif scheme and scheme.startswith('http'):\n path = \"{0}://{1}\".format(scheme, path)\n elif scheme == 's3':\n path = \"{0}://{1}\".format(scheme, path)\n else:\n path = os.path.abspath(path)\n return path\n\n\ndef from_like_context(ctx, param, value):\n \"\"\"Return the value for an option from the context if the option\n or `--all` is given, else return None.\"\"\"\n if ctx.obj and ctx.obj.get('like') and (\n value == 'like' or ctx.obj.get('all_like')):\n return ctx.obj['like'][param.name]\n else:\n return None\n\n\ndef like_handler(ctx, param, value):\n \"\"\"Copy a dataset's meta property to the command context for access\n from other callbacks.\"\"\"\n if ctx.obj is None:\n ctx.obj = {}\n if value:\n with rasterio.open(value) as src:\n metadata = src.meta\n ctx.obj['like'] = metadata\n ctx.obj['like']['transform'] = metadata['affine']\n ctx.obj['like']['tags'] = src.tags()\n\n\ndef nodata_handler(ctx, param, value):\n \"\"\"Get nodata value from a template file or command line.\"\"\"\n retval = from_like_context(ctx, param, value)\n if retval is None and value is not None:\n try:\n retval = float(value)\n except:\n raise click.BadParameter(\n \"%s is not a number.\" % repr(value),\n param=param, param_hint='nodata')\n return retval\n\n\n# Singular input file\nfile_in_arg = click.argument('INPUT', callback=file_in_handler)\n\n# Singular output file\nfile_out_arg = click.argument(\n 'OUTPUT',\n type=click.Path(resolve_path=True))\n\nbidx_opt = click.option(\n '-b', '--bidx',\n type=int,\n default=1,\n help=\"Input file band index (default: 1).\")\n\nbidx_mult_opt = click.option(\n '-b', '--bidx',\n multiple=True,\n help=\"Indexes of input file bands.\")\n\n# TODO: may be better suited to cligj\nbounds_opt = click.option(\n '--bounds',\n nargs=4, type=float, default=None,\n help='Output bounds: left bottom right top.')\n\ndimensions_opt = click.option(\n '--dimensions',\n nargs=2, type=int, default=None,\n help='Output dataset width, height in number of pixels.')\n\ndtype_opt = click.option(\n '-t', '--dtype',\n type=click.Choice([\n 'ubyte', 'uint8', 'uint16', 'int16', 'uint32', 
'int32',\n 'float32', 'float64']),\n default=None,\n help=\"Output data type.\")\n\nlike_file_opt = click.option(\n '--like',\n type=click.Path(exists=True),\n help='Raster dataset to use as a template for obtaining affine '\n 'transform (bounds and resolution), crs, data type, and driver '\n 'used to create the output.')\n\nmasked_opt = click.option(\n '--masked/--not-masked',\n default=True,\n help=\"Evaluate expressions using masked arrays (the default) or ordinary \"\n \"numpy arrays.\")\n\noutput_opt = click.option(\n '-o', '--output',\n default=None,\n type=click.Path(resolve_path=True),\n help=\"Path to output file (optional alternative to a positional arg). \"\n \"Existing files will be overwritten (`--force-overwrite` is \"\n \"implied).\")\n\nresolution_opt = click.option(\n '-r', '--res',\n multiple=True, type=float, default=None,\n help='Output dataset resolution in units of coordinate '\n 'reference system. Pixels assumed to be square if this option '\n 'is used once, otherwise use: '\n '--res pixel_width --res pixel_height.')\n\ncreation_options = click.option(\n '--co', 'creation_options',\n metavar='NAME=VALUE',\n multiple=True,\n callback=_cb_key_val,\n help=\"Driver specific creation options.\"\n \"See the documentation for the selected output driver for \"\n \"more information.\")\n\nrgb_opt = click.option(\n '--rgb', 'photometric',\n flag_value='rgb',\n default=False,\n help=\"Set RGB photometric interpretation.\")\n\nforce_overwrite_opt = click.option(\n '--force-overwrite', 'force_overwrite',\n is_flag=True, type=bool, default=False,\n help=\"Always overwrite an existing output file.\")\n\nnodata_opt = click.option(\n '--nodata', callback=nodata_handler, default=None,\n help=\"New nodata value.\")\n\nlike_opt = click.option(\n '--like',\n type=click.Path(exists=True),\n callback=like_handler,\n is_eager=True,\n help=\"Raster dataset to use as a template for obtaining affine \"\n \"transform (bounds and resolution), crs, and nodata values.\")\n\nall_touched_opt = click.option(\n '-a', '--all', '--all_touched', 'all_touched',\n is_flag=True,\n default=False,\n help='Use all pixels touched by features, otherwise (default) use only '\n 'pixels whose center is within the polygon or that are selected by '\n 'Bresenhams line algorithm')\n", "path": "rasterio/rio/options.py"}], "after_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. 
Only\n# the following local filesystem schemes are supported.\nSCHEMES = {'gzip': 'gzip', 'zip': 'zip', 'tar': 'tar', 'https': 'curl',\n 'http': 'curl', 's3': 's3'}\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme in (None, '', 'file'):\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'NETCDF:...' to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n path = path.strip('/')\n result = '/'.join(\n ['/vsi{0}'.format(scheme), archive, path])\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}, {"content": "\"\"\"\nRegistry of common rio CLI options. See cligj for more options.\n\n-a, --all: Use all pixels touched by features. In rio-mask, rio-rasterize\n--as-mask/--not-as-mask: interpret band as mask or not. In rio-shapes\n--band/--mask: use band or mask. In rio-shapes\n--bbox:\n-b, --bidx: band index(es) (singular or multiple value versions).\n In rio-info, rio-sample, rio-shapes, rio-stack (different usages)\n--bounds: bounds in world coordinates.\n In rio-info, rio-rasterize (different usages)\n--count: count of bands. In rio-info\n--crop: Crop raster to extent of features. In rio-mask\n--crs: CRS of input raster. In rio-info\n--default-value: default for rasterized pixels. In rio-rasterize\n--dimensions: Output width, height. In rio-rasterize\n--dst-crs: destination CRS. In rio-transform\n--fill: fill value for pixels not covered by features. In rio-rasterize\n--formats: list available formats. In rio-info\n--height: height of raster. In rio-info\n-i, --invert: Invert mask created from features: In rio-mask\n-j, --geojson-mask: GeoJSON for masking raster. In rio-mask\n--lnglat: geograhpic coordinates of center of raster. In rio-info\n--masked/--not-masked: read masked data from source file.\n In rio-calc, rio-info\n-m, --mode: output file mode (r, r+). In rio-insp\n--name: input file name alias. In rio-calc\n--nodata: nodata value. In rio-info, rio-merge (different usages)\n--photometric: photometric interpretation. In rio-stack\n--property: GeoJSON property to use as values for rasterize. In rio-rasterize\n-r, --res: output resolution.\n In rio-info, rio-rasterize (different usages. TODO: try to combine\n usages, prefer rio-rasterize version)\n--sampling: Inverse of sampling fraction. In rio-shapes\n--shape: shape (width, height) of band. 
In rio-info\n--src-crs: source CRS.\n In rio-insp, rio-rasterize (different usages. TODO: consolidate usages)\n--stats: print raster stats. In rio-inf\n-t, --dtype: data type. In rio-calc, rio-info (different usages)\n--width: width of raster. In rio-info\n--with-nodata/--without-nodata: include nodata regions or not. In rio-shapes.\n-v, --tell-me-more, --verbose\n--vfs: virtual file system.\n\"\"\"\n\n\n# TODO: move file_in_arg and file_out_arg to cligj\n\nimport os.path\n\nimport click\n\nimport rasterio\nfrom rasterio.vfs import parse_path\n\n\ndef _cb_key_val(ctx, param, value):\n\n \"\"\"\n click callback to validate `--opt KEY1=VAL1 --opt KEY2=VAL2` and collect\n in a dictionary like the one below, which is what the CLI function receives.\n If no value or `None` is received then an empty dictionary is returned.\n\n {\n 'KEY1': 'VAL1',\n 'KEY2': 'VAL2'\n }\n\n Note: `==VAL` breaks this as `str.split('=', 1)` is used.\n \"\"\"\n\n if not value:\n return {}\n else:\n out = {}\n for pair in value:\n if '=' not in pair:\n raise click.BadParameter(\"Invalid syntax for KEY=VAL arg: {}\".format(pair))\n else:\n k, v = pair.split('=', 1)\n k = k.lower()\n v = v.lower()\n out[k] = v\n return out\n\ndef abspath_forward_slashes(path):\n \"\"\"Return forward-slashed version of os.path.abspath\"\"\"\n return '/'.join(os.path.abspath(path).split(os.path.sep))\n\ndef file_in_handler(ctx, param, value):\n \"\"\"Normalize ordinary filesystem and VFS paths\"\"\"\n try:\n path, archive, scheme = parse_path(value)\n except ValueError as exc:\n raise click.BadParameter(str(exc))\n path_to_check = archive or path\n if not scheme in ['http', 'https', 's3'] and not os.path.exists(path_to_check):\n raise click.BadParameter(\n \"Input file {0} does not exist\".format(path_to_check))\n if archive and scheme:\n archive = abspath_forward_slashes(archive)\n path = \"{0}://{1}!{2}\".format(scheme, archive, path)\n elif scheme and scheme.startswith('http'):\n path = \"{0}://{1}\".format(scheme, path)\n elif scheme == 's3':\n path = \"{0}://{1}\".format(scheme, path)\n else:\n path = abspath_forward_slashes(path)\n return path\n\n\ndef from_like_context(ctx, param, value):\n \"\"\"Return the value for an option from the context if the option\n or `--all` is given, else return None.\"\"\"\n if ctx.obj and ctx.obj.get('like') and (\n value == 'like' or ctx.obj.get('all_like')):\n return ctx.obj['like'][param.name]\n else:\n return None\n\n\ndef like_handler(ctx, param, value):\n \"\"\"Copy a dataset's meta property to the command context for access\n from other callbacks.\"\"\"\n if ctx.obj is None:\n ctx.obj = {}\n if value:\n with rasterio.open(value) as src:\n metadata = src.meta\n ctx.obj['like'] = metadata\n ctx.obj['like']['transform'] = metadata['affine']\n ctx.obj['like']['tags'] = src.tags()\n\n\ndef nodata_handler(ctx, param, value):\n \"\"\"Get nodata value from a template file or command line.\"\"\"\n retval = from_like_context(ctx, param, value)\n if retval is None and value is not None:\n try:\n retval = float(value)\n except:\n raise click.BadParameter(\n \"%s is not a number.\" % repr(value),\n param=param, param_hint='nodata')\n return retval\n\n\n# Singular input file\nfile_in_arg = click.argument('INPUT', callback=file_in_handler)\n\n# Singular output file\nfile_out_arg = click.argument(\n 'OUTPUT',\n type=click.Path(resolve_path=True))\n\nbidx_opt = click.option(\n '-b', '--bidx',\n type=int,\n default=1,\n help=\"Input file band index (default: 1).\")\n\nbidx_mult_opt = click.option(\n '-b', 
'--bidx',\n multiple=True,\n help=\"Indexes of input file bands.\")\n\n# TODO: may be better suited to cligj\nbounds_opt = click.option(\n '--bounds',\n nargs=4, type=float, default=None,\n help='Output bounds: left bottom right top.')\n\ndimensions_opt = click.option(\n '--dimensions',\n nargs=2, type=int, default=None,\n help='Output dataset width, height in number of pixels.')\n\ndtype_opt = click.option(\n '-t', '--dtype',\n type=click.Choice([\n 'ubyte', 'uint8', 'uint16', 'int16', 'uint32', 'int32',\n 'float32', 'float64']),\n default=None,\n help=\"Output data type.\")\n\nlike_file_opt = click.option(\n '--like',\n type=click.Path(exists=True),\n help='Raster dataset to use as a template for obtaining affine '\n 'transform (bounds and resolution), crs, data type, and driver '\n 'used to create the output.')\n\nmasked_opt = click.option(\n '--masked/--not-masked',\n default=True,\n help=\"Evaluate expressions using masked arrays (the default) or ordinary \"\n \"numpy arrays.\")\n\noutput_opt = click.option(\n '-o', '--output',\n default=None,\n type=click.Path(resolve_path=True),\n help=\"Path to output file (optional alternative to a positional arg). \"\n \"Existing files will be overwritten (`--force-overwrite` is \"\n \"implied).\")\n\nresolution_opt = click.option(\n '-r', '--res',\n multiple=True, type=float, default=None,\n help='Output dataset resolution in units of coordinate '\n 'reference system. Pixels assumed to be square if this option '\n 'is used once, otherwise use: '\n '--res pixel_width --res pixel_height.')\n\ncreation_options = click.option(\n '--co', 'creation_options',\n metavar='NAME=VALUE',\n multiple=True,\n callback=_cb_key_val,\n help=\"Driver specific creation options.\"\n \"See the documentation for the selected output driver for \"\n \"more information.\")\n\nrgb_opt = click.option(\n '--rgb', 'photometric',\n flag_value='rgb',\n default=False,\n help=\"Set RGB photometric interpretation.\")\n\nforce_overwrite_opt = click.option(\n '--force-overwrite', 'force_overwrite',\n is_flag=True, type=bool, default=False,\n help=\"Always overwrite an existing output file.\")\n\nnodata_opt = click.option(\n '--nodata', callback=nodata_handler, default=None,\n help=\"New nodata value.\")\n\nlike_opt = click.option(\n '--like',\n type=click.Path(exists=True),\n callback=like_handler,\n is_eager=True,\n help=\"Raster dataset to use as a template for obtaining affine \"\n \"transform (bounds and resolution), crs, and nodata values.\")\n\nall_touched_opt = click.option(\n '-a', '--all', '--all_touched', 'all_touched',\n is_flag=True,\n default=False,\n help='Use all pixels touched by features, otherwise (default) use only '\n 'pixels whose center is within the polygon or that are selected by '\n 'Bresenhams line algorithm')\n", "path": "rasterio/rio/options.py"}]}
| 3,970 | 419 |
gh_patches_debug_7561
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6693
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
T - TypeError 'count.toLocaleString' in SSR
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/7090931/
```
TypeError: Cannot read property 'toLocaleString' of undefined
File "/app/kuma/javascript/dist/ssr.js", line 22, in kt
'{snip} ocuments found for "%(query)s" in %(locale)s.',a),{count:a.toLocaleString(),locale:n,query:c})," ",t))}function jt(e){var t=e.locale;return( {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in a
'{snip} .state);null!=d&&(f.state=r({},f.state,d))}}else if(D={},f=a(o.props,i,s),null==(f=G(a,o.props,f,i))||null==f.render)return void he(e=f,a);i {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in ye
'{snip} lement(e);){var i=e,c=i.type;if("function"!=typeof c)break;a(i,c)}return{child:e,context:t}}var ve=function(){function e(t,n){if(!(this inst {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in e.render
'{snip} -- --\x3e"+I(n):(this.previousWasTextNode=!0,I(n));if(e=(t=ye(e,t,this.threadID)).child,t=t.context,null===e||!1===e)return"";if(!o.isValidE {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in e.read
'{snip} +=c}else{var f=i.children[i.childIndex++],m="";try{m+=this.render(f,i.context,i.domNamespace)}catch(e){throw e}r.length<=this.suspenseDepth& {snip}
...
(5 additional frame(s) were not displayed)
```
--- END ISSUE ---
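The traceback above is the JavaScript/SSR symptom; the underlying cause, which the patch below guards against, is that a pagination error from the search API produces a payload with only a `detail` key and no `count`. A small hypothetical illustration of that shape (the exact error string is an assumption, not taken from the code):

```python
# What the search API hands back for an out-of-range page (illustrative):
results = {"detail": "Invalid page."}

# The view's existing error check looks only at "error" and "q", so this
# payload sails through as a "successful" result...
error = results.get("error") or results.get("q")
assert error is None

# ...and the SSR code then reads results.count, which is undefined, before
# calling toLocaleString() on it: the TypeError from the issue.
assert "count" not in results
```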
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/search/views.py`
Content:
```
1 from django.shortcuts import render
2 from django.urls import reverse_lazy
3 from django.views.decorators.cache import never_cache
4 from django.views.decorators.http import require_GET
5 from django.views.generic import RedirectView
6 from ratelimit.decorators import ratelimit
7
8 from kuma.api.v1.views import search as search_api
9 from kuma.core.decorators import shared_cache_control
10 from kuma.core.utils import is_wiki
11
12 from .search import SearchView
13
14 # Since the search endpoint accepts user input (via query parameters) and its
15 # response is compressed, use rate limiting to mitigate the BREACH attack
16 # (see http://breachattack.com/). It still needs to allow a user to click
17 # the filter switches (bug 1426968).
18 # Alternate: forbid gzip by setting Content-Encoding: identity
19 @never_cache
20 @require_GET
21 @ratelimit(key="user_or_ip", rate="25/m", block=True)
22 def search(request, *args, **kwargs):
23 """
24 The search view.
25 """
26 if is_wiki(request):
27 return wiki_search(request, *args, **kwargs)
28
29 results = search_api(request, *args, **kwargs).data
30
31 # Determine if there were validation errors
32 error = results.get("error") or results.get("q")
33 # If q is returned in the data, there was a validation error for that field,
34 # so return 400 status.
35 status = 200 if results.get("q") is None else 400
36
37 context = {"results": {"results": None if error else results, "error": error}}
38
39 return render(request, "search/react.html", context, status=status)
40
41
42 wiki_search = SearchView.as_view()
43
44
45 class SearchRedirectView(RedirectView):
46 permanent = True
47
48 def get_redirect_url(self, *args, **kwargs):
49 query_string = self.request.META.get("QUERY_STRING")
50 url = reverse_lazy(
51 "api.v1.search", kwargs={"locale": self.request.LANGUAGE_CODE}
52 )
53 if query_string:
54 url += "?" + query_string
55 return url
56
57
58 @shared_cache_control(s_maxage=60 * 60 * 24 * 7)
59 def plugin(request):
60 """Render an OpenSearch Plugin."""
61 return render(
62 request,
63 "search/plugin.html",
64 {"locale": request.LANGUAGE_CODE},
65 content_type="application/opensearchdescription+xml",
66 )
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kuma/search/views.py b/kuma/search/views.py
--- a/kuma/search/views.py
+++ b/kuma/search/views.py
@@ -33,6 +33,10 @@
# If q is returned in the data, there was a validation error for that field,
# so return 400 status.
status = 200 if results.get("q") is None else 400
+ # If there was an error with the pagination you'll get...
+ if results.get("detail"):
+ error = str(results["detail"])
+ status = 400
context = {"results": {"results": None if error else results, "error": error}}
|
{"golden_diff": "diff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -33,6 +33,10 @@\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n+ # If there was an error with the pagination you'll get...\n+ if results.get(\"detail\"):\n+ error = str(results[\"detail\"])\n+ status = 400\n \n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n", "issue": "T - TypeError 'count.toLocaleString' in SSR\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/7090931/\n\n```\nTypeError: Cannot read property 'toLocaleString' of undefined\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in kt\n '{snip} ocuments found for \"%(query)s\" in %(locale)s.',a),{count:a.toLocaleString(),locale:n,query:c}),\" \",t))}function jt(e){var t=e.locale;return( {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in a\n '{snip} .state);null!=d&&(f.state=r({},f.state,d))}}else if(D={},f=a(o.props,i,s),null==(f=G(a,o.props,f,i))||null==f.render)return void he(e=f,a);i {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in ye\n '{snip} lement(e);){var i=e,c=i.type;if(\"function\"!=typeof c)break;a(i,c)}return{child:e,context:t}}var ve=function(){function e(t,n){if(!(this inst {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in e.render\n '{snip} -- --\\x3e\"+I(n):(this.previousWasTextNode=!0,I(n));if(e=(t=ye(e,t,this.threadID)).child,t=t.context,null===e||!1===e)return\"\";if(!o.isValidE {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in e.read\n '{snip} +=c}else{var f=i.children[i.childIndex++],m=\"\";try{m+=this.render(f,i.context,i.domNamespace)}catch(e){throw e}r.length<=this.suspenseDepth& {snip}\n...\n(5 additional frame(s) were not displayed)\n```\n", "before_files": [{"content": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.views import search as search_api\nfrom kuma.core.decorators import shared_cache_control\nfrom kuma.core.utils import is_wiki\n\nfrom .search import SearchView\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). 
It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n \"\"\"\n if is_wiki(request):\n return wiki_search(request, *args, **kwargs)\n\n results = search_api(request, *args, **kwargs).data\n\n # Determine if there were validation errors\n error = results.get(\"error\") or results.get(\"q\")\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n\n return render(request, \"search/react.html\", context, status=status)\n\n\nwiki_search = SearchView.as_view()\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\n \"api.v1.search\", kwargs={\"locale\": self.request.LANGUAGE_CODE}\n )\n if query_string:\n url += \"?\" + query_string\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}], "after_files": [{"content": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.views import search as search_api\nfrom kuma.core.decorators import shared_cache_control\nfrom kuma.core.utils import is_wiki\n\nfrom .search import SearchView\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). 
It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n \"\"\"\n if is_wiki(request):\n return wiki_search(request, *args, **kwargs)\n\n results = search_api(request, *args, **kwargs).data\n\n # Determine if there were validation errors\n error = results.get(\"error\") or results.get(\"q\")\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n # If there was an error with the pagination you'll get...\n if results.get(\"detail\"):\n error = str(results[\"detail\"])\n status = 400\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n\n return render(request, \"search/react.html\", context, status=status)\n\n\nwiki_search = SearchView.as_view()\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\n \"api.v1.search\", kwargs={\"locale\": self.request.LANGUAGE_CODE}\n )\n if query_string:\n url += \"?\" + query_string\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}]}
| 1,357 | 156 |
gh_patches_debug_40463
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1371
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Celery trace.id integration
**Is your feature request related to a problem? Please describe.**
As of now, it is impossible to keep the same trace id between a Django view and a Celery task launched from the same view.
**Describe the solution you'd like**
Provide a way to easily pass a trace parent string to the Celery task, preferably via its headers field (introduced in Celery 3.1).
### What would it looks likes
**User side code (Django view)**
```python
def get(self, request):
transaction = execution_context.get_transaction()
trace_parent = transaction.trace_parent
trace_parent_string = trace_parent.to_string()
my_celery_task.apply_async(headers={"elasticapm": {"trace_parent_string": trace_parent_string} })
```
**Library side code (`elasticapm.contrib.celery.__init__.py`), rewrite of `begin_transaction()`, naïve implementation**
```python
def begin_transaction(*args, **kwargs):
trace_parent = None
try:
trace_parent_string = kwargs["task"].request.headers["elasticapm"]["trace_parent_string"]
trace_parent = TraceParent.from_string(trace_parent_string)
except:
pass
client.begin_transaction("celery", trace_parent=trace_parent)
```
- **Why use the Celery headers field?** It seems the most unobtrusive way of doing it.
- **Why use a nested field (["elasticapm"]["trace_parent_string"])?** It seems more future-proof: useful future fields for elasticapm could be added under the "elasticapm" key. Users of the API shouldn't see their code break, since they know that, when using this library, the Celery headers field has a reserved key "elasticapm" used for this integration.
**Additional context**
**For anyone wanting to try it, BEWARE!!** There is a Celery [bug](https://github.com/celery/celery/issues/4875) concerning its headers field.
You might have to do this:
```python
my_celery_task.apply_async(headers={"headers": {"elasticapm": {"trace_parent_string": trace_parent_string} } })
```
Edits: fixed code error/typos
--- END ISSUE ---
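Putting the proposal from the issue together, a minimal end-to-end sketch could look as follows. It only restates the idea from the issue text above, not necessarily the fix that was eventually adopted; the import paths, the `launch_task`/`make_begin_transaction`/`my_celery_task` names and the exception tuple are assumptions for illustration, and it includes the doubled "headers" key workaround for the Celery bug linked above.

```python
# Sketch of the issue's proposal: propagate the trace id from a Django view
# into the Celery worker via task headers. Import paths are assumptions.
from elasticapm.traces import execution_context
from elasticapm.utils.disttracing import TraceParent


# Producer side (e.g. inside a Django view):
def launch_task(my_celery_task):
    transaction = execution_context.get_transaction()
    headers = {}
    if transaction is not None:
        headers["elasticapm"] = {"trace_parent_string": transaction.trace_parent.to_string()}
    # Doubled "headers" key: workaround for the Celery headers bug linked in the issue.
    my_celery_task.apply_async(headers={"headers": headers})


# Consumer side (what register_instrumentation could hook up as the task_prerun handler):
def make_begin_transaction(client):
    def begin_transaction(*args, **kwargs):
        trace_parent = None
        try:
            trace_parent_string = kwargs["task"].request.headers["elasticapm"]["trace_parent_string"]
            trace_parent = TraceParent.from_string(trace_parent_string)
        except (KeyError, TypeError, AttributeError):
            # No (or malformed) trace parent header: start a new trace instead.
            pass
        client.begin_transaction("celery", trace_parent=trace_parent)
    return begin_transaction
```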
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/celery/__init__.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from celery import signals, states
33
34 import elasticapm
35 from elasticapm.conf import constants
36 from elasticapm.utils import get_name_from_func
37
38
39 class CeleryFilter(object):
40 def filter(self, record):
41 if record.funcName in ("_log_error",):
42 return 0
43 else:
44 return 1
45
46
47 def register_exception_tracking(client):
48 dispatch_uid = "elasticapm-exc-tracking"
49
50 def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):
51 client.capture_exception(
52 extra={"task_id": task_id, "task": sender, "args": args, "kwargs": kwargs}, handled=False
53 )
54
55 signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)
56 signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)
57 _register_worker_signals(client)
58
59
60 def register_instrumentation(client):
61 def begin_transaction(*args, **kwargs):
62 client.begin_transaction("celery")
63
64 def end_transaction(task_id, task, *args, **kwargs):
65 name = get_name_from_func(task)
66 state = kwargs.get("state", "None")
67 if state == states.SUCCESS:
68 outcome = constants.OUTCOME.SUCCESS
69 elif state in states.EXCEPTION_STATES:
70 outcome = constants.OUTCOME.FAILURE
71 else:
72 outcome = constants.OUTCOME.UNKNOWN
73 elasticapm.set_transaction_outcome(outcome, override=False)
74 client.end_transaction(name, state)
75
76 dispatch_uid = "elasticapm-tracing-%s"
77
78 # unregister any existing clients
79 signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % "prerun")
80 signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % "postrun")
81
82 # register for this client
83 signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % "prerun", weak=False)
84 signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % "postrun")
85 _register_worker_signals(client)
86
87
88 def _register_worker_signals(client):
89 def worker_shutdown(*args, **kwargs):
90 client.close()
91
92 def connect_worker_process_init(*args, **kwargs):
93 signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid="elasticapm-shutdown-worker", weak=False)
94
95 signals.worker_init.connect(
96 connect_worker_process_init, dispatch_uid="elasticapm-connect-start-threads", weak=False
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/contrib/celery/__init__.py b/elasticapm/contrib/celery/__init__.py
--- a/elasticapm/contrib/celery/__init__.py
+++ b/elasticapm/contrib/celery/__init__.py
@@ -27,13 +27,15 @@
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-
+from contextlib import suppress
from celery import signals, states
import elasticapm
from elasticapm.conf import constants
+from elasticapm.traces import execution_context
from elasticapm.utils import get_name_from_func
+from elasticapm.utils.disttracing import TraceParent
class CeleryFilter(object):
@@ -57,9 +59,41 @@
_register_worker_signals(client)
+def set_celery_headers(headers=None, **kwargs):
+ """
+ Add elasticapm specific information to celery headers
+ """
+ headers = {} if headers is None else headers
+
+ transaction = execution_context.get_transaction()
+ if transaction is not None:
+ trace_parent = transaction.trace_parent
+ trace_parent_string = trace_parent.to_string()
+
+ headers.update({"elasticapm": {"trace_parent_string": trace_parent_string}})
+
+
+def get_trace_parent(celery_task):
+ """
+ Return a trace parent contained in the request headers of a Celery Task object or None
+ """
+ trace_parent = None
+ with suppress(AttributeError, KeyError, TypeError):
+ if celery_task.request.headers is not None:
+ trace_parent_string = celery_task.request.headers["elasticapm"]["trace_parent_string"]
+ trace_parent = TraceParent.from_string(trace_parent_string)
+ else:
+ trace_parent_string = celery_task.request.elasticapm["trace_parent_string"]
+ trace_parent = TraceParent.from_string(trace_parent_string)
+ return trace_parent
+
+
def register_instrumentation(client):
def begin_transaction(*args, **kwargs):
- client.begin_transaction("celery")
+ task = kwargs["task"]
+
+ trace_parent = get_trace_parent(task)
+ client.begin_transaction("celery", trace_parent=trace_parent)
def end_transaction(task_id, task, *args, **kwargs):
name = get_name_from_func(task)
@@ -76,10 +110,12 @@
dispatch_uid = "elasticapm-tracing-%s"
# unregister any existing clients
+ signals.before_task_publish.disconnect(set_celery_headers, dispatch_uid=dispatch_uid % "before-publish")
signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % "prerun")
signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % "postrun")
# register for this client
+ signals.before_task_publish.connect(set_celery_headers, dispatch_uid=dispatch_uid % "before-publish")
signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % "prerun", weak=False)
signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % "postrun")
_register_worker_signals(client)
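
With a change along these lines, no manual plumbing is needed on the caller side; a hypothetical Django view (module and task names are made up for illustration) would simply be:

```python
from django.http import HttpResponse

from myapp.tasks import my_celery_task  # hypothetical task module


def my_view(request):
    # before_task_publish attaches the trace parent to the outgoing Celery headers,
    # and the worker-side task_prerun handler reads it back, so the view and the
    # task end up under the same trace id.
    my_celery_task.delay()
    return HttpResponse("ok")
```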
|
{"golden_diff": "diff --git a/elasticapm/contrib/celery/__init__.py b/elasticapm/contrib/celery/__init__.py\n--- a/elasticapm/contrib/celery/__init__.py\n+++ b/elasticapm/contrib/celery/__init__.py\n@@ -27,13 +27,15 @@\n # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n-\n+from contextlib import suppress\n \n from celery import signals, states\n \n import elasticapm\n from elasticapm.conf import constants\n+from elasticapm.traces import execution_context\n from elasticapm.utils import get_name_from_func\n+from elasticapm.utils.disttracing import TraceParent\n \n \n class CeleryFilter(object):\n@@ -57,9 +59,41 @@\n _register_worker_signals(client)\n \n \n+def set_celery_headers(headers=None, **kwargs):\n+ \"\"\"\n+ Add elasticapm specific information to celery headers\n+ \"\"\"\n+ headers = {} if headers is None else headers\n+\n+ transaction = execution_context.get_transaction()\n+ if transaction is not None:\n+ trace_parent = transaction.trace_parent\n+ trace_parent_string = trace_parent.to_string()\n+\n+ headers.update({\"elasticapm\": {\"trace_parent_string\": trace_parent_string}})\n+\n+\n+def get_trace_parent(celery_task):\n+ \"\"\"\n+ Return a trace parent contained in the request headers of a Celery Task object or None\n+ \"\"\"\n+ trace_parent = None\n+ with suppress(AttributeError, KeyError, TypeError):\n+ if celery_task.request.headers is not None:\n+ trace_parent_string = celery_task.request.headers[\"elasticapm\"][\"trace_parent_string\"]\n+ trace_parent = TraceParent.from_string(trace_parent_string)\n+ else:\n+ trace_parent_string = celery_task.request.elasticapm[\"trace_parent_string\"]\n+ trace_parent = TraceParent.from_string(trace_parent_string)\n+ return trace_parent\n+\n+\n def register_instrumentation(client):\n def begin_transaction(*args, **kwargs):\n- client.begin_transaction(\"celery\")\n+ task = kwargs[\"task\"]\n+\n+ trace_parent = get_trace_parent(task)\n+ client.begin_transaction(\"celery\", trace_parent=trace_parent)\n \n def end_transaction(task_id, task, *args, **kwargs):\n name = get_name_from_func(task)\n@@ -76,10 +110,12 @@\n dispatch_uid = \"elasticapm-tracing-%s\"\n \n # unregister any existing clients\n+ signals.before_task_publish.disconnect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\")\n signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % \"postrun\")\n \n # register for this client\n+ signals.before_task_publish.connect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\", weak=False)\n signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % \"postrun\")\n _register_worker_signals(client)\n", "issue": "Django Celery trace.id integration\n**Is your feature request related to a problem? Please describe.**\r\nAs of now, it is impossible to keep the same trace id between a Django view and a Celery task launched from the same view.\r\n\r\n**Describe the solution you'd like**\r\nProvide a way to easily pass a trace parent string to the Celery task. 
Preferably via it's headers field (introduced in Celery 3.1).\r\n\r\n### What would it looks likes\r\n**User side code (Django view)**\r\n```python\r\ndef get(self, request):\r\n transaction = execution_context.get_transaction()\r\n trace_parent = transaction.trace_parent\r\n trace_parent_string = trace_parent.to_string()\r\n my_celery_task.apply_async(headers={\"elasticapm\": {\"trace_parent_string\": trace_parent_string} })\r\n```\r\n\r\n**Library side code (`elasticapm.contrib.celery.__init__.py`), rewrite of `begin_transaction()`, na\u00efve implementation**\r\n```python\r\ndef begin_transaction(*args, **kwargs):\r\n trace_parent = None\r\n try:\r\n trace_parent_string = kwargs[\"task\"].request.headers[\"elasticapm\"][\"trace_parent_string\"]\r\n trace_parent = TraceParent.from_string(trace_parent_string)\r\n except:\r\n pass\r\n client.begin_transaction(\"celery\", trace_parent=trace_parent)\r\n```\r\n\r\n- **Why using Celery headers field ?** It seems the most unobstrusive way of doing it.\r\n- **Why using a nested field ([\"elasticapm\"][\"trace_parent_string\"]) ?** Seems \"future proof\", usefull future fields for elasticapm could be added under the \"elasticapm\" key. Users of the API shouldn't see their code break as they are aware that using this library, the headers Celery field has a reserved key \"elasticapm\" used for this integration.\r\n\r\n**Additional context**\r\n**For anyone wanting to try it, BEWARE !!** There is a Celery [bug](https://github.com/celery/celery/issues/4875) concerning it's headers field.\r\nYou might have to do this:\r\n```python\r\nmy_celery_task.apply_async(headers={\"headers\": {\"elasticapm\": {\"trace_parent_string\": trace_parent_string} } })\r\n```\r\n\r\nEdits: fixed code error/typos\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom celery import signals, states\n\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import get_name_from_func\n\n\nclass CeleryFilter(object):\n def filter(self, record):\n if record.funcName in (\"_log_error\",):\n return 0\n else:\n return 1\n\n\ndef register_exception_tracking(client):\n dispatch_uid = \"elasticapm-exc-tracking\"\n\n def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):\n client.capture_exception(\n extra={\"task_id\": task_id, \"task\": sender, \"args\": args, \"kwargs\": kwargs}, handled=False\n )\n\n signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)\n signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)\n _register_worker_signals(client)\n\n\ndef register_instrumentation(client):\n def begin_transaction(*args, **kwargs):\n client.begin_transaction(\"celery\")\n\n def end_transaction(task_id, task, *args, **kwargs):\n name = get_name_from_func(task)\n state = kwargs.get(\"state\", \"None\")\n if state == states.SUCCESS:\n outcome = constants.OUTCOME.SUCCESS\n elif state in states.EXCEPTION_STATES:\n outcome = constants.OUTCOME.FAILURE\n else:\n outcome = constants.OUTCOME.UNKNOWN\n elasticapm.set_transaction_outcome(outcome, override=False)\n client.end_transaction(name, state)\n\n dispatch_uid = \"elasticapm-tracing-%s\"\n\n # unregister any existing clients\n signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\")\n signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % \"postrun\")\n\n # register for this client\n signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\", weak=False)\n signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % \"postrun\")\n _register_worker_signals(client)\n\n\ndef _register_worker_signals(client):\n def worker_shutdown(*args, **kwargs):\n client.close()\n\n def connect_worker_process_init(*args, **kwargs):\n signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid=\"elasticapm-shutdown-worker\", weak=False)\n\n signals.worker_init.connect(\n connect_worker_process_init, dispatch_uid=\"elasticapm-connect-start-threads\", weak=False\n )\n", "path": "elasticapm/contrib/celery/__init__.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the 
copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nfrom contextlib import suppress\n\nfrom celery import signals, states\n\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.traces import execution_context\nfrom elasticapm.utils import get_name_from_func\nfrom elasticapm.utils.disttracing import TraceParent\n\n\nclass CeleryFilter(object):\n def filter(self, record):\n if record.funcName in (\"_log_error\",):\n return 0\n else:\n return 1\n\n\ndef register_exception_tracking(client):\n dispatch_uid = \"elasticapm-exc-tracking\"\n\n def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):\n client.capture_exception(\n extra={\"task_id\": task_id, \"task\": sender, \"args\": args, \"kwargs\": kwargs}, handled=False\n )\n\n signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)\n signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)\n _register_worker_signals(client)\n\n\ndef set_celery_headers(headers=None, **kwargs):\n \"\"\"\n Add elasticapm specific information to celery headers\n \"\"\"\n headers = {} if headers is None else headers\n\n transaction = execution_context.get_transaction()\n if transaction is not None:\n trace_parent = transaction.trace_parent\n trace_parent_string = trace_parent.to_string()\n\n headers.update({\"elasticapm\": {\"trace_parent_string\": trace_parent_string}})\n\n\ndef get_trace_parent(celery_task):\n \"\"\"\n Return a trace parent contained in the request headers of a Celery Task object or None\n \"\"\"\n trace_parent = None\n with suppress(AttributeError, KeyError, TypeError):\n if celery_task.request.headers is not None:\n trace_parent_string = celery_task.request.headers[\"elasticapm\"][\"trace_parent_string\"]\n trace_parent = TraceParent.from_string(trace_parent_string)\n else:\n trace_parent_string = celery_task.request.elasticapm[\"trace_parent_string\"]\n trace_parent = TraceParent.from_string(trace_parent_string)\n return trace_parent\n\n\ndef register_instrumentation(client):\n def begin_transaction(*args, **kwargs):\n task = kwargs[\"task\"]\n\n trace_parent = get_trace_parent(task)\n client.begin_transaction(\"celery\", trace_parent=trace_parent)\n\n def end_transaction(task_id, task, *args, **kwargs):\n name = get_name_from_func(task)\n state = kwargs.get(\"state\", \"None\")\n if state == states.SUCCESS:\n outcome = constants.OUTCOME.SUCCESS\n elif state in states.EXCEPTION_STATES:\n outcome = constants.OUTCOME.FAILURE\n else:\n outcome = constants.OUTCOME.UNKNOWN\n elasticapm.set_transaction_outcome(outcome, override=False)\n client.end_transaction(name, state)\n\n dispatch_uid = 
\"elasticapm-tracing-%s\"\n\n # unregister any existing clients\n signals.before_task_publish.disconnect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\")\n signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % \"postrun\")\n\n # register for this client\n signals.before_task_publish.connect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\", weak=False)\n signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % \"postrun\")\n _register_worker_signals(client)\n\n\ndef _register_worker_signals(client):\n def worker_shutdown(*args, **kwargs):\n client.close()\n\n def connect_worker_process_init(*args, **kwargs):\n signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid=\"elasticapm-shutdown-worker\", weak=False)\n\n signals.worker_init.connect(\n connect_worker_process_init, dispatch_uid=\"elasticapm-connect-start-threads\", weak=False\n )\n", "path": "elasticapm/contrib/celery/__init__.py"}]}
| 1,809 | 738 |
gh_patches_debug_46195
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-459
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support to replace original keys in post-transform
**Is your feature request related to a problem? Please describe.**
If the `output_postfix` is None, the post transform should use the original keys to save memory.
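
In other words, the key that a dictionary post-transform writes to would be chosen roughly as follows (illustrative sketch, not the actual MONAI code):

```python
from typing import Optional


def resolve_output_key(key: str, output_postfix: Optional[str]) -> str:
    """Pick the dictionary key a post-transform should write its result to (sketch)."""
    # With a postfix, the result is stored under a new key, e.g. "pred_act";
    # with output_postfix=None, the original entry is replaced in place, saving memory.
    return key if output_postfix is None else f"{key}_{output_postfix}"
```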
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/transforms/post/dictionary.py`
Content:
```
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11 """
12 A collection of dictionary-based wrappers around the "vanilla" transforms for model output tensors
13 defined in :py:class:`monai.transforms.utility.array`.
14
15 Class names are ended with 'd' to denote dictionary-based transforms.
16 """
17
18 from monai.utils.misc import ensure_tuple_rep
19 from monai.transforms.compose import MapTransform
20 from monai.transforms.post.array import SplitChannel, Activations, AsDiscrete, KeepLargestConnectedComponent
21
22
23 class SplitChanneld(MapTransform):
24 """
25 Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`.
26 All the input specified by `keys` should be splitted into same count of data.
27
28 """
29
30 def __init__(self, keys, output_postfixes, to_onehot=False, num_classes=None):
31 """
32 Args:
33 keys (hashable items): keys of the corresponding items to be transformed.
34 See also: :py:class:`monai.transforms.compose.MapTransform`
35 output_postfixes (list, tuple): the postfixes to construct keys to store splitted data.
36 for example: if the key of input data is `pred` and split 2 classes, the output
37 data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1])
38 to_onehot (bool or list of bool): whether to convert the data to One-Hot format, default is False.
39 num_classes (int or list of int): the class number used to convert to One-Hot format
40 if `to_onehot` is True.
41 """
42 super().__init__(keys)
43 if not isinstance(output_postfixes, (list, tuple)):
44 raise ValueError("must specify key postfixes to store splitted data.")
45 self.output_postfixes = output_postfixes
46 self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))
47 self.num_classes = ensure_tuple_rep(num_classes, len(self.keys))
48 self.splitter = SplitChannel()
49
50 def __call__(self, data):
51 d = dict(data)
52 for idx, key in enumerate(self.keys):
53 rets = self.splitter(d[key], self.to_onehot[idx], self.num_classes[idx])
54 assert len(self.output_postfixes) == len(rets), "count of splitted results must match output_postfixes."
55 for i, r in enumerate(rets):
56 d[f"{key}_{self.output_postfixes[i]}"] = r
57 return d
58
59
60 class Activationsd(MapTransform):
61 """
62 Dictionary-based wrapper of :py:class:`monai.transforms.AddActivations`.
63 Add activation layers to the input data specified by `keys`.
64 """
65
66 def __init__(self, keys, output_postfix="act", sigmoid=False, softmax=False, other=None):
67 """
68 Args:
69 keys (hashable items): keys of the corresponding items to model output and label.
70 See also: :py:class:`monai.transforms.compose.MapTransform`
71 output_postfix (str): the postfix string to construct keys to store converted data.
72 for example: if the keys of input data is `pred` and `label`, output_postfix is `act`,
73 the output data keys will be: `pred_act`, `label_act`.
74 sigmoid (bool, tuple or list of bool): whether to execute sigmoid function on model
75 output before transform.
76 softmax (bool, tuple or list of bool): whether to execute softmax function on model
77 output before transform.
78 other (Callable, tuple or list of Callables): callable function to execute other activation layers,
79 for example: `other = lambda x: torch.tanh(x)`
80 """
81 super().__init__(keys)
82 if not isinstance(output_postfix, str):
83 raise ValueError("output_postfix must be a string.")
84 self.output_postfix = output_postfix
85 self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))
86 self.softmax = ensure_tuple_rep(softmax, len(self.keys))
87 self.other = ensure_tuple_rep(other, len(self.keys))
88 self.converter = Activations()
89
90 def __call__(self, data):
91 d = dict(data)
92 for idx, key in enumerate(self.keys):
93 ret = self.converter(d[key], self.sigmoid[idx], self.softmax[idx], self.other[idx])
94 d[f"{key}_{self.output_postfix}"] = ret
95 return d
96
97
98 class AsDiscreted(MapTransform):
99 """
100 Dictionary-based wrapper of :py:class:`monai.transforms.AsDiscrete`.
101 """
102
103 def __init__(
104 self,
105 keys,
106 output_postfix="discreted",
107 argmax=False,
108 to_onehot=False,
109 n_classes=None,
110 threshold_values=False,
111 logit_thresh=0.5,
112 ):
113 """
114 Args:
115 keys (hashable items): keys of the corresponding items to model output and label.
116 See also: :py:class:`monai.transforms.compose.MapTransform`
117 output_postfix (str): the postfix string to construct keys to store converted data.
118 for example: if the keys of input data is `pred` and `label`, output_postfix is `discreted`,
119 the output data keys will be: `pred_discreted`, `label_discreted`.
120 argmax (bool): whether to execute argmax function on input data before transform.
121 to_onehot (bool): whether to convert input data into the one-hot format. Defaults to False.
122 n_classes (bool): the number of classes to convert to One-Hot format.
123 threshold_values (bool): whether threshold the float value to int number 0 or 1, default is False.
124 logit_thresh (float): the threshold value for thresholding operation, default is 0.5.
125 """
126 super().__init__(keys)
127 if not isinstance(output_postfix, str):
128 raise ValueError("output_postfix must be a string.")
129 self.output_postfix = output_postfix
130 self.argmax = ensure_tuple_rep(argmax, len(self.keys))
131 self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))
132 self.n_classes = ensure_tuple_rep(n_classes, len(self.keys))
133 self.threshold_values = ensure_tuple_rep(threshold_values, len(self.keys))
134 self.logit_thresh = ensure_tuple_rep(logit_thresh, len(self.keys))
135 self.converter = AsDiscrete()
136
137 def __call__(self, data):
138 d = dict(data)
139 for idx, key in enumerate(self.keys):
140 d[f"{key}_{self.output_postfix}"] = self.converter(
141 d[key],
142 self.argmax[idx],
143 self.to_onehot[idx],
144 self.n_classes[idx],
145 self.threshold_values[idx],
146 self.logit_thresh[idx],
147 )
148 return d
149
150
151 class KeepLargestConnectedComponentd(MapTransform):
152 """
153 dictionary-based wrapper of :py:class:monai.transforms.utility.array.KeepLargestConnectedComponent.
154 """
155
156 def __init__(
157 self, keys, applied_values, independent=True, background=0, connectivity=None, output_postfix="largestcc",
158 ):
159 """
160 Args:
161 keys (hashable items): keys of the corresponding items to be transformed.
162 See also: :py:class:`monai.transforms.compose.MapTransform`
163 applied_values (list or tuple of int): number list for applying the connected component on.
164 The pixel whose value is not in this list will remain unchanged.
165 independent (bool): consider several labels as a whole or independent, default is `True`.
166 Example use case would be segment label 1 is liver and label 2 is liver tumor, in that case
167 you want this "independent" to be specified as False.
168 background (int): Background pixel value. The over-segmented pixels will be set as this value.
169 connectivity (int): Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.
170 Accepted values are ranging from 1 to input.ndim. If ``None``, a full
171 connectivity of ``input.ndim`` is used.
172 output_postfix (str): the postfix string to construct keys to store converted data.
173 for example: if the keys of input data is `label`, output_postfix is `largestcc`,
174 the output data keys will be: `label_largestcc`.
175 """
176 super().__init__(keys)
177 if not isinstance(output_postfix, str):
178 raise ValueError("output_postfix must be a string.")
179 self.output_postfix = output_postfix
180 self.converter = KeepLargestConnectedComponent(applied_values, independent, background, connectivity)
181
182 def __call__(self, data):
183 d = dict(data)
184 for idx, key in enumerate(self.keys):
185 d[f"{key}_{self.output_postfix}"] = self.converter(d[key])
186 return d
187
188
189 SplitChannelD = SplitChannelDict = SplitChanneld
190 ActivationsD = ActivationsDict = Activationsd
191 AsDiscreteD = AsDiscreteDict = AsDiscreted
192 KeepLargestConnectedComponentD = KeepLargestConnectedComponentDict = KeepLargestConnectedComponentd
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py
--- a/monai/transforms/post/dictionary.py
+++ b/monai/transforms/post/dictionary.py
@@ -71,6 +71,7 @@
output_postfix (str): the postfix string to construct keys to store converted data.
for example: if the keys of input data is `pred` and `label`, output_postfix is `act`,
the output data keys will be: `pred_act`, `label_act`.
+ if set to None, will replace the original data with the same key.
sigmoid (bool, tuple or list of bool): whether to execute sigmoid function on model
output before transform.
softmax (bool, tuple or list of bool): whether to execute softmax function on model
@@ -79,7 +80,7 @@
for example: `other = lambda x: torch.tanh(x)`
"""
super().__init__(keys)
- if not isinstance(output_postfix, str):
+ if output_postfix is not None and not isinstance(output_postfix, str):
raise ValueError("output_postfix must be a string.")
self.output_postfix = output_postfix
self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))
@@ -91,7 +92,8 @@
d = dict(data)
for idx, key in enumerate(self.keys):
ret = self.converter(d[key], self.sigmoid[idx], self.softmax[idx], self.other[idx])
- d[f"{key}_{self.output_postfix}"] = ret
+ output_key = key if self.output_postfix is None else f"{key}_{self.output_postfix}"
+ d[output_key] = ret
return d
@@ -117,6 +119,7 @@
output_postfix (str): the postfix string to construct keys to store converted data.
for example: if the keys of input data is `pred` and `label`, output_postfix is `discreted`,
the output data keys will be: `pred_discreted`, `label_discreted`.
+ if set to None, will replace the original data with the same key.
argmax (bool): whether to execute argmax function on input data before transform.
to_onehot (bool): whether to convert input data into the one-hot format. Defaults to False.
n_classes (bool): the number of classes to convert to One-Hot format.
@@ -124,7 +127,7 @@
logit_thresh (float): the threshold value for thresholding operation, default is 0.5.
"""
super().__init__(keys)
- if not isinstance(output_postfix, str):
+ if output_postfix is not None and not isinstance(output_postfix, str):
raise ValueError("output_postfix must be a string.")
self.output_postfix = output_postfix
self.argmax = ensure_tuple_rep(argmax, len(self.keys))
@@ -137,7 +140,8 @@
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
- d[f"{key}_{self.output_postfix}"] = self.converter(
+ output_key = key if self.output_postfix is None else f"{key}_{self.output_postfix}"
+ d[output_key] = self.converter(
d[key],
self.argmax[idx],
self.to_onehot[idx],
@@ -172,9 +176,10 @@
output_postfix (str): the postfix string to construct keys to store converted data.
for example: if the keys of input data is `label`, output_postfix is `largestcc`,
the output data keys will be: `label_largestcc`.
+ if set to None, will replace the original data with the same key.
"""
super().__init__(keys)
- if not isinstance(output_postfix, str):
+ if output_postfix is not None and not isinstance(output_postfix, str):
raise ValueError("output_postfix must be a string.")
self.output_postfix = output_postfix
self.converter = KeepLargestConnectedComponent(applied_values, independent, background, connectivity)
@@ -182,7 +187,8 @@
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
- d[f"{key}_{self.output_postfix}"] = self.converter(d[key])
+ output_key = key if self.output_postfix is None else f"{key}_{self.output_postfix}"
+ d[output_key] = self.converter(d[key])
return d
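
For reference, a post-processing chain could then overwrite `pred` in place instead of adding `pred_act` / `pred_discreted` entries (sketch; the key name is illustrative):

```python
from monai.transforms.compose import Compose
from monai.transforms.post.dictionary import Activationsd, AsDiscreted

post_transforms = Compose([
    # output_postfix=None writes the result back to "pred" rather than a new key
    Activationsd(keys="pred", output_postfix=None, softmax=True),
    AsDiscreted(keys="pred", output_postfix=None, argmax=True),
])
```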
|
{"golden_diff": "diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py\n--- a/monai/transforms/post/dictionary.py\n+++ b/monai/transforms/post/dictionary.py\n@@ -71,6 +71,7 @@\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `pred` and `label`, output_postfix is `act`,\n the output data keys will be: `pred_act`, `label_act`.\n+ if set to None, will replace the original data with the same key.\n sigmoid (bool, tuple or list of bool): whether to execute sigmoid function on model\n output before transform.\n softmax (bool, tuple or list of bool): whether to execute softmax function on model\n@@ -79,7 +80,7 @@\n for example: `other = lambda x: torch.tanh(x)`\n \"\"\"\n super().__init__(keys)\n- if not isinstance(output_postfix, str):\n+ if output_postfix is not None and not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))\n@@ -91,7 +92,8 @@\n d = dict(data)\n for idx, key in enumerate(self.keys):\n ret = self.converter(d[key], self.sigmoid[idx], self.softmax[idx], self.other[idx])\n- d[f\"{key}_{self.output_postfix}\"] = ret\n+ output_key = key if self.output_postfix is None else f\"{key}_{self.output_postfix}\"\n+ d[output_key] = ret\n return d\n \n \n@@ -117,6 +119,7 @@\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `pred` and `label`, output_postfix is `discreted`,\n the output data keys will be: `pred_discreted`, `label_discreted`.\n+ if set to None, will replace the original data with the same key.\n argmax (bool): whether to execute argmax function on input data before transform.\n to_onehot (bool): whether to convert input data into the one-hot format. 
Defaults to False.\n n_classes (bool): the number of classes to convert to One-Hot format.\n@@ -124,7 +127,7 @@\n logit_thresh (float): the threshold value for thresholding operation, default is 0.5.\n \"\"\"\n super().__init__(keys)\n- if not isinstance(output_postfix, str):\n+ if output_postfix is not None and not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.argmax = ensure_tuple_rep(argmax, len(self.keys))\n@@ -137,7 +140,8 @@\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n- d[f\"{key}_{self.output_postfix}\"] = self.converter(\n+ output_key = key if self.output_postfix is None else f\"{key}_{self.output_postfix}\"\n+ d[output_key] = self.converter(\n d[key],\n self.argmax[idx],\n self.to_onehot[idx],\n@@ -172,9 +176,10 @@\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `label`, output_postfix is `largestcc`,\n the output data keys will be: `label_largestcc`.\n+ if set to None, will replace the original data with the same key.\n \"\"\"\n super().__init__(keys)\n- if not isinstance(output_postfix, str):\n+ if output_postfix is not None and not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.converter = KeepLargestConnectedComponent(applied_values, independent, background, connectivity)\n@@ -182,7 +187,8 @@\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n- d[f\"{key}_{self.output_postfix}\"] = self.converter(d[key])\n+ output_key = key if self.output_postfix is None else f\"{key}_{self.output_postfix}\"\n+ d[output_key] = self.converter(d[key])\n return d\n", "issue": "Support to replace original keys in post-transform\n**Is your feature request related to a problem? 
Please describe.**\r\nIf the `output_postfix` is None, the post transform should use the original keys to save memory.\r\n\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of dictionary-based wrappers around the \"vanilla\" transforms for model output tensors\ndefined in :py:class:`monai.transforms.utility.array`.\n\nClass names are ended with 'd' to denote dictionary-based transforms.\n\"\"\"\n\nfrom monai.utils.misc import ensure_tuple_rep\nfrom monai.transforms.compose import MapTransform\nfrom monai.transforms.post.array import SplitChannel, Activations, AsDiscrete, KeepLargestConnectedComponent\n\n\nclass SplitChanneld(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`.\n All the input specified by `keys` should be splitted into same count of data.\n\n \"\"\"\n\n def __init__(self, keys, output_postfixes, to_onehot=False, num_classes=None):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n output_postfixes (list, tuple): the postfixes to construct keys to store splitted data.\n for example: if the key of input data is `pred` and split 2 classes, the output\n data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1])\n to_onehot (bool or list of bool): whether to convert the data to One-Hot format, default is False.\n num_classes (int or list of int): the class number used to convert to One-Hot format\n if `to_onehot` is True.\n \"\"\"\n super().__init__(keys)\n if not isinstance(output_postfixes, (list, tuple)):\n raise ValueError(\"must specify key postfixes to store splitted data.\")\n self.output_postfixes = output_postfixes\n self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))\n self.num_classes = ensure_tuple_rep(num_classes, len(self.keys))\n self.splitter = SplitChannel()\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n rets = self.splitter(d[key], self.to_onehot[idx], self.num_classes[idx])\n assert len(self.output_postfixes) == len(rets), \"count of splitted results must match output_postfixes.\"\n for i, r in enumerate(rets):\n d[f\"{key}_{self.output_postfixes[i]}\"] = r\n return d\n\n\nclass Activationsd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.AddActivations`.\n Add activation layers to the input data specified by `keys`.\n \"\"\"\n\n def __init__(self, keys, output_postfix=\"act\", sigmoid=False, softmax=False, other=None):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to model output and label.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `pred` and `label`, output_postfix is `act`,\n the output data keys will be: `pred_act`, `label_act`.\n sigmoid (bool, tuple or 
list of bool): whether to execute sigmoid function on model\n output before transform.\n softmax (bool, tuple or list of bool): whether to execute softmax function on model\n output before transform.\n other (Callable, tuple or list of Callables): callable function to execute other activation layers,\n for example: `other = lambda x: torch.tanh(x)`\n \"\"\"\n super().__init__(keys)\n if not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))\n self.softmax = ensure_tuple_rep(softmax, len(self.keys))\n self.other = ensure_tuple_rep(other, len(self.keys))\n self.converter = Activations()\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n ret = self.converter(d[key], self.sigmoid[idx], self.softmax[idx], self.other[idx])\n d[f\"{key}_{self.output_postfix}\"] = ret\n return d\n\n\nclass AsDiscreted(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.AsDiscrete`.\n \"\"\"\n\n def __init__(\n self,\n keys,\n output_postfix=\"discreted\",\n argmax=False,\n to_onehot=False,\n n_classes=None,\n threshold_values=False,\n logit_thresh=0.5,\n ):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to model output and label.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `pred` and `label`, output_postfix is `discreted`,\n the output data keys will be: `pred_discreted`, `label_discreted`.\n argmax (bool): whether to execute argmax function on input data before transform.\n to_onehot (bool): whether to convert input data into the one-hot format. 
Defaults to False.\n n_classes (bool): the number of classes to convert to One-Hot format.\n threshold_values (bool): whether threshold the float value to int number 0 or 1, default is False.\n logit_thresh (float): the threshold value for thresholding operation, default is 0.5.\n \"\"\"\n super().__init__(keys)\n if not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.argmax = ensure_tuple_rep(argmax, len(self.keys))\n self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))\n self.n_classes = ensure_tuple_rep(n_classes, len(self.keys))\n self.threshold_values = ensure_tuple_rep(threshold_values, len(self.keys))\n self.logit_thresh = ensure_tuple_rep(logit_thresh, len(self.keys))\n self.converter = AsDiscrete()\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n d[f\"{key}_{self.output_postfix}\"] = self.converter(\n d[key],\n self.argmax[idx],\n self.to_onehot[idx],\n self.n_classes[idx],\n self.threshold_values[idx],\n self.logit_thresh[idx],\n )\n return d\n\n\nclass KeepLargestConnectedComponentd(MapTransform):\n \"\"\"\n dictionary-based wrapper of :py:class:monai.transforms.utility.array.KeepLargestConnectedComponent.\n \"\"\"\n\n def __init__(\n self, keys, applied_values, independent=True, background=0, connectivity=None, output_postfix=\"largestcc\",\n ):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n applied_values (list or tuple of int): number list for applying the connected component on.\n The pixel whose value is not in this list will remain unchanged.\n independent (bool): consider several labels as a whole or independent, default is `True`.\n Example use case would be segment label 1 is liver and label 2 is liver tumor, in that case\n you want this \"independent\" to be specified as False.\n background (int): Background pixel value. The over-segmented pixels will be set as this value.\n connectivity (int): Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.\n Accepted values are ranging from 1 to input.ndim. 
If ``None``, a full\n connectivity of ``input.ndim`` is used.\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `label`, output_postfix is `largestcc`,\n the output data keys will be: `label_largestcc`.\n \"\"\"\n super().__init__(keys)\n if not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.converter = KeepLargestConnectedComponent(applied_values, independent, background, connectivity)\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n d[f\"{key}_{self.output_postfix}\"] = self.converter(d[key])\n return d\n\n\nSplitChannelD = SplitChannelDict = SplitChanneld\nActivationsD = ActivationsDict = Activationsd\nAsDiscreteD = AsDiscreteDict = AsDiscreted\nKeepLargestConnectedComponentD = KeepLargestConnectedComponentDict = KeepLargestConnectedComponentd\n", "path": "monai/transforms/post/dictionary.py"}], "after_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of dictionary-based wrappers around the \"vanilla\" transforms for model output tensors\ndefined in :py:class:`monai.transforms.utility.array`.\n\nClass names are ended with 'd' to denote dictionary-based transforms.\n\"\"\"\n\nfrom monai.utils.misc import ensure_tuple_rep\nfrom monai.transforms.compose import MapTransform\nfrom monai.transforms.post.array import SplitChannel, Activations, AsDiscrete, KeepLargestConnectedComponent\n\n\nclass SplitChanneld(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`.\n All the input specified by `keys` should be splitted into same count of data.\n\n \"\"\"\n\n def __init__(self, keys, output_postfixes, to_onehot=False, num_classes=None):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n output_postfixes (list, tuple): the postfixes to construct keys to store splitted data.\n for example: if the key of input data is `pred` and split 2 classes, the output\n data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1])\n to_onehot (bool or list of bool): whether to convert the data to One-Hot format, default is False.\n num_classes (int or list of int): the class number used to convert to One-Hot format\n if `to_onehot` is True.\n \"\"\"\n super().__init__(keys)\n if not isinstance(output_postfixes, (list, tuple)):\n raise ValueError(\"must specify key postfixes to store splitted data.\")\n self.output_postfixes = output_postfixes\n self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))\n self.num_classes = ensure_tuple_rep(num_classes, len(self.keys))\n self.splitter = SplitChannel()\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n rets = self.splitter(d[key], self.to_onehot[idx], self.num_classes[idx])\n assert 
len(self.output_postfixes) == len(rets), \"count of splitted results must match output_postfixes.\"\n for i, r in enumerate(rets):\n d[f\"{key}_{self.output_postfixes[i]}\"] = r\n return d\n\n\nclass Activationsd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.AddActivations`.\n Add activation layers to the input data specified by `keys`.\n \"\"\"\n\n def __init__(self, keys, output_postfix=\"act\", sigmoid=False, softmax=False, other=None):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to model output and label.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `pred` and `label`, output_postfix is `act`,\n the output data keys will be: `pred_act`, `label_act`.\n if set to None, will replace the original data with the same key.\n sigmoid (bool, tuple or list of bool): whether to execute sigmoid function on model\n output before transform.\n softmax (bool, tuple or list of bool): whether to execute softmax function on model\n output before transform.\n other (Callable, tuple or list of Callables): callable function to execute other activation layers,\n for example: `other = lambda x: torch.tanh(x)`\n \"\"\"\n super().__init__(keys)\n if output_postfix is not None and not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))\n self.softmax = ensure_tuple_rep(softmax, len(self.keys))\n self.other = ensure_tuple_rep(other, len(self.keys))\n self.converter = Activations()\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n ret = self.converter(d[key], self.sigmoid[idx], self.softmax[idx], self.other[idx])\n output_key = key if self.output_postfix is None else f\"{key}_{self.output_postfix}\"\n d[output_key] = ret\n return d\n\n\nclass AsDiscreted(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.AsDiscrete`.\n \"\"\"\n\n def __init__(\n self,\n keys,\n output_postfix=\"discreted\",\n argmax=False,\n to_onehot=False,\n n_classes=None,\n threshold_values=False,\n logit_thresh=0.5,\n ):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to model output and label.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `pred` and `label`, output_postfix is `discreted`,\n the output data keys will be: `pred_discreted`, `label_discreted`.\n if set to None, will replace the original data with the same key.\n argmax (bool): whether to execute argmax function on input data before transform.\n to_onehot (bool): whether to convert input data into the one-hot format. 
Defaults to False.\n n_classes (bool): the number of classes to convert to One-Hot format.\n threshold_values (bool): whether threshold the float value to int number 0 or 1, default is False.\n logit_thresh (float): the threshold value for thresholding operation, default is 0.5.\n \"\"\"\n super().__init__(keys)\n if output_postfix is not None and not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.argmax = ensure_tuple_rep(argmax, len(self.keys))\n self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))\n self.n_classes = ensure_tuple_rep(n_classes, len(self.keys))\n self.threshold_values = ensure_tuple_rep(threshold_values, len(self.keys))\n self.logit_thresh = ensure_tuple_rep(logit_thresh, len(self.keys))\n self.converter = AsDiscrete()\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n output_key = key if self.output_postfix is None else f\"{key}_{self.output_postfix}\"\n d[output_key] = self.converter(\n d[key],\n self.argmax[idx],\n self.to_onehot[idx],\n self.n_classes[idx],\n self.threshold_values[idx],\n self.logit_thresh[idx],\n )\n return d\n\n\nclass KeepLargestConnectedComponentd(MapTransform):\n \"\"\"\n dictionary-based wrapper of :py:class:monai.transforms.utility.array.KeepLargestConnectedComponent.\n \"\"\"\n\n def __init__(\n self, keys, applied_values, independent=True, background=0, connectivity=None, output_postfix=\"largestcc\",\n ):\n \"\"\"\n Args:\n keys (hashable items): keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n applied_values (list or tuple of int): number list for applying the connected component on.\n The pixel whose value is not in this list will remain unchanged.\n independent (bool): consider several labels as a whole or independent, default is `True`.\n Example use case would be segment label 1 is liver and label 2 is liver tumor, in that case\n you want this \"independent\" to be specified as False.\n background (int): Background pixel value. The over-segmented pixels will be set as this value.\n connectivity (int): Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.\n Accepted values are ranging from 1 to input.ndim. If ``None``, a full\n connectivity of ``input.ndim`` is used.\n output_postfix (str): the postfix string to construct keys to store converted data.\n for example: if the keys of input data is `label`, output_postfix is `largestcc`,\n the output data keys will be: `label_largestcc`.\n if set to None, will replace the original data with the same key.\n \"\"\"\n super().__init__(keys)\n if output_postfix is not None and not isinstance(output_postfix, str):\n raise ValueError(\"output_postfix must be a string.\")\n self.output_postfix = output_postfix\n self.converter = KeepLargestConnectedComponent(applied_values, independent, background, connectivity)\n\n def __call__(self, data):\n d = dict(data)\n for idx, key in enumerate(self.keys):\n output_key = key if self.output_postfix is None else f\"{key}_{self.output_postfix}\"\n d[output_key] = self.converter(d[key])\n return d\n\n\nSplitChannelD = SplitChannelDict = SplitChanneld\nActivationsD = ActivationsDict = Activationsd\nAsDiscreteD = AsDiscreteDict = AsDiscreted\nKeepLargestConnectedComponentD = KeepLargestConnectedComponentDict = KeepLargestConnectedComponentd\n", "path": "monai/transforms/post/dictionary.py"}]}
| 2,823 | 1,016 |
gh_patches_debug_22130
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-6596
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature: support glob patterns for paths in file bundle plugin `update` extra
### Feature scope
CLI (options, error messages, logging, etc.)
### Description
### Overview
File bundle plugins can specify a number of files to update via the `update` extra when `meltano upgrade files` is run.
`meltano.yml`
```yml
version: 1
default_environment: dev
environments:
- name: dev
- name: staging
- name: prod
project_id: fefc3baf-ebb0-4f68-87d1-fe5b3afbe6e8
plugins:
files:
- name: files-dbt
pip_url: git+https://github.com/meltano/files-dbt
update:
transform/models/.gitkeep: true
transform/profile/profiles.yml: true
transform/.gitignore: true
transform/dbt_project.yml: true
```
Currently, each file than can be updated by `meltano upgrade files` must have its path declared under `update` individually. This can lead to bloated `meltano.yml` file bundle definitions that specify many files within common directories as upgradable.
### Proposal
Support glob patterns to reduce the number of paths needed to specify all files required for upgrade.
All bundle files:
```yml
update:
'**/*': true
```
All bundle `.yml` files:
```yml
update:
'**/*.yml': true
```
All bundle `.yml` files under the `transform` directory:
```yml
update:
'transform/**/*.yml': true
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/plugin/file.py`
Content:
```
1 """Meltano file plugin type."""
2
3 from __future__ import annotations
4
5 from typing import TYPE_CHECKING
6
7 import structlog
8
9 from meltano.core.behavior.hookable import hook
10 from meltano.core.plugin import BasePlugin, PluginType
11 from meltano.core.plugin.project_plugin import ProjectPlugin
12 from meltano.core.plugin.settings_service import PluginSettingsService
13 from meltano.core.plugin_install_service import (
14 PluginInstallReason,
15 PluginInstallService,
16 )
17 from meltano.core.setting_definition import SettingDefinition, SettingKind
18 from meltano.core.venv_service import VirtualEnv
19
20 if TYPE_CHECKING:
21 from os import PathLike
22 from pathlib import Path
23
24 from meltano.core.project import Project
25
26
27 logger = structlog.getLogger(__name__)
28
29
30 class FilePlugin(BasePlugin):
31 """Meltano file plugin type."""
32
33 __plugin_type__ = PluginType.FILES
34
35 EXTRA_SETTINGS = [
36 SettingDefinition(
37 name="_update", kind=SettingKind.OBJECT, aliases=["update"], value={}
38 )
39 ]
40
41 def is_invokable(self) -> bool:
42 """Return whether the plugin is invokable.
43
44 Returns:
45 True if the plugin is invokable, False otherwise.
46 """
47 return False
48
49 def should_add_to_file(self) -> bool:
50 """Return whether the plugin should be added to `meltano.yml`.
51
52 Returns:
53 True if the plugin should be added to `meltano.yml`, False otherwise.
54 """
55 return len(self.extras.get("update", [])) > 0
56
57 def file_contents(self, project: Project) -> dict[Path, str]:
58 """Return the contents of the files to be created or updated.
59
60 Args:
61 project: The Meltano project.
62
63 Returns:
64 A dictionary of file names and their contents.
65 """
66 venv = VirtualEnv(project.plugin_dir(self, "venv"))
67 bundle_dir = venv.site_packages_dir.joinpath("bundle")
68
69 return {
70 path.relative_to(bundle_dir): path.read_text()
71 for path in bundle_dir.glob("**/*")
72 if path.is_file()
73 and "__pycache__" not in path.parts
74 and path != bundle_dir.joinpath("__init__.py")
75 }
76
77 def update_file_header(self, relative_path: PathLike) -> str:
78 """Return the header to be added to the top of the file.
79
80 Args:
81 relative_path: The relative path of the file.
82
83 Returns:
84 The header to be added to the top of the file.
85 """
86 return "\n".join(
87 (
88 f"# This file is managed by the '{self.name}' {self.type.descriptor} and updated automatically when `meltano upgrade` is run.",
89 f"# To prevent any manual changes from being overwritten, remove the {self.type.descriptor} from `meltano.yml` or disable automatic updates:",
90 f"# meltano config --plugin-type={self.type} {self.name} set _update {relative_path} false",
91 )
92 )
93
94 def project_file_contents(
95 self,
96 project: Project,
97 paths_to_update: list[str],
98 ) -> dict[Path, str]:
99 """Return the contents of the files to be created or updated in the project.
100
101 Args:
102 project: The Meltano project.
103 paths_to_update: The paths of the files to be updated.
104
105 Returns:
106 A dictionary of file names and their contents.
107 """
108
109 def with_update_header(content: str, relative_path: PathLike):
110 if str(relative_path) in paths_to_update:
111 content = "\n\n".join([self.update_file_header(relative_path), content])
112
113 return content
114
115 return {
116 relative_path: with_update_header(content, relative_path)
117 for relative_path, content in self.file_contents(project).items()
118 }
119
120 def write_file(
121 self,
122 project: Project,
123 relative_path: PathLike,
124 content: str,
125 ) -> bool:
126 """Write the file to the project.
127
128 Args:
129 project: The Meltano project.
130 relative_path: The relative path of the file.
131 content: The contents of the file.
132
133 Returns:
134 True if the file was written, False otherwise.
135 """
136 project_path = project.root_dir(relative_path)
137 if project_path.exists() and project_path.read_text() == content:
138 return False
139
140 project_path.parent.mkdir(parents=True, exist_ok=True)
141 project_path.write_text(content)
142
143 return True
144
145 def write_files(
146 self,
147 project: Project,
148 files_content: dict[Path, str],
149 ) -> list[Path]:
150 """Write the files to the project.
151
152 Args:
153 project: The Meltano project.
154 files_content: A dictionary of file names and their contents.
155
156 Returns:
157 A list of the paths of the files that were written.
158 """
159 return [
160 relative_path
161 for relative_path, content in files_content.items()
162 if self.write_file(project, relative_path, content)
163 ]
164
165 def files_to_create(
166 self,
167 project: Project,
168 paths_to_update: list[str],
169 ) -> dict[Path, str]:
170 """Return the contents of the files to be created in the project.
171
172 Args:
173 project: The Meltano project.
174 paths_to_update: The paths of the files to be updated.
175
176 Returns:
177 A dictionary of file names and their contents.
178 """
179
180 def rename_if_exists(relative_path: Path):
181 if not project.root_dir(relative_path).exists():
182 return relative_path
183
184 logger.info(
185 f"File {str(relative_path)!r} already exists, keeping both versions"
186 )
187 return relative_path.with_name(
188 f"{relative_path.stem} ({self.name}){relative_path.suffix}"
189 )
190
191 return {
192 rename_if_exists(relative_path): content
193 for relative_path, content in self.project_file_contents(
194 project, paths_to_update
195 ).items()
196 }
197
198 def files_to_update(
199 self,
200 project: Project,
201 paths_to_update: list[str],
202 ) -> dict[Path, str]:
203 """Return the contents of the files to be updated in the project.
204
205 Args:
206 project: The Meltano project.
207 paths_to_update: The paths of the files to be updated.
208
209 Returns:
210 A dictionary of file names and their contents.
211 """
212 return {
213 relative_path: content
214 for relative_path, content in self.project_file_contents(
215 project, paths_to_update
216 ).items()
217 if str(relative_path) in paths_to_update
218 }
219
220 def create_files(
221 self,
222 project: Project,
223 paths_to_update: list[str] | None = None,
224 ) -> list[Path]:
225 """Create the files in the project.
226
227 Args:
228 project: The Meltano project.
229 paths_to_update: Optional paths of the files to be updated.
230
231 Returns:
232 A list of the paths of the files that were created.
233 """
234 return self.write_files(
235 project,
236 self.files_to_create(
237 project, [] if paths_to_update is None else paths_to_update
238 ),
239 )
240
241 def update_files(
242 self,
243 project: Project,
244 paths_to_update: list[str] | None = None,
245 ) -> list[Path]:
246 """Update the files in the project.
247
248 Args:
249 project: The Meltano project.
250 paths_to_update: Optional paths of the files to be updated.
251
252 Returns:
253 A list of the paths of the files that were updated.
254 """
255 return self.write_files(
256 project,
257 self.files_to_update(
258 project, [] if paths_to_update is None else paths_to_update
259 ),
260 )
261
262 @hook("after_install")
263 async def after_install(
264 self,
265 installer: PluginInstallService,
266 plugin: ProjectPlugin,
267 reason: PluginInstallReason,
268 ):
269 """Trigger after install tasks.
270
271 Args:
272 installer: The plugin installer.
273 plugin: The installed plugin.
274 reason: The reason for the installation.
275 """
276 project = installer.project
277 plugins_service = installer.plugins_service
278
279 plugin_settings_service = PluginSettingsService(
280 project, plugin, plugins_service=plugins_service
281 )
282 update_config = plugin_settings_service.get("_update")
283 paths_to_update = [
284 path for path, to_update in update_config.items() if to_update
285 ]
286
287 if reason is PluginInstallReason.ADD:
288 logger.info(f"Adding '{plugin.name}' files to project...")
289
290 for path in self.create_files(project, paths_to_update):
291 logger.info(f"Created {path}")
292 elif reason is PluginInstallReason.UPGRADE:
293 logger.info(f"Updating '{plugin.name}' files in project...")
294
295 updated_paths = self.update_files(project, paths_to_update)
296 if not updated_paths:
297 logger.info("Nothing to update")
298 return
299
300 for path in updated_paths:
301 logger.info(f"Updated {path}")
302 else:
303 logger.info(
304 f"Run `meltano upgrade files` to update your project's '{plugin.name}' files."
305 )
306
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/meltano/core/plugin/file.py b/src/meltano/core/plugin/file.py
--- a/src/meltano/core/plugin/file.py
+++ b/src/meltano/core/plugin/file.py
@@ -107,7 +107,7 @@
"""
def with_update_header(content: str, relative_path: PathLike):
- if str(relative_path) in paths_to_update:
+ if any(relative_path.match(path) for path in paths_to_update):
content = "\n\n".join([self.update_file_header(relative_path), content])
return content
@@ -209,12 +209,11 @@
Returns:
A dictionary of file names and their contents.
"""
+ file_contents = self.project_file_contents(project, paths_to_update)
return {
relative_path: content
- for relative_path, content in self.project_file_contents(
- project, paths_to_update
- ).items()
- if str(relative_path) in paths_to_update
+ for relative_path, content in file_contents.items()
+ if any(relative_path.match(path) for path in paths_to_update)
}
def create_files(
|
{"golden_diff": "diff --git a/src/meltano/core/plugin/file.py b/src/meltano/core/plugin/file.py\n--- a/src/meltano/core/plugin/file.py\n+++ b/src/meltano/core/plugin/file.py\n@@ -107,7 +107,7 @@\n \"\"\"\n \n def with_update_header(content: str, relative_path: PathLike):\n- if str(relative_path) in paths_to_update:\n+ if any(relative_path.match(path) for path in paths_to_update):\n content = \"\\n\\n\".join([self.update_file_header(relative_path), content])\n \n return content\n@@ -209,12 +209,11 @@\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n+ file_contents = self.project_file_contents(project, paths_to_update)\n return {\n relative_path: content\n- for relative_path, content in self.project_file_contents(\n- project, paths_to_update\n- ).items()\n- if str(relative_path) in paths_to_update\n+ for relative_path, content in file_contents.items()\n+ if any(relative_path.match(path) for path in paths_to_update)\n }\n \n def create_files(\n", "issue": "feature: support glob patterns for paths in file bundle plugin `update` extra\n### Feature scope\n\nCLI (options, error messages, logging, etc.)\n\n### Description\n\n### Overview\r\nFile bundle plugins can specify a number of files to update with the `update` extra, when `meltano upgrade files` is run.\r\n\r\n`meltano.yml`\r\n```yml\r\nversion: 1\r\ndefault_environment: dev\r\nenvironments:\r\n- name: dev\r\n- name: staging\r\n- name: prod\r\nproject_id: fefc3baf-ebb0-4f68-87d1-fe5b3afbe6e8\r\nplugins:\r\n files:\r\n - name: files-dbt\r\n pip_url: git+https://github.com/meltano/files-dbt\r\n update:\r\n transform/models/.gitkeep: true\r\n transform/profile/profiles.yml: true\r\n transform/.gitignore: true\r\n transform/dbt_project.yml: true\r\n```\r\n\r\nCurrently, each file than can be updated by `meltano upgrade files` must have its path declared under `update` individually. 
This can lead to bloated `meltano.yml` file bundle definitions that specify many files within common directories as upgradable.\r\n\r\n### Proposal\r\nSupport glob patterns to reduce the number of paths needed to specify all files required for upgrade.\r\n\r\nAll bundle files:\r\n```yml\r\n update:\r\n '**/*': true\r\n```\r\n\r\nAll bundle `.yml` files:\r\n```yml\r\n update:\r\n '**/*.yml': true\r\n```\r\n\r\nAll bundle `.yml` files under the `transform` directory:\r\n```yml\r\n update:\r\n 'transform/**/*.yml': true\r\n```\n", "before_files": [{"content": "\"\"\"Meltano file plugin type.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport structlog\n\nfrom meltano.core.behavior.hookable import hook\nfrom meltano.core.plugin import BasePlugin, PluginType\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.plugin.settings_service import PluginSettingsService\nfrom meltano.core.plugin_install_service import (\n PluginInstallReason,\n PluginInstallService,\n)\nfrom meltano.core.setting_definition import SettingDefinition, SettingKind\nfrom meltano.core.venv_service import VirtualEnv\n\nif TYPE_CHECKING:\n from os import PathLike\n from pathlib import Path\n\n from meltano.core.project import Project\n\n\nlogger = structlog.getLogger(__name__)\n\n\nclass FilePlugin(BasePlugin):\n \"\"\"Meltano file plugin type.\"\"\"\n\n __plugin_type__ = PluginType.FILES\n\n EXTRA_SETTINGS = [\n SettingDefinition(\n name=\"_update\", kind=SettingKind.OBJECT, aliases=[\"update\"], value={}\n )\n ]\n\n def is_invokable(self) -> bool:\n \"\"\"Return whether the plugin is invokable.\n\n Returns:\n True if the plugin is invokable, False otherwise.\n \"\"\"\n return False\n\n def should_add_to_file(self) -> bool:\n \"\"\"Return whether the plugin should be added to `meltano.yml`.\n\n Returns:\n True if the plugin should be added to `meltano.yml`, False otherwise.\n \"\"\"\n return len(self.extras.get(\"update\", [])) > 0\n\n def file_contents(self, project: Project) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be created or updated.\n\n Args:\n project: The Meltano project.\n\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n venv = VirtualEnv(project.plugin_dir(self, \"venv\"))\n bundle_dir = venv.site_packages_dir.joinpath(\"bundle\")\n\n return {\n path.relative_to(bundle_dir): path.read_text()\n for path in bundle_dir.glob(\"**/*\")\n if path.is_file()\n and \"__pycache__\" not in path.parts\n and path != bundle_dir.joinpath(\"__init__.py\")\n }\n\n def update_file_header(self, relative_path: PathLike) -> str:\n \"\"\"Return the header to be added to the top of the file.\n\n Args:\n relative_path: The relative path of the file.\n\n Returns:\n The header to be added to the top of the file.\n \"\"\"\n return \"\\n\".join(\n (\n f\"# This file is managed by the '{self.name}' {self.type.descriptor} and updated automatically when `meltano upgrade` is run.\",\n f\"# To prevent any manual changes from being overwritten, remove the {self.type.descriptor} from `meltano.yml` or disable automatic updates:\",\n f\"# meltano config --plugin-type={self.type} {self.name} set _update {relative_path} false\",\n )\n )\n\n def project_file_contents(\n self,\n project: Project,\n paths_to_update: list[str],\n ) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be created or updated in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: The paths of the files to be updated.\n\n Returns:\n A 
dictionary of file names and their contents.\n \"\"\"\n\n def with_update_header(content: str, relative_path: PathLike):\n if str(relative_path) in paths_to_update:\n content = \"\\n\\n\".join([self.update_file_header(relative_path), content])\n\n return content\n\n return {\n relative_path: with_update_header(content, relative_path)\n for relative_path, content in self.file_contents(project).items()\n }\n\n def write_file(\n self,\n project: Project,\n relative_path: PathLike,\n content: str,\n ) -> bool:\n \"\"\"Write the file to the project.\n\n Args:\n project: The Meltano project.\n relative_path: The relative path of the file.\n content: The contents of the file.\n\n Returns:\n True if the file was written, False otherwise.\n \"\"\"\n project_path = project.root_dir(relative_path)\n if project_path.exists() and project_path.read_text() == content:\n return False\n\n project_path.parent.mkdir(parents=True, exist_ok=True)\n project_path.write_text(content)\n\n return True\n\n def write_files(\n self,\n project: Project,\n files_content: dict[Path, str],\n ) -> list[Path]:\n \"\"\"Write the files to the project.\n\n Args:\n project: The Meltano project.\n files_content: A dictionary of file names and their contents.\n\n Returns:\n A list of the paths of the files that were written.\n \"\"\"\n return [\n relative_path\n for relative_path, content in files_content.items()\n if self.write_file(project, relative_path, content)\n ]\n\n def files_to_create(\n self,\n project: Project,\n paths_to_update: list[str],\n ) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be created in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: The paths of the files to be updated.\n\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n\n def rename_if_exists(relative_path: Path):\n if not project.root_dir(relative_path).exists():\n return relative_path\n\n logger.info(\n f\"File {str(relative_path)!r} already exists, keeping both versions\"\n )\n return relative_path.with_name(\n f\"{relative_path.stem} ({self.name}){relative_path.suffix}\"\n )\n\n return {\n rename_if_exists(relative_path): content\n for relative_path, content in self.project_file_contents(\n project, paths_to_update\n ).items()\n }\n\n def files_to_update(\n self,\n project: Project,\n paths_to_update: list[str],\n ) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be updated in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: The paths of the files to be updated.\n\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n return {\n relative_path: content\n for relative_path, content in self.project_file_contents(\n project, paths_to_update\n ).items()\n if str(relative_path) in paths_to_update\n }\n\n def create_files(\n self,\n project: Project,\n paths_to_update: list[str] | None = None,\n ) -> list[Path]:\n \"\"\"Create the files in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: Optional paths of the files to be updated.\n\n Returns:\n A list of the paths of the files that were created.\n \"\"\"\n return self.write_files(\n project,\n self.files_to_create(\n project, [] if paths_to_update is None else paths_to_update\n ),\n )\n\n def update_files(\n self,\n project: Project,\n paths_to_update: list[str] | None = None,\n ) -> list[Path]:\n \"\"\"Update the files in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: Optional paths of the files to be updated.\n\n Returns:\n A list 
of the paths of the files that were updated.\n \"\"\"\n return self.write_files(\n project,\n self.files_to_update(\n project, [] if paths_to_update is None else paths_to_update\n ),\n )\n\n @hook(\"after_install\")\n async def after_install(\n self,\n installer: PluginInstallService,\n plugin: ProjectPlugin,\n reason: PluginInstallReason,\n ):\n \"\"\"Trigger after install tasks.\n\n Args:\n installer: The plugin installer.\n plugin: The installed plugin.\n reason: The reason for the installation.\n \"\"\"\n project = installer.project\n plugins_service = installer.plugins_service\n\n plugin_settings_service = PluginSettingsService(\n project, plugin, plugins_service=plugins_service\n )\n update_config = plugin_settings_service.get(\"_update\")\n paths_to_update = [\n path for path, to_update in update_config.items() if to_update\n ]\n\n if reason is PluginInstallReason.ADD:\n logger.info(f\"Adding '{plugin.name}' files to project...\")\n\n for path in self.create_files(project, paths_to_update):\n logger.info(f\"Created {path}\")\n elif reason is PluginInstallReason.UPGRADE:\n logger.info(f\"Updating '{plugin.name}' files in project...\")\n\n updated_paths = self.update_files(project, paths_to_update)\n if not updated_paths:\n logger.info(\"Nothing to update\")\n return\n\n for path in updated_paths:\n logger.info(f\"Updated {path}\")\n else:\n logger.info(\n f\"Run `meltano upgrade files` to update your project's '{plugin.name}' files.\"\n )\n", "path": "src/meltano/core/plugin/file.py"}], "after_files": [{"content": "\"\"\"Meltano file plugin type.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport structlog\n\nfrom meltano.core.behavior.hookable import hook\nfrom meltano.core.plugin import BasePlugin, PluginType\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.plugin.settings_service import PluginSettingsService\nfrom meltano.core.plugin_install_service import (\n PluginInstallReason,\n PluginInstallService,\n)\nfrom meltano.core.setting_definition import SettingDefinition, SettingKind\nfrom meltano.core.venv_service import VirtualEnv\n\nif TYPE_CHECKING:\n from os import PathLike\n from pathlib import Path\n\n from meltano.core.project import Project\n\n\nlogger = structlog.getLogger(__name__)\n\n\nclass FilePlugin(BasePlugin):\n \"\"\"Meltano file plugin type.\"\"\"\n\n __plugin_type__ = PluginType.FILES\n\n EXTRA_SETTINGS = [\n SettingDefinition(\n name=\"_update\", kind=SettingKind.OBJECT, aliases=[\"update\"], value={}\n )\n ]\n\n def is_invokable(self) -> bool:\n \"\"\"Return whether the plugin is invokable.\n\n Returns:\n True if the plugin is invokable, False otherwise.\n \"\"\"\n return False\n\n def should_add_to_file(self) -> bool:\n \"\"\"Return whether the plugin should be added to `meltano.yml`.\n\n Returns:\n True if the plugin should be added to `meltano.yml`, False otherwise.\n \"\"\"\n return len(self.extras.get(\"update\", [])) > 0\n\n def file_contents(self, project: Project) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be created or updated.\n\n Args:\n project: The Meltano project.\n\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n venv = VirtualEnv(project.plugin_dir(self, \"venv\"))\n bundle_dir = venv.site_packages_dir.joinpath(\"bundle\")\n\n return {\n path.relative_to(bundle_dir): path.read_text()\n for path in bundle_dir.glob(\"**/*\")\n if path.is_file()\n and \"__pycache__\" not in path.parts\n and path != bundle_dir.joinpath(\"__init__.py\")\n 
}\n\n def update_file_header(self, relative_path: PathLike) -> str:\n \"\"\"Return the header to be added to the top of the file.\n\n Args:\n relative_path: The relative path of the file.\n\n Returns:\n The header to be added to the top of the file.\n \"\"\"\n return \"\\n\".join(\n (\n f\"# This file is managed by the '{self.name}' {self.type.descriptor} and updated automatically when `meltano upgrade` is run.\",\n f\"# To prevent any manual changes from being overwritten, remove the {self.type.descriptor} from `meltano.yml` or disable automatic updates:\",\n f\"# meltano config --plugin-type={self.type} {self.name} set _update {relative_path} false\",\n )\n )\n\n def project_file_contents(\n self,\n project: Project,\n paths_to_update: list[str],\n ) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be created or updated in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: The paths of the files to be updated.\n\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n\n def with_update_header(content: str, relative_path: PathLike):\n if any(relative_path.match(path) for path in paths_to_update):\n content = \"\\n\\n\".join([self.update_file_header(relative_path), content])\n\n return content\n\n return {\n relative_path: with_update_header(content, relative_path)\n for relative_path, content in self.file_contents(project).items()\n }\n\n def write_file(\n self,\n project: Project,\n relative_path: PathLike,\n content: str,\n ) -> bool:\n \"\"\"Write the file to the project.\n\n Args:\n project: The Meltano project.\n relative_path: The relative path of the file.\n content: The contents of the file.\n\n Returns:\n True if the file was written, False otherwise.\n \"\"\"\n project_path = project.root_dir(relative_path)\n if project_path.exists() and project_path.read_text() == content:\n return False\n\n project_path.parent.mkdir(parents=True, exist_ok=True)\n project_path.write_text(content)\n\n return True\n\n def write_files(\n self,\n project: Project,\n files_content: dict[Path, str],\n ) -> list[Path]:\n \"\"\"Write the files to the project.\n\n Args:\n project: The Meltano project.\n files_content: A dictionary of file names and their contents.\n\n Returns:\n A list of the paths of the files that were written.\n \"\"\"\n return [\n relative_path\n for relative_path, content in files_content.items()\n if self.write_file(project, relative_path, content)\n ]\n\n def files_to_create(\n self,\n project: Project,\n paths_to_update: list[str],\n ) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be created in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: The paths of the files to be updated.\n\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n\n def rename_if_exists(relative_path: Path):\n if not project.root_dir(relative_path).exists():\n return relative_path\n\n logger.info(\n f\"File {str(relative_path)!r} already exists, keeping both versions\"\n )\n return relative_path.with_name(\n f\"{relative_path.stem} ({self.name}){relative_path.suffix}\"\n )\n\n return {\n rename_if_exists(relative_path): content\n for relative_path, content in self.project_file_contents(\n project, paths_to_update\n ).items()\n }\n\n def files_to_update(\n self,\n project: Project,\n paths_to_update: list[str],\n ) -> dict[Path, str]:\n \"\"\"Return the contents of the files to be updated in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: The paths of the files to be 
updated.\n\n Returns:\n A dictionary of file names and their contents.\n \"\"\"\n file_contents = self.project_file_contents(project, paths_to_update)\n return {\n relative_path: content\n for relative_path, content in file_contents.items()\n if any(relative_path.match(path) for path in paths_to_update)\n }\n\n def create_files(\n self,\n project: Project,\n paths_to_update: list[str] | None = None,\n ) -> list[Path]:\n \"\"\"Create the files in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: Optional paths of the files to be updated.\n\n Returns:\n A list of the paths of the files that were created.\n \"\"\"\n return self.write_files(\n project,\n self.files_to_create(\n project, [] if paths_to_update is None else paths_to_update\n ),\n )\n\n def update_files(\n self,\n project: Project,\n paths_to_update: list[str] | None = None,\n ) -> list[Path]:\n \"\"\"Update the files in the project.\n\n Args:\n project: The Meltano project.\n paths_to_update: Optional paths of the files to be updated.\n\n Returns:\n A list of the paths of the files that were updated.\n \"\"\"\n return self.write_files(\n project,\n self.files_to_update(\n project, [] if paths_to_update is None else paths_to_update\n ),\n )\n\n @hook(\"after_install\")\n async def after_install(\n self,\n installer: PluginInstallService,\n plugin: ProjectPlugin,\n reason: PluginInstallReason,\n ):\n \"\"\"Trigger after install tasks.\n\n Args:\n installer: The plugin installer.\n plugin: The installed plugin.\n reason: The reason for the installation.\n \"\"\"\n project = installer.project\n plugins_service = installer.plugins_service\n\n plugin_settings_service = PluginSettingsService(\n project, plugin, plugins_service=plugins_service\n )\n update_config = plugin_settings_service.get(\"_update\")\n paths_to_update = [\n path for path, to_update in update_config.items() if to_update\n ]\n\n if reason is PluginInstallReason.ADD:\n logger.info(f\"Adding '{plugin.name}' files to project...\")\n\n for path in self.create_files(project, paths_to_update):\n logger.info(f\"Created {path}\")\n elif reason is PluginInstallReason.UPGRADE:\n logger.info(f\"Updating '{plugin.name}' files in project...\")\n\n updated_paths = self.update_files(project, paths_to_update)\n if not updated_paths:\n logger.info(\"Nothing to update\")\n return\n\n for path in updated_paths:\n logger.info(f\"Updated {path}\")\n else:\n logger.info(\n f\"Run `meltano upgrade files` to update your project's '{plugin.name}' files.\"\n )\n", "path": "src/meltano/core/plugin/file.py"}]}
| 3,395 | 256 |
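For readers of the record above, the heart of the accepted fix is `PurePath.match`-based glob matching over the bundle's relative paths. The snippet below is a minimal, self-contained sketch of that behaviour; the file paths and patterns are made up for illustration and are not taken from any real bundle.

```python
from pathlib import PurePosixPath

# Made-up bundle paths and `update` patterns, for illustration only.
bundle_paths = [
    PurePosixPath("transform/dbt_project.yml"),
    PurePosixPath("transform/models/.gitkeep"),
    PurePosixPath("analyze/schema.yml"),
]
patterns = ["transform/*.yml", "*.gitkeep"]

# Mirrors the matching used in the golden diff above: a bundle file is
# treated as updatable if any configured pattern matches its relative path.
to_update = [p for p in bundle_paths if any(p.match(pat) for pat in patterns)]
print([str(p) for p in to_update])
# ['transform/dbt_project.yml', 'transform/models/.gitkeep']
```

Worth noting: before Python 3.13, `**` in `PurePath.match()` behaves like a non-recursive `*`, so patterns such as `'**/*.yml'` may not match nested paths the way shell globbing would.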
gh_patches_debug_66285
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Poetry run: ModuleOrPackageNotFound with implicit namespace packages (PEP420)
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Arch Linux 4.18.16
- **Poetry version**: 0.12.5
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/Kazy/692963f6a41c64d38f38ac9a3f95619a
## Issue
I'm using implicit namespace packages to organize the packages at work, which works well by specifying the `packages` value in the `pyproject.toml` like that:
```toml
packages = [
{ include = "org" }
]
```
The file structure is like that:
```
├── org
│ └── subpackage
│ ├── __init__.py
│ └── command
│ └── cli.py
└── pyproject.toml
```
The issue is when you try to do `poetry run my-command`, you get:
```
[ModuleOrPackageNotFound]
No file/folder found for package org-subpackage-command
```
I already found how to fix it and will follow with a PR, but I wanted to create the issue in case my fix isn't the right one, and to make organization easier on your side as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/console/commands/run.py`
Content:
```
1 from .env_command import EnvCommand
2
3
4 class RunCommand(EnvCommand):
5 """
6 Runs a command in the appropriate environment.
7
8 run
9 { args* : The command and arguments/options to run. }
10 """
11
12 def handle(self):
13 args = self.argument("args")
14 script = args[0]
15 scripts = self.poetry.local_config.get("scripts")
16
17 if scripts and script in scripts:
18 return self.run_script(scripts[script], args)
19
20 return self.env.execute(*args)
21
22 def run_script(self, script, args):
23 if isinstance(script, dict):
24 script = script["callable"]
25
26 module, callable_ = script.split(":")
27
28 src_in_sys_path = "sys.path.append('src'); " if self._module.is_in_src() else ""
29
30 cmd = ["python", "-c"]
31
32 cmd += [
33 '"import sys; '
34 "from importlib import import_module; "
35 "sys.argv = {!r}; {}"
36 "import_module('{}').{}()\"".format(
37 args, src_in_sys_path, module, callable_
38 )
39 ]
40
41 return self.env.run(*cmd, shell=True, call=True)
42
43 @property
44 def _module(self):
45 from ...masonry.utils.module import Module
46
47 poetry = self.poetry
48 package = poetry.package
49 path = poetry.file.parent
50 module = Module(package.name, path.as_posix())
51 return module
52
53 def merge_application_definition(self, merge_args=True):
54 if self._application is None or (
55 self._application_definition_merged
56 and (self._application_definition_merged_with_args or not merge_args)
57 ):
58 return
59
60 if merge_args:
61 current_arguments = self._definition.get_arguments()
62 self._definition.set_arguments(
63 self._application.get_definition().get_arguments()
64 )
65 self._definition.add_arguments(current_arguments)
66
67 self._application_definition_merged = True
68 if merge_args:
69 self._application_definition_merged_with_args = True
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/poetry/console/commands/run.py b/poetry/console/commands/run.py
--- a/poetry/console/commands/run.py
+++ b/poetry/console/commands/run.py
@@ -47,7 +47,7 @@
poetry = self.poetry
package = poetry.package
path = poetry.file.parent
- module = Module(package.name, path.as_posix())
+ module = Module(package.name, path.as_posix(), package.packages)
return module
def merge_application_definition(self, merge_args=True):
|
{"golden_diff": "diff --git a/poetry/console/commands/run.py b/poetry/console/commands/run.py\n--- a/poetry/console/commands/run.py\n+++ b/poetry/console/commands/run.py\n@@ -47,7 +47,7 @@\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n- module = Module(package.name, path.as_posix())\n+ module = Module(package.name, path.as_posix(), package.packages)\n return module\n \n def merge_application_definition(self, merge_args=True):\n", "issue": "Poetry run: ModuleOrPackageNotFound with implicit namespace packages (PEP420)\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Arch Linux 4.18.16\r\n- **Poetry version**: 0.12.5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/Kazy/692963f6a41c64d38f38ac9a3f95619a\r\n\r\n## Issue\r\nI'm using implicit namespace packages to organize the packages at work, which works well by specifying the `packages` value in the `pyproject.toml` like that:\r\n```toml\r\npackages = [\r\n { include = \"org\" }\r\n]\r\n```\r\nThe file structure is like that:\r\n```\r\n\u251c\u2500\u2500 org\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 subpackage\r\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 __init__.py\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 command\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 cli.py\r\n\u2514\u2500\u2500 pyproject.toml\r\n```\r\n\r\nThe issue is when you try to do `poetry run my-command`, you get:\r\n```\r\n[ModuleOrPackageNotFound]\r\nNo file/folder found for package org-subpackage-command\r\n```\r\n\r\nI already found how to fix it and will follow with a PR, but I wanted to create the issue in case my fix isn't the right one, and to make organization easier on your side as well.\r\n\n", "before_files": [{"content": "from .env_command import EnvCommand\n\n\nclass RunCommand(EnvCommand):\n \"\"\"\n Runs a command in the appropriate environment.\n\n run\n { args* : The command and arguments/options to run. 
}\n \"\"\"\n\n def handle(self):\n args = self.argument(\"args\")\n script = args[0]\n scripts = self.poetry.local_config.get(\"scripts\")\n\n if scripts and script in scripts:\n return self.run_script(scripts[script], args)\n\n return self.env.execute(*args)\n\n def run_script(self, script, args):\n if isinstance(script, dict):\n script = script[\"callable\"]\n\n module, callable_ = script.split(\":\")\n\n src_in_sys_path = \"sys.path.append('src'); \" if self._module.is_in_src() else \"\"\n\n cmd = [\"python\", \"-c\"]\n\n cmd += [\n '\"import sys; '\n \"from importlib import import_module; \"\n \"sys.argv = {!r}; {}\"\n \"import_module('{}').{}()\\\"\".format(\n args, src_in_sys_path, module, callable_\n )\n ]\n\n return self.env.run(*cmd, shell=True, call=True)\n\n @property\n def _module(self):\n from ...masonry.utils.module import Module\n\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n module = Module(package.name, path.as_posix())\n return module\n\n def merge_application_definition(self, merge_args=True):\n if self._application is None or (\n self._application_definition_merged\n and (self._application_definition_merged_with_args or not merge_args)\n ):\n return\n\n if merge_args:\n current_arguments = self._definition.get_arguments()\n self._definition.set_arguments(\n self._application.get_definition().get_arguments()\n )\n self._definition.add_arguments(current_arguments)\n\n self._application_definition_merged = True\n if merge_args:\n self._application_definition_merged_with_args = True\n", "path": "poetry/console/commands/run.py"}], "after_files": [{"content": "from .env_command import EnvCommand\n\n\nclass RunCommand(EnvCommand):\n \"\"\"\n Runs a command in the appropriate environment.\n\n run\n { args* : The command and arguments/options to run. }\n \"\"\"\n\n def handle(self):\n args = self.argument(\"args\")\n script = args[0]\n scripts = self.poetry.local_config.get(\"scripts\")\n\n if scripts and script in scripts:\n return self.run_script(scripts[script], args)\n\n return self.env.execute(*args)\n\n def run_script(self, script, args):\n if isinstance(script, dict):\n script = script[\"callable\"]\n\n module, callable_ = script.split(\":\")\n\n src_in_sys_path = \"sys.path.append('src'); \" if self._module.is_in_src() else \"\"\n\n cmd = [\"python\", \"-c\"]\n\n cmd += [\n '\"import sys; '\n \"from importlib import import_module; \"\n \"sys.argv = {!r}; {}\"\n \"import_module('{}').{}()\\\"\".format(\n args, src_in_sys_path, module, callable_\n )\n ]\n\n return self.env.run(*cmd, shell=True, call=True)\n\n @property\n def _module(self):\n from ...masonry.utils.module import Module\n\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n module = Module(package.name, path.as_posix(), package.packages)\n return module\n\n def merge_application_definition(self, merge_args=True):\n if self._application is None or (\n self._application_definition_merged\n and (self._application_definition_merged_with_args or not merge_args)\n ):\n return\n\n if merge_args:\n current_arguments = self._definition.get_arguments()\n self._definition.set_arguments(\n self._application.get_definition().get_arguments()\n )\n self._definition.add_arguments(current_arguments)\n\n self._application_definition_merged = True\n if merge_args:\n self._application_definition_merged_with_args = True\n", "path": "poetry/console/commands/run.py"}]}
| 1,238 | 121 |
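The fix in the record above passes the project's explicit `packages` includes into `Module`, so a PEP 420 implicit namespace package (one with no `__init__.py`) can still be located. The helper below is a hypothetical sketch of that lookup logic, not Poetry's actual implementation; names such as `find_package_dir` are invented for illustration.

```python
from pathlib import Path

def find_package_dir(project_root: str, name: str, packages=None) -> Path:
    """Hypothetical sketch: locate a project's code, honouring `packages` includes.

    Poetry's real Module class handles more cases (src/ layouts, single-file
    modules) and raises its own ModuleOrPackageNotFound on failure.
    """
    root = Path(project_root)

    # Explicit `packages = [{ include = "org" }]` entries are checked first,
    # so a PEP 420 namespace package without __init__.py is still found.
    for pkg in packages or []:
        candidate = root / pkg["include"]
        if candidate.is_dir():
            return candidate

    # Fallback: guess a folder or module from the distribution name,
    # which is what failed for "org-subpackage-command" before the fix.
    guess = root / name.replace("-", "_")
    if guess.is_dir() or guess.with_suffix(".py").exists():
        return guess

    raise ModuleNotFoundError(f"No file/folder found for package {name}")
```

A builtin exception is used here only to keep the sketch self-contained; the real code path surfaces Poetry's `ModuleOrPackageNotFound` shown in the issue.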
gh_patches_debug_25695
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-6665
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
export_csv example under 0.12.7dev11 doesn't resize table
Running example export_csv from https://github.com/bokeh/bokeh/tree/master/examples/app/export_csv
under 0.12.7dev11 the table doesn't resize and extra rows are filled with 'undefined', '$NaN'.
The number of rows is 248 and doesn't change when moving the slider.
The rows after 248 are not shown.
Under 0.12.6 everything works perfectly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/app/export_csv/main.py`
Content:
```
1 from os.path import dirname, join
2
3 import pandas as pd
4
5 from bokeh.layouts import row, widgetbox
6 from bokeh.models import ColumnDataSource, CustomJS
7 from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter
8 from bokeh.io import curdoc
9
10 df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
11
12 source = ColumnDataSource(data=dict())
13
14 def update():
15 current = df[df['salary'] <= slider.value].dropna()
16 source.data = {
17 'name' : current.name,
18 'salary' : current.salary,
19 'years_experience' : current.years_experience,
20 }
21
22 slider = Slider(title="Max Salary", start=10000, end=250000, value=150000, step=1000)
23 slider.on_change('value', lambda attr, old, new: update())
24
25 button = Button(label="Download", button_type="success")
26 button.callback = CustomJS(args=dict(source=source),
27 code=open(join(dirname(__file__), "download.js")).read())
28
29 columns = [
30 TableColumn(field="name", title="Employee Name"),
31 TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
32 TableColumn(field="years_experience", title="Experience (years)")
33 ]
34
35 data_table = DataTable(source=source, columns=columns, width=800)
36
37 controls = widgetbox(slider, button)
38 table = widgetbox(data_table)
39
40 curdoc().add_root(row(controls, table))
41 curdoc().title = "Export CSV"
42
43 update()
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/app/export_csv/main.py b/examples/app/export_csv/main.py
--- a/examples/app/export_csv/main.py
+++ b/examples/app/export_csv/main.py
@@ -4,7 +4,7 @@
from bokeh.layouts import row, widgetbox
from bokeh.models import ColumnDataSource, CustomJS
-from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter
+from bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter
from bokeh.io import curdoc
df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
@@ -12,14 +12,14 @@
source = ColumnDataSource(data=dict())
def update():
- current = df[df['salary'] <= slider.value].dropna()
+ current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()
source.data = {
'name' : current.name,
'salary' : current.salary,
'years_experience' : current.years_experience,
}
-slider = Slider(title="Max Salary", start=10000, end=250000, value=150000, step=1000)
+slider = RangeSlider(title="Max Salary", start=10000, end=110000, value=(10000, 50000), step=1000, format="0,0")
slider.on_change('value', lambda attr, old, new: update())
button = Button(label="Download", button_type="success")
|
{"golden_diff": "diff --git a/examples/app/export_csv/main.py b/examples/app/export_csv/main.py\n--- a/examples/app/export_csv/main.py\n+++ b/examples/app/export_csv/main.py\n@@ -4,7 +4,7 @@\n \n from bokeh.layouts import row, widgetbox\n from bokeh.models import ColumnDataSource, CustomJS\n-from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter\n+from bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter\n from bokeh.io import curdoc\n \n df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))\n@@ -12,14 +12,14 @@\n source = ColumnDataSource(data=dict())\n \n def update():\n- current = df[df['salary'] <= slider.value].dropna()\n+ current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()\n source.data = {\n 'name' : current.name,\n 'salary' : current.salary,\n 'years_experience' : current.years_experience,\n }\n \n-slider = Slider(title=\"Max Salary\", start=10000, end=250000, value=150000, step=1000)\n+slider = RangeSlider(title=\"Max Salary\", start=10000, end=110000, value=(10000, 50000), step=1000, format=\"0,0\")\n slider.on_change('value', lambda attr, old, new: update())\n \n button = Button(label=\"Download\", button_type=\"success\")\n", "issue": "export_csv example under 0.12.7dev11 doesn't resize table\nRunning example export_csv from https://github.com/bokeh/bokeh/tree/master/examples/app/export_csv\r\n\r\nunder 0.12.7dev11 the table doesn't resize and extra rows are filled with 'undefined', '$NaN'.\r\nThe number of rows is 248 and doesn't change when moving the slider.\r\nThe rows after 248 are not shown.\r\n\r\nUnder 0.12.6 everything works perfect.\n", "before_files": [{"content": "from os.path import dirname, join\n\nimport pandas as pd\n\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.models import ColumnDataSource, CustomJS\nfrom bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter\nfrom bokeh.io import curdoc\n\ndf = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))\n\nsource = ColumnDataSource(data=dict())\n\ndef update():\n current = df[df['salary'] <= slider.value].dropna()\n source.data = {\n 'name' : current.name,\n 'salary' : current.salary,\n 'years_experience' : current.years_experience,\n }\n\nslider = Slider(title=\"Max Salary\", start=10000, end=250000, value=150000, step=1000)\nslider.on_change('value', lambda attr, old, new: update())\n\nbutton = Button(label=\"Download\", button_type=\"success\")\nbutton.callback = CustomJS(args=dict(source=source),\n code=open(join(dirname(__file__), \"download.js\")).read())\n\ncolumns = [\n TableColumn(field=\"name\", title=\"Employee Name\"),\n TableColumn(field=\"salary\", title=\"Income\", formatter=NumberFormatter(format=\"$0,0.00\")),\n TableColumn(field=\"years_experience\", title=\"Experience (years)\")\n]\n\ndata_table = DataTable(source=source, columns=columns, width=800)\n\ncontrols = widgetbox(slider, button)\ntable = widgetbox(data_table)\n\ncurdoc().add_root(row(controls, table))\ncurdoc().title = \"Export CSV\"\n\nupdate()\n", "path": "examples/app/export_csv/main.py"}], "after_files": [{"content": "from os.path import dirname, join\n\nimport pandas as pd\n\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.models import ColumnDataSource, CustomJS\nfrom bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter\nfrom bokeh.io import curdoc\n\ndf = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))\n\nsource = 
ColumnDataSource(data=dict())\n\ndef update():\n current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()\n source.data = {\n 'name' : current.name,\n 'salary' : current.salary,\n 'years_experience' : current.years_experience,\n }\n\nslider = RangeSlider(title=\"Max Salary\", start=10000, end=110000, value=(10000, 50000), step=1000, format=\"0,0\")\nslider.on_change('value', lambda attr, old, new: update())\n\nbutton = Button(label=\"Download\", button_type=\"success\")\nbutton.callback = CustomJS(args=dict(source=source),\n code=open(join(dirname(__file__), \"download.js\")).read())\n\ncolumns = [\n TableColumn(field=\"name\", title=\"Employee Name\"),\n TableColumn(field=\"salary\", title=\"Income\", formatter=NumberFormatter(format=\"$0,0.00\")),\n TableColumn(field=\"years_experience\", title=\"Experience (years)\")\n]\n\ndata_table = DataTable(source=source, columns=columns, width=800)\n\ncontrols = widgetbox(slider, button)\ntable = widgetbox(data_table)\n\ncurdoc().add_root(row(controls, table))\ncurdoc().title = \"Export CSV\"\n\nupdate()\n", "path": "examples/app/export_csv/main.py"}]}
| 797 | 361 |
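The key change in the record above is filtering on both ends of a `RangeSlider`'s `(low, high)` value instead of a single `Slider` threshold. Below is a standalone pandas sketch of that filter, using a made-up stand-in for `salary_data.csv`.

```python
import pandas as pd

# Toy frame standing in for salary_data.csv (values are made up).
df = pd.DataFrame({
    "name": ["A", "B", "C"],
    "salary": [25_000, 60_000, 120_000],
    "years_experience": [1, 5, 12],
})

low, high = (10_000, 50_000)  # RangeSlider.value is a (low, high) tuple
current = df[(df["salary"] >= low) & (df["salary"] <= high)].dropna()
print(current["name"].tolist())  # ['A']
```

Inside the Bokeh app the same expression runs in `update()`, with `slider.value` supplying the tuple.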
gh_patches_debug_22499
|
rasdani/github-patches
|
git_diff
|
AUTOMATIC1111__stable-diffusion-webui-737
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GFPGAN restore faces error
Using GFPGAN restore faces gives the following error:
Traceback (most recent call last):
File "/home/x/stable-diff/stable-diffusion-webui/modules/ui.py", line 128, in f
res = list(func(*args, **kwargs))
File "/home/x/stable-diff/stable-diffusion-webui/webui.py", line 55, in f
res = func(*args, **kwargs)
File "/home/x/stable-diff/stable-diffusion-webui/modules/txt2img.py", line 39, in txt2img
processed = process_images(p)
File "/home/x/stable-diff/stable-diffusion-webui/modules/processing.py", line 314, in process_images
x_sample = modules.face_restoration.restore_faces(x_sample)
File "/home/x/stable-diff/stable-diffusion-webui/modules/face_restoration.py", line 19, in restore_faces
return face_restorer.restore(np_image)
File "/home/x/stable-diff/stable-diffusion-webui/modules/codeformer_model.py", line 79, in restore
self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
File "/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/utils/face_restoration_helper.py", line 151, in get_face_landmarks_5
bboxes = self.face_det.detect_faces(input_img)
File "/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/detection/retinaface/retinaface.py", line 231, in detect_faces
keep = py_cpu_nms(bounding_boxes, nms_threshold)
File "/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/detection/retinaface/retinaface_utils.py", line 41, in py_cpu_nms
keep = torchvision.ops.nms(
File "/home/x/.local/lib/python3.10/site-packages/torchvision/ops/boxes.py", line 40, in nms
_assert_has_ops()
File "/home/x/.local/lib/python3.10/site-packages/torchvision/extension.py", line 33, in _assert_has_ops
raise RuntimeError(
RuntimeError: Couldn't load custom C++ ops. This can happen if your PyTorch and torchvision versions are incompatible, or if you had errors while compiling torchvision from source. For further information on the compatible versions, check https://github.com/pytorch/vision#installation for the compatibility matrix. Please check your PyTorch version with torch.__version__ and your torchvision version with torchvision.__version__ and verify if they are compatible, and if not please reinstall torchvision so that it matches your PyTorch install.
Running: python -c "import torch; import torchvision; print(torch.__version__); print(torchvision.__version__)"
Gives the following results:
1.12.1+cu113
0.13.1+cu102
on Latest Arch Linux.
GFPGAN works without issues in this similar tool: https://github.com/cmdr2/stable-diffusion-ui
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `launch.py`
Content:
```
1 # this scripts installs necessary requirements and launches main program in webui.py
2
3 import subprocess
4 import os
5 import sys
6 import importlib.util
7 import shlex
8
9 dir_repos = "repositories"
10 dir_tmp = "tmp"
11
12 python = sys.executable
13 git = os.environ.get('GIT', "git")
14 torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
15 requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
16 commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
17
18 k_diffusion_package = os.environ.get('K_DIFFUSION_PACKAGE', "git+https://github.com/crowsonkb/k-diffusion.git@1a0703dfb7d24d8806267c3e7ccc4caf67fd1331")
19 gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
20
21 stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
22 taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
23 codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
24 blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
25
26 args = shlex.split(commandline_args)
27
28
29 def extract_arg(args, name):
30 return [x for x in args if x != name], name in args
31
32
33 args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
34
35
36 def repo_dir(name):
37 return os.path.join(dir_repos, name)
38
39
40 def run(command, desc=None, errdesc=None):
41 if desc is not None:
42 print(desc)
43
44 result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
45
46 if result.returncode != 0:
47
48 message = f"""{errdesc or 'Error running command'}.
49 Command: {command}
50 Error code: {result.returncode}
51 stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
52 stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
53 """
54 raise RuntimeError(message)
55
56 return result.stdout.decode(encoding="utf8", errors="ignore")
57
58
59 def run_python(code, desc=None, errdesc=None):
60 return run(f'"{python}" -c "{code}"', desc, errdesc)
61
62
63 def run_pip(args, desc=None):
64 return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
65
66
67 def check_run(command):
68 result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
69 return result.returncode == 0
70
71
72 def check_run_python(code):
73 return check_run(f'"{python}" -c "{code}"')
74
75
76 def is_installed(package):
77 try:
78 spec = importlib.util.find_spec(package)
79 except ModuleNotFoundError:
80 return False
81
82 return spec is not None
83
84
85 def git_clone(url, dir, name, commithash=None):
86 # TODO clone into temporary dir and move if successful
87
88 if os.path.exists(dir):
89 return
90
91 run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
92
93 if commithash is not None:
94 run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
95
96
97 try:
98 commit = run(f"{git} rev-parse HEAD").strip()
99 except Exception:
100 commit = "<none>"
101
102 print(f"Python {sys.version}")
103 print(f"Commit hash: {commit}")
104
105 if not is_installed("torch"):
106 run(f'"{python}" -m {torch_command}', "Installing torch", "Couldn't install torch")
107
108 if not skip_torch_cuda_test:
109 run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDINE_ARGS variable to disable this check'")
110
111 if not is_installed("k_diffusion.sampling"):
112 run_pip(f"install {k_diffusion_package}", "k-diffusion")
113
114 if not is_installed("gfpgan"):
115 run_pip(f"install {gfpgan_package}", "gfpgan")
116
117 os.makedirs(dir_repos, exist_ok=True)
118
119 git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
120 git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
121 git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
122 git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
123
124 if not is_installed("lpips"):
125 run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
126
127 run_pip(f"install -r {requirements_file}", "requirements for Web UI")
128
129 sys.argv += args
130
131
132 def start_webui():
133 print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
134 import webui
135 webui.webui()
136
137 if __name__ == "__main__":
138 start_webui()
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/launch.py b/launch.py
--- a/launch.py
+++ b/launch.py
@@ -11,7 +11,7 @@
python = sys.executable
git = os.environ.get('GIT', "git")
-torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
+torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
@@ -92,8 +92,9 @@
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
-if not is_installed("torch"):
- run(f'"{python}" -m {torch_command}', "Installing torch", "Couldn't install torch")
+
+if not is_installed("torch") or not is_installed("torchvision"):
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU'")
|
{"golden_diff": "diff --git a/launch.py b/launch.py\n--- a/launch.py\n+++ b/launch.py\n@@ -11,7 +11,7 @@\n \r\n python = sys.executable\r\n git = os.environ.get('GIT', \"git\")\r\n-torch_command = os.environ.get('TORCH_COMMAND', \"pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113\")\r\n+torch_command = os.environ.get('TORCH_COMMAND', \"pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113\")\r\n requirements_file = os.environ.get('REQS_FILE', \"requirements_versions.txt\")\r\n commandline_args = os.environ.get('COMMANDLINE_ARGS', \"\")\r\n \r\n@@ -92,8 +92,9 @@\n print(f\"Python {sys.version}\")\r\n print(f\"Commit hash: {commit}\")\r\n \r\n-if not is_installed(\"torch\"):\r\n- run(f'\"{python}\" -m {torch_command}', \"Installing torch\", \"Couldn't install torch\")\r\n+\r\n+if not is_installed(\"torch\") or not is_installed(\"torchvision\"):\r\n+ run(f'\"{python}\" -m {torch_command}', \"Installing torch and torchvision\", \"Couldn't install torch\")\r\n \r\n run_python(\"import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU'\")\n", "issue": "GFPGAN restore faces error\nUsing GFPGAN restore faces gives following error\r\n\r\nTraceback (most recent call last):\r\n File \"/home/x/stable-diff/stable-diffusion-webui/modules/ui.py\", line 128, in f\r\n res = list(func(*args, **kwargs))\r\n File \"/home/x/stable-diff/stable-diffusion-webui/webui.py\", line 55, in f\r\n res = func(*args, **kwargs)\r\n File \"/home/x/stable-diff/stable-diffusion-webui/modules/txt2img.py\", line 39, in txt2img\r\n processed = process_images(p)\r\n File \"/home/x/stable-diff/stable-diffusion-webui/modules/processing.py\", line 314, in process_images\r\n x_sample = modules.face_restoration.restore_faces(x_sample)\r\n File \"/home/x/stable-diff/stable-diffusion-webui/modules/face_restoration.py\", line 19, in restore_faces\r\n return face_restorer.restore(np_image)\r\n File \"/home/x/stable-diff/stable-diffusion-webui/modules/codeformer_model.py\", line 79, in restore\r\n self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)\r\n File \"/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/utils/face_restoration_helper.py\", line 151, in get_face_landmarks_5\r\n bboxes = self.face_det.detect_faces(input_img)\r\n File \"/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/detection/retinaface/retinaface.py\", line 231, in detect_faces\r\n keep = py_cpu_nms(bounding_boxes, nms_threshold)\r\n File \"/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/detection/retinaface/retinaface_utils.py\", line 41, in py_cpu_nms\r\n keep = torchvision.ops.nms(\r\n File \"/home/x/.local/lib/python3.10/site-packages/torchvision/ops/boxes.py\", line 40, in nms\r\n _assert_has_ops()\r\n File \"/home/x/.local/lib/python3.10/site-packages/torchvision/extension.py\", line 33, in _assert_has_ops\r\n raise RuntimeError(\r\nRuntimeError: Couldn't load custom C++ ops. This can happen if your PyTorch and torchvision versions are incompatible, or if you had errors while compiling torchvision from source. For further information on the compatible versions, check https://github.com/pytorch/vision#installation for the compatibility matrix. 
Please check your PyTorch version with torch.__version__ and your torchvision version with torchvision.__version__ and verify if they are compatible, and if not please reinstall torchvision so that it matches your PyTorch install.\r\n\r\n\r\nRunning: python -c \"import torch; import torchvision; print(torch.__version__); print(torchvision.__version__)\"\r\n\r\nGives the following results:\r\n1.12.1+cu113\r\n0.13.1+cu102\r\n\r\n\r\non Latest Arch Linux.\r\n\r\nGFPGAN works without issues in this similar tool: https://github.com/cmdr2/stable-diffusion-ui\r\n\r\n\n", "before_files": [{"content": "# this scripts installs necessary requirements and launches main program in webui.py\r\n\r\nimport subprocess\r\nimport os\r\nimport sys\r\nimport importlib.util\r\nimport shlex\r\n\r\ndir_repos = \"repositories\"\r\ndir_tmp = \"tmp\"\r\n\r\npython = sys.executable\r\ngit = os.environ.get('GIT', \"git\")\r\ntorch_command = os.environ.get('TORCH_COMMAND', \"pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113\")\r\nrequirements_file = os.environ.get('REQS_FILE', \"requirements_versions.txt\")\r\ncommandline_args = os.environ.get('COMMANDLINE_ARGS', \"\")\r\n\r\nk_diffusion_package = os.environ.get('K_DIFFUSION_PACKAGE', \"git+https://github.com/crowsonkb/k-diffusion.git@1a0703dfb7d24d8806267c3e7ccc4caf67fd1331\")\r\ngfpgan_package = os.environ.get('GFPGAN_PACKAGE', \"git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379\")\r\n\r\nstable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', \"69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc\")\r\ntaming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', \"24268930bf1dce879235a7fddd0b2355b84d7ea6\")\r\ncodeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', \"c5b4593074ba6214284d6acd5f1719b6c5d739af\")\r\nblip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', \"48211a1594f1321b00f14c9f7a5b4813144b2fb9\")\r\n\r\nargs = shlex.split(commandline_args)\r\n\r\n\r\ndef extract_arg(args, name):\r\n return [x for x in args if x != name], name in args\r\n\r\n\r\nargs, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')\r\n\r\n\r\ndef repo_dir(name):\r\n return os.path.join(dir_repos, name)\r\n\r\n\r\ndef run(command, desc=None, errdesc=None):\r\n if desc is not None:\r\n print(desc)\r\n\r\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n\r\n if result.returncode != 0:\r\n\r\n message = f\"\"\"{errdesc or 'Error running command'}.\r\nCommand: {command}\r\nError code: {result.returncode}\r\nstdout: {result.stdout.decode(encoding=\"utf8\", errors=\"ignore\") if len(result.stdout)>0 else '<empty>'}\r\nstderr: {result.stderr.decode(encoding=\"utf8\", errors=\"ignore\") if len(result.stderr)>0 else '<empty>'}\r\n\"\"\"\r\n raise RuntimeError(message)\r\n\r\n return result.stdout.decode(encoding=\"utf8\", errors=\"ignore\")\r\n\r\n\r\ndef run_python(code, desc=None, errdesc=None):\r\n return run(f'\"{python}\" -c \"{code}\"', desc, errdesc)\r\n\r\n\r\ndef run_pip(args, desc=None):\r\n return run(f'\"{python}\" -m pip {args} --prefer-binary', desc=f\"Installing {desc}\", errdesc=f\"Couldn't install {desc}\")\r\n\r\n\r\ndef check_run(command):\r\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n return result.returncode == 0\r\n\r\n\r\ndef check_run_python(code):\r\n return check_run(f'\"{python}\" -c \"{code}\"')\r\n\r\n\r\ndef 
is_installed(package):\r\n try:\r\n spec = importlib.util.find_spec(package)\r\n except ModuleNotFoundError:\r\n return False\r\n\r\n return spec is not None\r\n\r\n\r\ndef git_clone(url, dir, name, commithash=None):\r\n # TODO clone into temporary dir and move if successful\r\n\r\n if os.path.exists(dir):\r\n return\r\n\r\n run(f'\"{git}\" clone \"{url}\" \"{dir}\"', f\"Cloning {name} into {dir}...\", f\"Couldn't clone {name}\")\r\n\r\n if commithash is not None:\r\n run(f'\"{git}\" -C {dir} checkout {commithash}', None, \"Couldn't checkout {name}'s hash: {commithash}\")\r\n\r\n\r\ntry:\r\n commit = run(f\"{git} rev-parse HEAD\").strip()\r\nexcept Exception:\r\n commit = \"<none>\"\r\n\r\nprint(f\"Python {sys.version}\")\r\nprint(f\"Commit hash: {commit}\")\r\n\r\nif not is_installed(\"torch\"):\r\n run(f'\"{python}\" -m {torch_command}', \"Installing torch\", \"Couldn't install torch\")\r\n\r\nif not skip_torch_cuda_test:\r\n run_python(\"import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDINE_ARGS variable to disable this check'\")\r\n\r\nif not is_installed(\"k_diffusion.sampling\"):\r\n run_pip(f\"install {k_diffusion_package}\", \"k-diffusion\")\r\n\r\nif not is_installed(\"gfpgan\"):\r\n run_pip(f\"install {gfpgan_package}\", \"gfpgan\")\r\n\r\nos.makedirs(dir_repos, exist_ok=True)\r\n\r\ngit_clone(\"https://github.com/CompVis/stable-diffusion.git\", repo_dir('stable-diffusion'), \"Stable Diffusion\", stable_diffusion_commit_hash)\r\ngit_clone(\"https://github.com/CompVis/taming-transformers.git\", repo_dir('taming-transformers'), \"Taming Transformers\", taming_transformers_commit_hash)\r\ngit_clone(\"https://github.com/sczhou/CodeFormer.git\", repo_dir('CodeFormer'), \"CodeFormer\", codeformer_commit_hash)\r\ngit_clone(\"https://github.com/salesforce/BLIP.git\", repo_dir('BLIP'), \"BLIP\", blip_commit_hash)\r\n\r\nif not is_installed(\"lpips\"):\r\n run_pip(f\"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\", \"requirements for CodeFormer\")\r\n\r\nrun_pip(f\"install -r {requirements_file}\", \"requirements for Web UI\")\r\n\r\nsys.argv += args\r\n\r\n\r\ndef start_webui():\r\n print(f\"Launching Web UI with arguments: {' '.join(sys.argv[1:])}\")\r\n import webui\r\n webui.webui()\r\n\r\nif __name__ == \"__main__\":\r\n start_webui()\r\n", "path": "launch.py"}], "after_files": [{"content": "# this scripts installs necessary requirements and launches main program in webui.py\r\n\r\nimport subprocess\r\nimport os\r\nimport sys\r\nimport importlib.util\r\nimport shlex\r\n\r\ndir_repos = \"repositories\"\r\ndir_tmp = \"tmp\"\r\n\r\npython = sys.executable\r\ngit = os.environ.get('GIT', \"git\")\r\ntorch_command = os.environ.get('TORCH_COMMAND', \"pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113\")\r\nrequirements_file = os.environ.get('REQS_FILE', \"requirements_versions.txt\")\r\ncommandline_args = os.environ.get('COMMANDLINE_ARGS', \"\")\r\n\r\nk_diffusion_package = os.environ.get('K_DIFFUSION_PACKAGE', \"git+https://github.com/crowsonkb/k-diffusion.git@1a0703dfb7d24d8806267c3e7ccc4caf67fd1331\")\r\ngfpgan_package = os.environ.get('GFPGAN_PACKAGE', \"git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379\")\r\n\r\nstable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', \"69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc\")\r\ntaming_transformers_commit_hash = 
os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', \"24268930bf1dce879235a7fddd0b2355b84d7ea6\")\r\ncodeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', \"c5b4593074ba6214284d6acd5f1719b6c5d739af\")\r\nblip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', \"48211a1594f1321b00f14c9f7a5b4813144b2fb9\")\r\n\r\ndef repo_dir(name):\r\n return os.path.join(dir_repos, name)\r\n\r\n\r\ndef run(command, desc=None, errdesc=None):\r\n if desc is not None:\r\n print(desc)\r\n\r\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n\r\n if result.returncode != 0:\r\n\r\n message = f\"\"\"{errdesc or 'Error running command'}.\r\nCommand: {command}\r\nError code: {result.returncode}\r\nstdout: {result.stdout.decode(encoding=\"utf8\", errors=\"ignore\") if len(result.stdout)>0 else '<empty>'}\r\nstderr: {result.stderr.decode(encoding=\"utf8\", errors=\"ignore\") if len(result.stderr)>0 else '<empty>'}\r\n\"\"\"\r\n raise RuntimeError(message)\r\n\r\n return result.stdout.decode(encoding=\"utf8\", errors=\"ignore\")\r\n\r\n\r\ndef run_python(code, desc=None, errdesc=None):\r\n return run(f'\"{python}\" -c \"{code}\"', desc, errdesc)\r\n\r\n\r\ndef run_pip(args, desc=None):\r\n return run(f'\"{python}\" -m pip {args} --prefer-binary', desc=f\"Installing {desc}\", errdesc=f\"Couldn't install {desc}\")\r\n\r\n\r\ndef check_run(command):\r\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n return result.returncode == 0\r\n\r\n\r\ndef check_run_python(code):\r\n return check_run(f'\"{python}\" -c \"{code}\"')\r\n\r\n\r\ndef is_installed(package):\r\n try:\r\n spec = importlib.util.find_spec(package)\r\n except ModuleNotFoundError:\r\n return False\r\n\r\n return spec is not None\r\n\r\n\r\ndef git_clone(url, dir, name, commithash=None):\r\n # TODO clone into temporary dir and move if successful\r\n\r\n if os.path.exists(dir):\r\n return\r\n\r\n run(f'\"{git}\" clone \"{url}\" \"{dir}\"', f\"Cloning {name} into {dir}...\", f\"Couldn't clone {name}\")\r\n\r\n if commithash is not None:\r\n run(f'\"{git}\" -C {dir} checkout {commithash}', None, \"Couldn't checkout {name}'s hash: {commithash}\")\r\n\r\n\r\ntry:\r\n commit = run(f\"{git} rev-parse HEAD\").strip()\r\nexcept Exception:\r\n commit = \"<none>\"\r\n\r\nprint(f\"Python {sys.version}\")\r\nprint(f\"Commit hash: {commit}\")\r\n\r\n\r\nif not is_installed(\"torch\") or not is_installed(\"torchvision\"):\r\n run(f'\"{python}\" -m {torch_command}', \"Installing torch and torchvision\", \"Couldn't install torch\")\r\n\r\nrun_python(\"import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU'\")\r\n\r\nif not is_installed(\"k_diffusion.sampling\"):\r\n run_pip(f\"install {k_diffusion_package}\", \"k-diffusion\")\r\n\r\nif not is_installed(\"gfpgan\"):\r\n run_pip(f\"install {gfpgan_package}\", \"gfpgan\")\r\n\r\nos.makedirs(dir_repos, exist_ok=True)\r\n\r\ngit_clone(\"https://github.com/CompVis/stable-diffusion.git\", repo_dir('stable-diffusion'), \"Stable Diffusion\", stable_diffusion_commit_hash)\r\ngit_clone(\"https://github.com/CompVis/taming-transformers.git\", repo_dir('taming-transformers'), \"Taming Transformers\", taming_transformers_commit_hash)\r\ngit_clone(\"https://github.com/sczhou/CodeFormer.git\", repo_dir('CodeFormer'), \"CodeFormer\", codeformer_commit_hash)\r\ngit_clone(\"https://github.com/salesforce/BLIP.git\", repo_dir('BLIP'), \"BLIP\", blip_commit_hash)\r\n\r\nif not is_installed(\"lpips\"):\r\n run_pip(f\"install -r 
{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\", \"requirements for CodeFormer\")\r\n\r\nrun_pip(f\"install -r {requirements_file}\", \"requirements for Web UI\")\r\n\r\nsys.argv += shlex.split(commandline_args)\r\n\r\n\r\ndef start_webui():\r\n print(f\"Launching Web UI with arguments: {' '.join(sys.argv[1:])}\")\r\n import webui\r\n webui.webui()\r\n\r\n\r\nstart_webui()\r\n", "path": "launch.py"}]}
| 2,759 | 319 |
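
The golden diff in the entry above resolves the GFPGAN/torchvision failure by pinning torch and torchvision to matching CUDA builds and installing them together. As a minimal standalone sketch of that guard — the `ensure_torch_stack` helper name is illustrative and not from the repository, while the install command is taken verbatim from the diff:

```python
import importlib.util
import subprocess
import sys

# Install command taken verbatim from the golden diff above.
TORCH_COMMAND = (
    "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 "
    "--extra-index-url https://download.pytorch.org/whl/cu113"
)


def is_installed(package: str) -> bool:
    # Same check the launcher uses: a package counts as installed if a module spec is found.
    try:
        return importlib.util.find_spec(package) is not None
    except ModuleNotFoundError:
        return False


def ensure_torch_stack() -> None:
    # Install torch and torchvision together so their CUDA builds cannot drift apart.
    if not is_installed("torch") or not is_installed("torchvision"):
        subprocess.run(f'"{sys.executable}" -m {TORCH_COMMAND}', shell=True, check=True)


if __name__ == "__main__":
    ensure_torch_stack()
    import torch
    import torchvision
    # Mismatched builds (e.g. +cu113 torch with +cu102 torchvision) are what triggered the issue.
    print(torch.__version__, torchvision.__version__)
```

Installing the two packages in one command from the same index URL is the key point: it prevents the `+cu113` / `+cu102` split that made `torchvision.ops.nms` fail to load its custom C++ ops.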
gh_patches_debug_9720
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-21138
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
triu_indices
Working on (#8431 -> #18033)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/creation.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from .tensor import Tensor
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):
12 array = ivy.array(data, dtype=dtype, device=place)
13 return Tensor(array, dtype=dtype, place=place)
14
15
16 @with_unsupported_dtypes({"2.5.1 and below": "int8"}, "paddle")
17 @to_ivy_arrays_and_back
18 def ones(shape, /, *, dtype=None, name=None):
19 dtype = "float32" if dtype is None else dtype
20 return ivy.ones(shape, dtype=dtype)
21
22
23 @with_unsupported_dtypes(
24 {"2.5.1 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
25 )
26 @to_ivy_arrays_and_back
27 def ones_like(x, /, *, dtype=None, name=None):
28 dtype = x.dtype if dtype is None else dtype
29 return ivy.ones_like(x, dtype=dtype)
30
31
32 @with_unsupported_dtypes({"2.5.1 and below": "int8"}, "paddle")
33 @to_ivy_arrays_and_back
34 def zeros(shape, /, *, dtype=None, name=None):
35 dtype = "float32" if dtype is None else dtype
36 return ivy.zeros(shape, dtype=dtype)
37
38
39 @with_unsupported_dtypes(
40 {"2.5.1 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
41 )
42 @to_ivy_arrays_and_back
43 def zeros_like(x, /, *, dtype=None, name=None):
44 dtype = x.dtype if dtype is None else dtype
45 return ivy.zeros_like(x, dtype=dtype)
46
47
48 @to_ivy_arrays_and_back
49 def full(shape, fill_value, /, *, dtype=None, name=None):
50 dtype = "float32" if dtype is None else dtype
51 return ivy.full(shape, fill_value, dtype=dtype)
52
53
54 @to_ivy_arrays_and_back
55 def full_like(x, fill_value, /, *, dtype=None, name=None):
56 dtype = x.dtype if dtype is None else dtype
57 return ivy.full_like(x, fill_value, dtype=dtype)
58
59
60 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
61 @to_ivy_arrays_and_back
62 def arange(start, end=None, step=1, dtype=None, name=None):
63 return ivy.arange(start, end, step=step, dtype=dtype)
64
65
66 @to_ivy_arrays_and_back
67 def empty(shape, dtype=None):
68 return ivy.empty(shape=shape, dtype=dtype)
69
70
71 @to_ivy_arrays_and_back
72 def eye(num_rows, num_columns=None, dtype=None, name=None):
73 return ivy.eye(num_rows, num_columns, dtype=dtype)
74
75
76 @to_ivy_arrays_and_back
77 def empty_like(x, dtype=None, name=None):
78 return ivy.empty_like(x, dtype=dtype)
79
80
81 @with_unsupported_dtypes(
82 {
83 "2.5.1 and below": (
84 "uint8",
85 "int8",
86 "int16",
87 "float16",
88 "complex64",
89 "complex128",
90 "bool",
91 )
92 },
93 "paddle",
94 )
95 @to_ivy_arrays_and_back
96 def tril(x, diagonal=0, name=None):
97 return ivy.tril(x, k=diagonal)
98
99
100 @with_unsupported_dtypes(
101 {
102 "2.5.1 and below": (
103 "uint8",
104 "int8",
105 "int16",
106 "float16",
107 "complex64",
108 "complex128",
109 "bool",
110 )
111 },
112 "paddle",
113 )
114 @to_ivy_arrays_and_back
115 def triu(x, diagonal=0, name=None):
116 return ivy.triu(x, k=diagonal)
117
118
119 @with_supported_dtypes(
120 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
121 )
122 @to_ivy_arrays_and_back
123 def diagflat(x, offset=0, name=None):
124 arr = ivy.diagflat(x, offset=offset)
125 return arr
126
127
128 @with_supported_dtypes(
129 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
130 )
131 @to_ivy_arrays_and_back
132 def meshgrid(*args, **kwargs):
133 return ivy.meshgrid(*args, indexing="ij")
134
135
136 @with_supported_dtypes({"2.5.1 and below": ("int32", "int64")}, "paddle")
137 @to_ivy_arrays_and_back
138 def tril_indices(row, col, offset=0, dtype="int64"):
139 arr = ivy.tril_indices(row, col, offset)
140 arr = ivy.astype(arr, dtype)
141 return arr
142
143
144 @with_supported_dtypes(
145 {"2.5.1 and below": ("float16", "float32", "float64", "int32", "int64", "bool")},
146 "paddle",
147 )
148 @to_ivy_arrays_and_back
149 def assign(x, output=None):
150 if len(ivy.shape(x)) == 0:
151 x = ivy.reshape(ivy.Array(x), (1,))
152 if ivy.exists(output):
153 output = ivy.reshape(ivy.Array(output), (1,))
154 else:
155 x = ivy.reshape(x, ivy.shape(x))
156 ret = ivy.copy_array(x, to_ivy_array=False, out=output)
157 return ret
158
159
160 @with_supported_dtypes(
161 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
162 )
163 @to_ivy_arrays_and_back
164 def diag(x, offset=0, padding_value=0, name=None):
165 if len(x.shape) == 1:
166 padding_value = ivy.astype(padding_value, ivy.dtype(x))
167 ret = ivy.diagflat(x, offset=offset, padding_value=padding_value)
168 if len(ret.shape) != 2:
169 ret = ivy.reshape(ret, (1, 1))
170 else:
171 ret = ivy.diag(x, k=offset)
172 return ret
173
174
175 @with_supported_dtypes(
176 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
177 )
178 @to_ivy_arrays_and_back
179 def logspace(start, stop, num, base=10.0, dtype=None, name=None):
180 return ivy.logspace(start, stop, num=num, base=base, dtype=dtype)
181
182
183 @with_supported_dtypes(
184 {"2.5.1 and below": ("float32", "float64")},
185 "paddle",
186 )
187 @to_ivy_arrays_and_back
188 def complex(real, imag, name=None):
189 assert real.dtype == imag.dtype, (
190 "(InvalidArgument) The type of data we are trying to retrieve does not match"
191 " the type of data currently contained in the container."
192 )
193 complex_dtype = "complex64" if real.dtype == "float32" else "complex128"
194 imag_cmplx = ivy.astype(imag, complex_dtype) * 1j
195 complex_array = real + imag_cmplx
196 return complex_array
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py
--- a/ivy/functional/frontends/paddle/tensor/creation.py
+++ b/ivy/functional/frontends/paddle/tensor/creation.py
@@ -180,6 +180,16 @@
return ivy.logspace(start, stop, num=num, base=base, dtype=dtype)
+@with_supported_dtypes({"2.5.1 and below": ("int32", "int64")}, "paddle")
+@to_ivy_arrays_and_back
+def triu_indices(row, col=None, offset=0, dtype="int64"):
+ arr = ivy.triu_indices(row, col, offset)
+ if not ivy.to_scalar(ivy.shape(arr[0], as_array=True)):
+ return arr
+ arr = ivy.astype(arr, dtype)
+ return arr
+
+
@with_supported_dtypes(
{"2.5.1 and below": ("float32", "float64")},
"paddle",
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py\n--- a/ivy/functional/frontends/paddle/tensor/creation.py\n+++ b/ivy/functional/frontends/paddle/tensor/creation.py\n@@ -180,6 +180,16 @@\n return ivy.logspace(start, stop, num=num, base=base, dtype=dtype)\r\n \r\n \r\n+@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\r\n+@to_ivy_arrays_and_back\r\n+def triu_indices(row, col=None, offset=0, dtype=\"int64\"):\r\n+ arr = ivy.triu_indices(row, col, offset)\r\n+ if not ivy.to_scalar(ivy.shape(arr[0], as_array=True)):\r\n+ return arr\r\n+ arr = ivy.astype(arr, dtype)\r\n+ return arr\r\n+\r\n+\r\n @with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\r\n \"paddle\",\n", "issue": "triu_indices\nWorking on (#8431 -> #18033)\n", "before_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.1 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.1 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.1 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.1 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.1 and below\": (\r\n 
\"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.1 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef meshgrid(*args, **kwargs):\r\n return ivy.meshgrid(*args, indexing=\"ij\")\r\n\r\n\r\n@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef tril_indices(row, col, offset=0, dtype=\"int64\"):\r\n arr = ivy.tril_indices(row, col, offset)\r\n arr = ivy.astype(arr, dtype)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\", \"bool\")},\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef assign(x, output=None):\r\n if len(ivy.shape(x)) == 0:\r\n x = ivy.reshape(ivy.Array(x), (1,))\r\n if ivy.exists(output):\r\n output = ivy.reshape(ivy.Array(output), (1,))\r\n else:\r\n x = ivy.reshape(x, ivy.shape(x))\r\n ret = ivy.copy_array(x, to_ivy_array=False, out=output)\r\n return ret\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diag(x, offset=0, padding_value=0, name=None):\r\n if len(x.shape) == 1:\r\n padding_value = ivy.astype(padding_value, ivy.dtype(x))\r\n ret = ivy.diagflat(x, offset=offset, padding_value=padding_value)\r\n if len(ret.shape) != 2:\r\n ret = ivy.reshape(ret, (1, 1))\r\n else:\r\n ret = ivy.diag(x, k=offset)\r\n return ret\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef logspace(start, stop, num, base=10.0, dtype=None, name=None):\r\n return ivy.logspace(start, stop, num=num, base=base, dtype=dtype)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef complex(real, imag, name=None):\r\n assert real.dtype == imag.dtype, (\r\n \"(InvalidArgument) The type of data we are trying to retrieve does not match\"\r\n \" the type of data currently contained in the container.\"\r\n )\r\n complex_dtype = \"complex64\" if real.dtype == \"float32\" else \"complex128\"\r\n imag_cmplx = ivy.astype(imag, complex_dtype) * 1j\r\n complex_array = real + imag_cmplx\r\n return complex_array\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}], "after_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, 
dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.1 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.1 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.1 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.1 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.1 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.1 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef meshgrid(*args, **kwargs):\r\n return ivy.meshgrid(*args, indexing=\"ij\")\r\n\r\n\r\n@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef 
tril_indices(row, col, offset=0, dtype=\"int64\"):\r\n arr = ivy.tril_indices(row, col, offset)\r\n arr = ivy.astype(arr, dtype)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\", \"bool\")},\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef assign(x, output=None):\r\n if len(ivy.shape(x)) == 0:\r\n x = ivy.reshape(ivy.Array(x), (1,))\r\n if ivy.exists(output):\r\n output = ivy.reshape(ivy.Array(output), (1,))\r\n else:\r\n x = ivy.reshape(x, ivy.shape(x))\r\n ret = ivy.copy_array(x, to_ivy_array=False, out=output)\r\n return ret\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diag(x, offset=0, padding_value=0, name=None):\r\n if len(x.shape) == 1:\r\n padding_value = ivy.astype(padding_value, ivy.dtype(x))\r\n ret = ivy.diagflat(x, offset=offset, padding_value=padding_value)\r\n if len(ret.shape) != 2:\r\n ret = ivy.reshape(ret, (1, 1))\r\n else:\r\n ret = ivy.diag(x, k=offset)\r\n return ret\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef logspace(start, stop, num, base=10.0, dtype=None, name=None):\r\n return ivy.logspace(start, stop, num=num, base=base, dtype=dtype)\r\n\r\n\r\n@with_supported_dtypes({\"2.5.1 and below\": (\"int32\", \"int64\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef triu_indices(row, col=None, offset=0, dtype=\"int64\"):\r\n arr = ivy.triu_indices(row, col, offset)\r\n if not ivy.to_scalar(ivy.shape(arr[0], as_array=True)):\r\n return arr\r\n arr = ivy.astype(arr, dtype)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef complex(real, imag, name=None):\r\n assert real.dtype == imag.dtype, (\r\n \"(InvalidArgument) The type of data we are trying to retrieve does not match\"\r\n \" the type of data currently contained in the container.\"\r\n )\r\n complex_dtype = \"complex64\" if real.dtype == \"float32\" else \"complex128\"\r\n imag_cmplx = ivy.astype(imag, complex_dtype) * 1j\r\n complex_array = real + imag_cmplx\r\n return complex_array\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}]}
| 2,493 | 251 |
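
The entry above adds a Paddle-frontend `triu_indices` that delegates to `ivy.triu_indices` and casts the result to the requested integer dtype. For a concrete picture of what such index pairs look like, here is a small reference sketch using NumPy's long-standing equivalent; NumPy is only a stand-in here, and the frontend's exact return layout follows the diff above rather than this snippet:

```python
import numpy as np

# Indices on and above the first superdiagonal (offset / k = 1) of a 4x6 matrix.
rows, cols = np.triu_indices(4, k=1, m=6)

a = np.arange(24).reshape(4, 6)
print(rows)           # row coordinates of the selected upper-triangular positions
print(cols)           # matching column coordinates
print(a[rows, cols])  # the upper-triangular elements themselves
```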
gh_patches_debug_21709
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-706
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TensorBoardLogger and ModelCheckpoint are not using the same folder by default
## 🐛 Bug
(master branch)
By default, the TensorBoardLogger writes logs into `lightning_logs/0` but ModelCheckpoint writes checkpoint into `lightning_logs/version_0`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/logging/tensorboard.py`
Content:
```
1 import os
2 from warnings import warn
3 from argparse import Namespace
4 from pkg_resources import parse_version
5
6 import torch
7 import pandas as pd
8 from torch.utils.tensorboard import SummaryWriter
9
10 from .base import LightningLoggerBase, rank_zero_only
11
12
13 class TensorBoardLogger(LightningLoggerBase):
14 r"""
15
16 Log to local file system in TensorBoard format
17
18 Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to
19 `os.path.join(save_dir, name, version)`
20
21 Example
22 --------
23
24 .. code-block:: python
25
26 logger = TensorBoardLogger("tb_logs", name="my_model")
27 trainer = Trainer(logger=logger)
28 trainer.train(model)
29
30 Args:
31 save_dir (str): Save directory
32 name (str): Experiment name. Defaults to "default".
33 version (int): Experiment version. If version is not specified the logger inspects the save
34 directory for existing versions, then automatically assigns the next available version.
35 \**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.
36
37 """
38 NAME_CSV_TAGS = 'meta_tags.csv'
39
40 def __init__(self, save_dir, name="default", version=None, **kwargs):
41 super().__init__()
42 self.save_dir = save_dir
43 self._name = name
44 self._version = version
45
46 self._experiment = None
47 self.tags = {}
48 self.kwargs = kwargs
49
50 @property
51 def experiment(self):
52 r"""
53
54 Actual tensorboard object. To use tensorboard features do the following.
55
56 Example::
57
58 self.logger.experiment.some_tensorboard_function()
59
60 """
61 if self._experiment is not None:
62 return self._experiment
63
64 root_dir = os.path.join(self.save_dir, self.name)
65 os.makedirs(root_dir, exist_ok=True)
66 log_dir = os.path.join(root_dir, str(self.version))
67 self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)
68 return self._experiment
69
70 @rank_zero_only
71 def log_hyperparams(self, params):
72 if params is None:
73 return
74
75 # in case converting from namespace
76 if isinstance(params, Namespace):
77 params = vars(params)
78 params = dict(params)
79
80 if parse_version(torch.__version__) < parse_version("1.3.0"):
81 warn(
82 f"Hyperparameter logging is not available for Torch version {torch.__version__}."
83 " Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable"
84 " hyperparameter logging."
85 )
86 else:
87 # `add_hparams` requires both - hparams and metric
88 self.experiment.add_hparams(hparam_dict=params, metric_dict={})
89 # some alternative should be added
90 self.tags.update(params)
91
92 @rank_zero_only
93 def log_metrics(self, metrics, step=None):
94 for k, v in metrics.items():
95 if isinstance(v, torch.Tensor):
96 v = v.item()
97 self.experiment.add_scalar(k, v, step)
98
99 @rank_zero_only
100 def save(self):
101 try:
102 self.experiment.flush()
103 except AttributeError:
104 # you are using PT version (<v1.2) which does not have implemented flush
105 self.experiment._get_file_writer().flush()
106
107 # create a preudo standard path ala test-tube
108 dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)
109 if not os.path.isdir(dir_path):
110 dir_path = self.save_dir
111 # prepare the file path
112 meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)
113 # save the metatags file
114 df = pd.DataFrame({'key': list(self.tags.keys()),
115 'value': list(self.tags.values())})
116 df.to_csv(meta_tags_path, index=False)
117
118 @rank_zero_only
119 def finalize(self, status):
120 self.save()
121
122 @property
123 def name(self):
124 return self._name
125
126 @property
127 def version(self):
128 if self._version is None:
129 self._version = self._get_next_version()
130 return self._version
131
132 def _get_next_version(self):
133 root_dir = os.path.join(self.save_dir, self.name)
134 existing_versions = [
135 int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()
136 ]
137 if len(existing_versions) == 0:
138 return 0
139 else:
140 return max(existing_versions) + 1
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_lightning/logging/tensorboard.py b/pytorch_lightning/logging/tensorboard.py
--- a/pytorch_lightning/logging/tensorboard.py
+++ b/pytorch_lightning/logging/tensorboard.py
@@ -63,7 +63,7 @@
root_dir = os.path.join(self.save_dir, self.name)
os.makedirs(root_dir, exist_ok=True)
- log_dir = os.path.join(root_dir, str(self.version))
+ log_dir = os.path.join(root_dir, "version_" + str(self.version))
self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)
return self._experiment
@@ -131,9 +131,11 @@
def _get_next_version(self):
root_dir = os.path.join(self.save_dir, self.name)
- existing_versions = [
- int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()
- ]
+ existing_versions = []
+ for d in os.listdir(root_dir):
+ if os.path.isdir(os.path.join(root_dir, d)) and d.startswith("version_"):
+ existing_versions.append(int(d.split("_")[1]))
+
if len(existing_versions) == 0:
return 0
else:
|
{"golden_diff": "diff --git a/pytorch_lightning/logging/tensorboard.py b/pytorch_lightning/logging/tensorboard.py\n--- a/pytorch_lightning/logging/tensorboard.py\n+++ b/pytorch_lightning/logging/tensorboard.py\n@@ -63,7 +63,7 @@\n \n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n- log_dir = os.path.join(root_dir, str(self.version))\n+ log_dir = os.path.join(root_dir, \"version_\" + str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n \n@@ -131,9 +131,11 @@\n \n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n- existing_versions = [\n- int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()\n- ]\n+ existing_versions = []\n+ for d in os.listdir(root_dir):\n+ if os.path.isdir(os.path.join(root_dir, d)) and d.startswith(\"version_\"):\n+ existing_versions.append(int(d.split(\"_\")[1]))\n+\n if len(existing_versions) == 0:\n return 0\n else:\n", "issue": "TensorBoardLogger and ModelCheckpoint are not using the same folder by default\n## \ud83d\udc1b Bug\r\n(master branch)\r\nBy default, the TensorBoardLogger writes logs into `lightning_logs/0` but ModelCheckpoint writes checkpoint into `lightning_logs/version_0`.\n", "before_files": [{"content": "import os\nfrom warnings import warn\nfrom argparse import Namespace\nfrom pkg_resources import parse_version\n\nimport torch\nimport pandas as pd\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass TensorBoardLogger(LightningLoggerBase):\n r\"\"\"\n\n Log to local file system in TensorBoard format\n\n Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to\n `os.path.join(save_dir, name, version)`\n\n Example\n --------\n\n .. code-block:: python\n\n logger = TensorBoardLogger(\"tb_logs\", name=\"my_model\")\n trainer = Trainer(logger=logger)\n trainer.train(model)\n\n Args:\n save_dir (str): Save directory\n name (str): Experiment name. Defaults to \"default\".\n version (int): Experiment version. If version is not specified the logger inspects the save\n directory for existing versions, then automatically assigns the next available version.\n \\**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.\n\n \"\"\"\n NAME_CSV_TAGS = 'meta_tags.csv'\n\n def __init__(self, save_dir, name=\"default\", version=None, **kwargs):\n super().__init__()\n self.save_dir = save_dir\n self._name = name\n self._version = version\n\n self._experiment = None\n self.tags = {}\n self.kwargs = kwargs\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual tensorboard object. 
To use tensorboard features do the following.\n\n Example::\n\n self.logger.experiment.some_tensorboard_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n log_dir = os.path.join(root_dir, str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n if params is None:\n return\n\n # in case converting from namespace\n if isinstance(params, Namespace):\n params = vars(params)\n params = dict(params)\n\n if parse_version(torch.__version__) < parse_version(\"1.3.0\"):\n warn(\n f\"Hyperparameter logging is not available for Torch version {torch.__version__}.\"\n \" Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable\"\n \" hyperparameter logging.\"\n )\n else:\n # `add_hparams` requires both - hparams and metric\n self.experiment.add_hparams(hparam_dict=params, metric_dict={})\n # some alternative should be added\n self.tags.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n for k, v in metrics.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n self.experiment.add_scalar(k, v, step)\n\n @rank_zero_only\n def save(self):\n try:\n self.experiment.flush()\n except AttributeError:\n # you are using PT version (<v1.2) which does not have implemented flush\n self.experiment._get_file_writer().flush()\n\n # create a preudo standard path ala test-tube\n dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)\n if not os.path.isdir(dir_path):\n dir_path = self.save_dir\n # prepare the file path\n meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)\n # save the metatags file\n df = pd.DataFrame({'key': list(self.tags.keys()),\n 'value': list(self.tags.values())})\n df.to_csv(meta_tags_path, index=False)\n\n @rank_zero_only\n def finalize(self, status):\n self.save()\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n if self._version is None:\n self._version = self._get_next_version()\n return self._version\n\n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n existing_versions = [\n int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()\n ]\n if len(existing_versions) == 0:\n return 0\n else:\n return max(existing_versions) + 1\n", "path": "pytorch_lightning/logging/tensorboard.py"}], "after_files": [{"content": "import os\nfrom warnings import warn\nfrom argparse import Namespace\nfrom pkg_resources import parse_version\n\nimport torch\nimport pandas as pd\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass TensorBoardLogger(LightningLoggerBase):\n r\"\"\"\n\n Log to local file system in TensorBoard format\n\n Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to\n `os.path.join(save_dir, name, version)`\n\n Example\n --------\n\n .. code-block:: python\n\n logger = TensorBoardLogger(\"tb_logs\", name=\"my_model\")\n trainer = Trainer(logger=logger)\n trainer.train(model)\n\n Args:\n save_dir (str): Save directory\n name (str): Experiment name. Defaults to \"default\".\n version (int): Experiment version. 
If version is not specified the logger inspects the save\n directory for existing versions, then automatically assigns the next available version.\n \\**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.\n\n \"\"\"\n NAME_CSV_TAGS = 'meta_tags.csv'\n\n def __init__(self, save_dir, name=\"default\", version=None, **kwargs):\n super().__init__()\n self.save_dir = save_dir\n self._name = name\n self._version = version\n\n self._experiment = None\n self.tags = {}\n self.kwargs = kwargs\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual tensorboard object. To use tensorboard features do the following.\n\n Example::\n\n self.logger.experiment.some_tensorboard_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n log_dir = os.path.join(root_dir, \"version_\" + str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n if params is None:\n return\n\n # in case converting from namespace\n if isinstance(params, Namespace):\n params = vars(params)\n params = dict(params)\n\n if parse_version(torch.__version__) < parse_version(\"1.3.0\"):\n warn(\n f\"Hyperparameter logging is not available for Torch version {torch.__version__}.\"\n \" Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable\"\n \" hyperparameter logging.\"\n )\n else:\n # `add_hparams` requires both - hparams and metric\n self.experiment.add_hparams(hparam_dict=params, metric_dict={})\n # some alternative should be added\n self.tags.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n for k, v in metrics.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n self.experiment.add_scalar(k, v, step)\n\n @rank_zero_only\n def save(self):\n try:\n self.experiment.flush()\n except AttributeError:\n # you are using PT version (<v1.2) which does not have implemented flush\n self.experiment._get_file_writer().flush()\n\n # create a preudo standard path ala test-tube\n dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)\n if not os.path.isdir(dir_path):\n dir_path = self.save_dir\n # prepare the file path\n meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)\n # save the metatags file\n df = pd.DataFrame({'key': list(self.tags.keys()),\n 'value': list(self.tags.values())})\n df.to_csv(meta_tags_path, index=False)\n\n @rank_zero_only\n def finalize(self, status):\n self.save()\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n if self._version is None:\n self._version = self._get_next_version()\n return self._version\n\n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n existing_versions = []\n for d in os.listdir(root_dir):\n if os.path.isdir(os.path.join(root_dir, d)) and d.startswith(\"version_\"):\n existing_versions.append(int(d.split(\"_\")[1]))\n\n if len(existing_versions) == 0:\n return 0\n else:\n return max(existing_versions) + 1\n", "path": "pytorch_lightning/logging/tensorboard.py"}]}
| 1,627 | 285 |
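
The fix above aligns `TensorBoardLogger` with the checkpoint callback by writing into `version_<n>` directories and scanning for that prefix when choosing the next version. A standalone sketch of that scan, extracted from the golden diff — the `next_version` helper name is illustrative:

```python
import os


def next_version(root_dir: str) -> int:
    """Next free experiment version under root_dir, using the 'version_<n>' naming scheme."""
    if not os.path.isdir(root_dir):
        return 0
    existing = []
    for d in os.listdir(root_dir):
        # Only directories named like 'version_0', 'version_1', ... are counted.
        if os.path.isdir(os.path.join(root_dir, d)) and d.startswith("version_"):
            existing.append(int(d.split("_")[1]))
    return max(existing) + 1 if existing else 0


# With lightning_logs/default/version_0 and version_1 on disk this returns 2, and the
# log directory becomes os.path.join(root_dir, f"version_{next_version(root_dir)}"),
# matching the folder that ModelCheckpoint already uses.
```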
gh_patches_debug_17184
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-33
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not print page title above each page
Having a h1 HOME on your home page looks stupid. Either remove this completely and show currently selected page in menu, or put page title at top of content by default, so it is there by default but can be edited away if needed
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/comicsite/views.py`
Content:
```
1 '''
2 Created on Jun 18, 2012
3
4 Testing views. Each of these views is referenced in urls.py
5
6 @author: Sjoerd
7 '''
8
9 from django.http import HttpResponse
10 from django.http import Http404
11 from django.shortcuts import render_to_response
12 from django.template import RequestContext
13
14 from comicsite.models import ComicSite,Page,ComicSiteException
15 from dataproviders import FileSystemDataProvider
16
17
18 def index(request):
19 return HttpResponse("ComicSite index page.",context_instance=RequestContext(request))
20
21
22 def site(request, site_short_name):
23 """ show a single COMIC site, default start page """
24 #TODO: Is it bad to use site name here, which is not the specified key?
25
26 site = getSite(site_short_name)
27
28 pages = getPages(site_short_name)
29
30 return render_to_response('page.html', {'site': site, 'page': pages[0], "pages":pages },context_instance=RequestContext(request))
31
32
33 def page(request, site_short_name, page_title):
34 """ show a single page on a site """
35
36 try:
37 p = Page.objects.get(ComicSite__short_name=site_short_name, title=page_title)
38 except Page.DoesNotExist:
39 raise Http404
40 pages = getPages(site_short_name)
41
42 return render_to_response('page.html', {'site': p.ComicSite, 'page': p, "pages":pages },context_instance=RequestContext(request))
43
44
45
46
47 def dataPage(request):
48 """ test function for data provider. Just get some files from provider and show them as list"""
49 #= r"D:\userdata\Sjoerd\Aptana Studio 3 Workspace\comic-django\django\static\files"
50
51 path = r"D:\userdata\Sjoerd\Aptana Studio 3 Workspace\comic-django\django\static\files"
52 dp = FileSystemDataProvider.FileSystemDataProvider(path)
53 images = dp.getImages()
54
55 htmlOut = "available files:"+", ".join(images)
56 p = createTestPage(html=htmlOut)
57 pages = [p]
58
59 return render_to_response('page.html', {'site': p.ComicSite, 'page': p, "pages":pages },context_instance=RequestContext(request))
60
61 # ======================================== not called directly from urls.py =========================================
62
63 def getSite(site_short_name):
64 try:
65 site = ComicSite.objects.get(short_name=site_short_name)
66 except ComicSite.DoesNotExist:
67 raise Http404
68 return site
69
70
71 def getPages(site_short_name):
72 """ get all pages of the given site from db"""
73 try:
74 pages = Page.objects.filter(ComicSite__short_name=site_short_name)
75 except Page.DoesNotExist:
76 raise Http404
77 return pages
78
79 # trying to follow pep 0008 here, finally.
80 def site_exists(site_short_name):
81 try:
82 site = ComicSite.objects.get(short_name=site_short_name)
83 return True
84 except ComicSite.DoesNotExist:
85 return False
86
87
88 # ====================================================== debug and test ==================================================
89 def createTestPage(title="testPage",html=""):
90 """ Create a quick mockup on the ComicSite 'Test'"""
91
92 if site_exists("test"):
93 #TODO log a warning here, no exception.
94 raise ComicSiteException("I am creating a spoof ComicSite called 'test' on the fly, by a project called 'test' was already defined in DB. This message should be a warning instead of an exception")
95
96 # if no site exists by that name, create it on the fly.
97 site = ComicSite()
98 site.short_name = "test"
99 site.name = "Test Page"
100 site.skin = ""
101
102 return Page(ComicSite=site,title=title,html=html)
103
104
105 def givePageHTML(page):
106 return "<h1>%s</h1> <p>%s</p>" %(page.title ,page.html)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/comicsite/views.py b/django/comicsite/views.py
--- a/django/comicsite/views.py
+++ b/django/comicsite/views.py
@@ -27,7 +27,7 @@
pages = getPages(site_short_name)
- return render_to_response('page.html', {'site': site, 'page': pages[0], "pages":pages },context_instance=RequestContext(request))
+ return render_to_response('page.html', {'site': site, 'currentpage': pages[0], "pages":pages },context_instance=RequestContext(request))
def page(request, site_short_name, page_title):
@@ -39,7 +39,7 @@
raise Http404
pages = getPages(site_short_name)
- return render_to_response('page.html', {'site': p.ComicSite, 'page': p, "pages":pages },context_instance=RequestContext(request))
+ return render_to_response('page.html', {'site': p.ComicSite, 'currentpage': p, "pages":pages },context_instance=RequestContext(request))
|
{"golden_diff": "diff --git a/django/comicsite/views.py b/django/comicsite/views.py\n--- a/django/comicsite/views.py\n+++ b/django/comicsite/views.py\n@@ -27,7 +27,7 @@\n \n pages = getPages(site_short_name)\n \n- return render_to_response('page.html', {'site': site, 'page': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n+ return render_to_response('page.html', {'site': site, 'currentpage': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n \n \n def page(request, site_short_name, page_title):\n@@ -39,7 +39,7 @@\n raise Http404\n pages = getPages(site_short_name)\n \n- return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n+ return render_to_response('page.html', {'site': p.ComicSite, 'currentpage': p, \"pages\":pages },context_instance=RequestContext(request))\n", "issue": "Do not print page title above each page\nHaving a h1 HOME on your home page looks stupid. Either remove this completely and show currently selected page in menu, or put page title at top of content by default, so it is there by default but can be edited away if needed \n\n", "before_files": [{"content": "'''\nCreated on Jun 18, 2012\n\nTesting views. Each of these views is referenced in urls.py \n\n@author: Sjoerd\n'''\n\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom comicsite.models import ComicSite,Page,ComicSiteException\nfrom dataproviders import FileSystemDataProvider\n\n\ndef index(request):\n return HttpResponse(\"ComicSite index page.\",context_instance=RequestContext(request))\n\n\ndef site(request, site_short_name):\n \"\"\" show a single COMIC site, default start page \"\"\"\n #TODO: Is it bad to use site name here, which is not the specified key?\n \n site = getSite(site_short_name)\n \n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': site, 'page': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n \n\ndef page(request, site_short_name, page_title):\n \"\"\" show a single page on a site \"\"\"\n \n try:\n p = Page.objects.get(ComicSite__short_name=site_short_name, title=page_title)\n except Page.DoesNotExist: \n raise Http404\n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n \n \n \n\ndef dataPage(request):\n \"\"\" test function for data provider. 
Just get some files from provider and show them as list\"\"\"\n #= r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n \n path = r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n dp = FileSystemDataProvider.FileSystemDataProvider(path)\n images = dp.getImages()\n \n htmlOut = \"available files:\"+\", \".join(images)\n p = createTestPage(html=htmlOut)\n pages = [p]\n \n return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n\n# ======================================== not called directly from urls.py =========================================\n\ndef getSite(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n except ComicSite.DoesNotExist: \n raise Http404 \n return site \n \n \ndef getPages(site_short_name):\n \"\"\" get all pages of the given site from db\"\"\"\n try:\n pages = Page.objects.filter(ComicSite__short_name=site_short_name)\n except Page.DoesNotExist: \n raise Http404\n return pages\n\n# trying to follow pep 0008 here, finally.\ndef site_exists(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n return True\n except ComicSite.DoesNotExist: \n return False\n \n \n# ====================================================== debug and test ==================================================\ndef createTestPage(title=\"testPage\",html=\"\"):\n \"\"\" Create a quick mockup on the ComicSite 'Test'\"\"\"\n \n if site_exists(\"test\"):\n #TODO log a warning here, no exception.\n raise ComicSiteException(\"I am creating a spoof ComicSite called 'test' on the fly, by a project called 'test' was already defined in DB. This message should be a warning instead of an exception\") \n \n # if no site exists by that name, create it on the fly.\n site = ComicSite()\n site.short_name = \"test\"\n site.name = \"Test Page\"\n site.skin = \"\"\n \n return Page(ComicSite=site,title=title,html=html)\n \n\ndef givePageHTML(page):\n return \"<h1>%s</h1> <p>%s</p>\" %(page.title ,page.html)", "path": "django/comicsite/views.py"}], "after_files": [{"content": "'''\nCreated on Jun 18, 2012\n\nTesting views. 
Each of these views is referenced in urls.py \n\n@author: Sjoerd\n'''\n\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom comicsite.models import ComicSite,Page,ComicSiteException\nfrom dataproviders import FileSystemDataProvider\n\n\ndef index(request):\n return HttpResponse(\"ComicSite index page.\",context_instance=RequestContext(request))\n\n\ndef site(request, site_short_name):\n \"\"\" show a single COMIC site, default start page \"\"\"\n #TODO: Is it bad to use site name here, which is not the specified key?\n \n site = getSite(site_short_name)\n \n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': site, 'currentpage': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n \n\ndef page(request, site_short_name, page_title):\n \"\"\" show a single page on a site \"\"\"\n \n try:\n p = Page.objects.get(ComicSite__short_name=site_short_name, title=page_title)\n except Page.DoesNotExist: \n raise Http404\n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': p.ComicSite, 'currentpage': p, \"pages\":pages },context_instance=RequestContext(request))\n \n \n \n\ndef dataPage(request):\n \"\"\" test function for data provider. Just get some files from provider and show them as list\"\"\"\n #= r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n \n path = r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n dp = FileSystemDataProvider.FileSystemDataProvider(path)\n images = dp.getImages()\n \n htmlOut = \"available files:\"+\", \".join(images)\n p = createTestPage(html=htmlOut)\n pages = [p]\n \n return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n\n# ======================================== not called directly from urls.py =========================================\n\ndef getSite(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n except ComicSite.DoesNotExist: \n raise Http404 \n return site \n \n \ndef getPages(site_short_name):\n \"\"\" get all pages of the given site from db\"\"\"\n try:\n pages = Page.objects.filter(ComicSite__short_name=site_short_name)\n except Page.DoesNotExist: \n raise Http404\n return pages\n\n# trying to follow pep 0008 here, finally.\ndef site_exists(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n return True\n except ComicSite.DoesNotExist: \n return False\n \n \n# ====================================================== debug and test ==================================================\ndef createTestPage(title=\"testPage\",html=\"\"):\n \"\"\" Create a quick mockup on the ComicSite 'Test'\"\"\"\n \n if site_exists(\"test\"):\n #TODO log a warning here, no exception.\n raise ComicSiteException(\"I am creating a spoof ComicSite called 'test' on the fly, by a project called 'test' was already defined in DB. This message should be a warning instead of an exception\") \n \n # if no site exists by that name, create it on the fly.\n site = ComicSite()\n site.short_name = \"test\"\n site.name = \"Test Page\"\n site.skin = \"\"\n \n return Page(ComicSite=site,title=title,html=html)\n \n\ndef givePageHTML(page):\n return \"<h1>%s</h1> <p>%s</p>\" %(page.title ,page.html)", "path": "django/comicsite/views.py"}]}
| 1,377 | 245 |
gh_patches_debug_19590 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1913 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change order of the blog posts
Currently all blog posts that are published on grand-challenge are sorted by the date the post was initially created. We would like to change this to the date the post was published, so that the most recent posts are shown on top. I had contact with @jmsmkn on Slack, after which he suggested a few changes I could make in the code to change this. After discussing this with Kiran, we thought it might be best to first create an issue here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/blogs/models.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth import get_user_model
3 from django.db import models
4 from django_extensions.db.fields import AutoSlugField
5 from simple_history.models import HistoricalRecords
6 from stdimage import JPEGField
7
8 from grandchallenge.core.storage import get_logo_path, public_s3_storage
9 from grandchallenge.subdomains.utils import reverse
10
11
12 class Tag(models.Model):
13 name = models.CharField(max_length=200, unique=True)
14 slug = AutoSlugField(populate_from="name", max_length=200)
15
16 def __str__(self):
17 return self.name
18
19
20 class Post(models.Model):
21 created = models.DateTimeField(auto_now_add=True)
22 modified = models.DateTimeField(auto_now=True)
23
24 title = models.CharField(max_length=1024)
25 slug = AutoSlugField(populate_from="title", max_length=1024)
26 description = models.TextField()
27 content = models.TextField()
28
29 authors = models.ManyToManyField(
30 to=get_user_model(), related_name="blog_authors"
31 )
32
33 logo = JPEGField(
34 upload_to=get_logo_path,
35 storage=public_s3_storage,
36 variations=settings.STDIMAGE_SOCIAL_VARIATIONS,
37 )
38
39 tags = models.ManyToManyField(to=Tag, blank=True, related_name="posts")
40
41 published = models.BooleanField(default=False)
42
43 history = HistoricalRecords()
44
45 class Meta:
46 ordering = ("-created",)
47
48 def __str__(self):
49 return self.title
50
51 def get_absolute_url(self):
52 return reverse("blogs:detail", kwargs={"slug": self.slug})
53
54 @property
55 def public(self):
56 return self.published
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/blogs/models.py b/app/grandchallenge/blogs/models.py
--- a/app/grandchallenge/blogs/models.py
+++ b/app/grandchallenge/blogs/models.py
@@ -1,6 +1,7 @@
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
+from django.utils import timezone
from django_extensions.db.fields import AutoSlugField
from simple_history.models import HistoricalRecords
from stdimage import JPEGField
@@ -48,6 +49,16 @@
def __str__(self):
return self.title
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._published_orig = self.published
+
+ def save(self, *args, **kwargs):
+ if self._published_orig is False and self.published is True:
+ self.created = timezone.now()
+
+ super().save(*args, **kwargs)
+
def get_absolute_url(self):
return reverse("blogs:detail", kwargs={"slug": self.slug})
|
{"golden_diff": "diff --git a/app/grandchallenge/blogs/models.py b/app/grandchallenge/blogs/models.py\n--- a/app/grandchallenge/blogs/models.py\n+++ b/app/grandchallenge/blogs/models.py\n@@ -1,6 +1,7 @@\n from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.db import models\n+from django.utils import timezone\n from django_extensions.db.fields import AutoSlugField\n from simple_history.models import HistoricalRecords\n from stdimage import JPEGField\n@@ -48,6 +49,16 @@\n def __str__(self):\n return self.title\n \n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self._published_orig = self.published\n+\n+ def save(self, *args, **kwargs):\n+ if self._published_orig is False and self.published is True:\n+ self.created = timezone.now()\n+\n+ super().save(*args, **kwargs)\n+\n def get_absolute_url(self):\n return reverse(\"blogs:detail\", kwargs={\"slug\": self.slug})\n", "issue": "Change order of the blog posts\nCurrently all blog posts that are published on grand-challenge are sorted based on the date the post was initially created. We would like to change this to the date the post was published such that the most recent post are shown on top. I had contact with @jmsmkn on slack after which he suggested a few changes I could make in the code to change this. After discussing this with Kiran we thought it might be best to first create an issue here. \r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django_extensions.db.fields import AutoSlugField\nfrom simple_history.models import HistoricalRecords\nfrom stdimage import JPEGField\n\nfrom grandchallenge.core.storage import get_logo_path, public_s3_storage\nfrom grandchallenge.subdomains.utils import reverse\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=200, unique=True)\n slug = AutoSlugField(populate_from=\"name\", max_length=200)\n\n def __str__(self):\n return self.name\n\n\nclass Post(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n title = models.CharField(max_length=1024)\n slug = AutoSlugField(populate_from=\"title\", max_length=1024)\n description = models.TextField()\n content = models.TextField()\n\n authors = models.ManyToManyField(\n to=get_user_model(), related_name=\"blog_authors\"\n )\n\n logo = JPEGField(\n upload_to=get_logo_path,\n storage=public_s3_storage,\n variations=settings.STDIMAGE_SOCIAL_VARIATIONS,\n )\n\n tags = models.ManyToManyField(to=Tag, blank=True, related_name=\"posts\")\n\n published = models.BooleanField(default=False)\n\n history = HistoricalRecords()\n\n class Meta:\n ordering = (\"-created\",)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse(\"blogs:detail\", kwargs={\"slug\": self.slug})\n\n @property\n def public(self):\n return self.published\n", "path": "app/grandchallenge/blogs/models.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils import timezone\nfrom django_extensions.db.fields import AutoSlugField\nfrom simple_history.models import HistoricalRecords\nfrom stdimage import JPEGField\n\nfrom grandchallenge.core.storage import get_logo_path, public_s3_storage\nfrom grandchallenge.subdomains.utils import reverse\n\n\nclass Tag(models.Model):\n name = 
models.CharField(max_length=200, unique=True)\n slug = AutoSlugField(populate_from=\"name\", max_length=200)\n\n def __str__(self):\n return self.name\n\n\nclass Post(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n title = models.CharField(max_length=1024)\n slug = AutoSlugField(populate_from=\"title\", max_length=1024)\n description = models.TextField()\n content = models.TextField()\n\n authors = models.ManyToManyField(\n to=get_user_model(), related_name=\"blog_authors\"\n )\n\n logo = JPEGField(\n upload_to=get_logo_path,\n storage=public_s3_storage,\n variations=settings.STDIMAGE_SOCIAL_VARIATIONS,\n )\n\n tags = models.ManyToManyField(to=Tag, blank=True, related_name=\"posts\")\n\n published = models.BooleanField(default=False)\n\n history = HistoricalRecords()\n\n class Meta:\n ordering = (\"-created\",)\n\n def __str__(self):\n return self.title\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._published_orig = self.published\n\n def save(self, *args, **kwargs):\n if self._published_orig is False and self.published is True:\n self.created = timezone.now()\n\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse(\"blogs:detail\", kwargs={\"slug\": self.slug})\n\n @property\n def public(self):\n return self.published\n", "path": "app/grandchallenge/blogs/models.py"}]}
| 823 | 238 |
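As an aside on the blog-post ordering fix above: the behaviour of the patched `save()` can be illustrated without Django at all. The sketch below is a plain-Python stand-in written for this document (the class body and timestamps are assumptions for illustration, not grandchallenge code); it mirrors the idea of remembering the publication state at load time and bumping `created` when a draft is first published, which is what lets `ordering = ("-created",)` surface the newest publications first.

```python
from datetime import datetime, timezone


class Post:
    """Plain-Python stand-in for the patched Django model (illustration only)."""

    def __init__(self, published=False):
        self.created = datetime.now(timezone.utc)
        self.published = published
        # Remember the publication state at load time, mirroring the patched __init__.
        self._published_orig = published

    def save(self):
        # When a draft is published for the first time, reset `created` so that
        # ordering by "-created" puts the newest publication on top.
        if self._published_orig is False and self.published is True:
            self.created = datetime.now(timezone.utc)
        self._published_orig = self.published


draft = Post(published=False)
first_created = draft.created
draft.published = True
draft.save()
assert draft.created >= first_created  # the timestamp now reflects the publish date
```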
gh_patches_debug_32645 | rasdani/github-patches | git_diff | ddionrails__ddionrails-707 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove "namespace" fields in search index
Do we need a "namespace" field in the search index?
https://github.com/ddionrails/ddionrails/blob/f4f4c9356d2b23b596b02e9ca921106635a20282/templates/elastic/help.html#L13
https://github.com/ddionrails/ddionrails/blob/4f50e614f95c26c0625243a66608ea8ea0c52d84/ddionrails/instruments/imports.py#L53
https://github.com/ddionrails/ddionrails/blob/4f50e614f95c26c0625243a66608ea8ea0c52d84/ddionrails/data/imports.py#L66
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddionrails/imports/management/commands/index.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """ "Index" management command for ddionrails project """
4
5 import json
6 import pathlib
7
8 import djclick as click
9 from django.conf import settings
10 from elasticsearch import Elasticsearch, helpers
11
12 from ddionrails.concepts.models import Concept
13 from ddionrails.data.models import Variable
14 from ddionrails.instruments.models import Question
15 from ddionrails.publications.models import Publication
16 from ddionrails.studies.models import Study
17
18 elasticsearch_client = Elasticsearch(hosts=[settings.INDEX_HOST])
19
20
21 def create(mapping_file: str) -> None:
22 """ Create an Elasticsearch index
23
24 using:
25 - settings.INDEX_HOST
26 - settings.INDEX_NAME
27 - mapping_file
28 """
29 if elasticsearch_client.indices.exists(settings.INDEX_NAME):
30 click.secho(f'Index "{settings.INDEX_NAME}" already exists.', fg="red")
31 exit(1)
32 else:
33 if pathlib.Path(mapping_file).exists() is False:
34 click.secho(f'Mapping file "{mapping_file}" not found.', fg="red")
35 exit(1)
36 else:
37 with open(mapping_file, "r") as infile:
38 mapping = json.load(infile)
39 click.secho(
40 f'Creating index "{settings.INDEX_NAME}" with maping from "{mapping_file}"',
41 fg="green",
42 )
43 result = elasticsearch_client.indices.create(
44 index=settings.INDEX_NAME, body=mapping
45 )
46 click.secho(str(result), fg="green")
47
48
49 def delete() -> None:
50 """ Delete an Elasticsearch index
51
52 using:
53 - settings.INDEX_HOST
54 - settings.INDEX_NAME
55 """
56 if elasticsearch_client.indices.exists(settings.INDEX_NAME):
57 click.secho(f'Deleting index "{settings.INDEX_NAME}"', fg="green")
58 result = elasticsearch_client.indices.delete(index=settings.INDEX_NAME)
59 click.secho(str(result), fg="green")
60 else:
61 click.secho(f'Index "{settings.INDEX_NAME}" does not exist.', fg="red")
62 exit(1)
63
64
65 def reset(mapping_file: str) -> None:
66 """ Reset an Elasticsearch index
67
68 using:
69 - settings.INDEX_HOST
70 - settings.INDEX_NAME
71 - mapping_file
72 """
73 if pathlib.Path(mapping_file).exists() is False:
74 click.secho(f'Mapping file "{mapping_file}" not found.', fg="red")
75 exit(1)
76 delete()
77 create(mapping_file)
78
79
80 def concepts():
81 """ Iterate over all concepts in the database """
82
83 queryset = Concept.objects.prefetch_related("variables").all()
84 for concept in queryset:
85 study = list(
86 Study.objects.filter(datasets__variables__concept_id=concept.id)
87 .values_list("name", flat=True)
88 .distinct()
89 )
90 yield {
91 "_index": settings.INDEX_NAME,
92 "_type": concept.DOC_TYPE,
93 "_id": str(concept.id),
94 "_source": {"name": concept.name, "label": concept.label, "study": study},
95 }
96
97
98 def publications():
99 """ Iterate over all publications in the database """
100
101 queryset = Publication.objects.select_related("study").all()
102 for publication in queryset:
103 yield {
104 "_index": settings.INDEX_NAME,
105 "_type": publication.DOC_TYPE,
106 "_id": str(publication.id),
107 "_source": publication.to_elastic_dict(),
108 }
109
110
111 def questions():
112 """ Iterate over all questions in the database """
113
114 queryset = Question.objects.select_related(
115 "instrument",
116 "instrument__analysis_unit",
117 "instrument__period",
118 "instrument__study",
119 ).all()
120 for question in queryset:
121 period = question.get_period(period_id="name")
122 try:
123 analysis_unit = question.instrument.analysis_unit.name
124 except AttributeError:
125 analysis_unit = None
126 yield {
127 "_index": settings.INDEX_NAME,
128 "_type": question.DOC_TYPE,
129 "_id": str(question.id),
130 "_source": {
131 "period": period,
132 "analysis_unit": analysis_unit,
133 "question": question.name,
134 "name": question.name,
135 "label": question.label,
136 "items": question.items,
137 "sort_id": question.sort_id,
138 "instrument": question.instrument.name,
139 "study": question.instrument.study.name,
140 "namespace": question.instrument.study.name,
141 },
142 }
143
144
145 def variables():
146 """ Iterate over all variables in the database """
147
148 queryset = Variable.objects.select_related(
149 "dataset",
150 "dataset__study",
151 "dataset__analysis_unit",
152 "dataset__conceptual_dataset",
153 "dataset__period",
154 ).all()
155 for variable in queryset:
156 period = variable.get_period(period_id="name")
157 try:
158 analysis_unit = variable.dataset.analysis_unit.name
159 except AttributeError:
160 analysis_unit = None
161 try:
162 sub_type = variable.dataset.conceptual_dataset.name
163 except AttributeError:
164 sub_type = None
165
166 yield {
167 "_index": settings.INDEX_NAME,
168 "_type": variable.DOC_TYPE,
169 "_id": str(variable.id),
170 "_source": {
171 "name": variable.name,
172 "variable": variable.name,
173 "label": variable.label,
174 "label_de": variable.label_de,
175 "dataset": variable.dataset.name,
176 "period": period,
177 "sub_type": sub_type,
178 "analysis_unit": analysis_unit,
179 "study": variable.dataset.study.name,
180 "namespace": variable.dataset.study.name,
181 "categories": variable.categories,
182 },
183 }
184
185
186 def populate():
187 """ Workaround """
188 print(f"Indexing {Publication.objects.count()} publications into Elasticsearch")
189 result = helpers.bulk(elasticsearch_client, publications())
190 print(result)
191
192 print(f"Indexing {Concept.objects.count()} concepts into Elasticsearch")
193 result = helpers.bulk(elasticsearch_client, concepts())
194 print(result)
195
196 print(f"Indexing {Question.objects.count()} questions into Elasticsearch")
197 result = helpers.bulk(elasticsearch_client, questions())
198 print(result)
199
200 print(f"Indexing {Variable.objects.count()} variables into Elasticsearch")
201 result = helpers.bulk(elasticsearch_client, variables())
202 print(result)
203
204
205 @click.group()
206 def command():
207 """ddionrails: Elasticsearch index creation/deletion/reset tool."""
208
209
210 @command.command(
211 "create",
212 short_help='Create the index defined in "settings.INDEX_NAME" and the given "mapping_file"',
213 )
214 @click.option(
215 "-f",
216 "--file",
217 "mapping_file",
218 default="ddionrails/elastic/mapping.json",
219 help='Elasticsearch mapping file in JSON format (defaults to "ddionrails/elastic/mapping.json")',
220 )
221 def create_command(mapping_file: str) -> None:
222 """ Create an Elasticsearch index
223
224 using:\n
225 - settings.INDEX_HOST\n
226 - settings.INDEX_NAME\n
227 - mapping_file
228 """
229 create(mapping_file)
230
231
232 @command.command("delete", short_help='Delete the index defined in "settings.INDEX_NAME"')
233 def delete_command():
234 """ Delete an Elasticsearch index
235
236 using:\n
237 - settings.INDEX_HOST\n
238 - settings.INDEX_NAME
239 """
240 delete()
241
242
243 @command.command(
244 "reset",
245 short_help='Reset the index defined in "settings.INDEX_NAME" and the given "mapping_file"',
246 )
247 @click.option(
248 "-f",
249 "--file",
250 "mapping_file",
251 default="ddionrails/elastic/mapping.json",
252 help='Elasticsearch mapping file in JSON format (defaults to "ddionrails/elastic/mapping.json")',
253 )
254 def reset(mapping_file: str) -> None:
255 """ Reset an Elasticsearch index
256
257 using:\n
258 - settings.INDEX_HOST\n
259 - settings.INDEX_NAME\n
260 - mapping_file
261 """
262 delete()
263 create(mapping_file)
264
265
266 @command.command("populate", short_help="Populate the Elasticsearch index")
267 def populate_command() -> None:
268 """ Populate the Elasticsearch index """
269 populate()
270
271
272 # remove "verbosity", "settings", "pythonpath", "traceback", "color" options from django-click
273 command.params = command.params[:2] + command.params[7:]
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddionrails/imports/management/commands/index.py b/ddionrails/imports/management/commands/index.py
--- a/ddionrails/imports/management/commands/index.py
+++ b/ddionrails/imports/management/commands/index.py
@@ -122,7 +122,7 @@
try:
analysis_unit = question.instrument.analysis_unit.name
except AttributeError:
- analysis_unit = None
+ analysis_unit = "None"
yield {
"_index": settings.INDEX_NAME,
"_type": question.DOC_TYPE,
@@ -137,7 +137,6 @@
"sort_id": question.sort_id,
"instrument": question.instrument.name,
"study": question.instrument.study.name,
- "namespace": question.instrument.study.name,
},
}
@@ -157,11 +156,11 @@
try:
analysis_unit = variable.dataset.analysis_unit.name
except AttributeError:
- analysis_unit = None
+ analysis_unit = "None"
try:
sub_type = variable.dataset.conceptual_dataset.name
except AttributeError:
- sub_type = None
+ sub_type = "None"
yield {
"_index": settings.INDEX_NAME,
@@ -177,7 +176,6 @@
"sub_type": sub_type,
"analysis_unit": analysis_unit,
"study": variable.dataset.study.name,
- "namespace": variable.dataset.study.name,
"categories": variable.categories,
},
}
|
{"golden_diff": "diff --git a/ddionrails/imports/management/commands/index.py b/ddionrails/imports/management/commands/index.py\n--- a/ddionrails/imports/management/commands/index.py\n+++ b/ddionrails/imports/management/commands/index.py\n@@ -122,7 +122,7 @@\n try:\n analysis_unit = question.instrument.analysis_unit.name\n except AttributeError:\n- analysis_unit = None\n+ analysis_unit = \"None\"\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": question.DOC_TYPE,\n@@ -137,7 +137,6 @@\n \"sort_id\": question.sort_id,\n \"instrument\": question.instrument.name,\n \"study\": question.instrument.study.name,\n- \"namespace\": question.instrument.study.name,\n },\n }\n \n@@ -157,11 +156,11 @@\n try:\n analysis_unit = variable.dataset.analysis_unit.name\n except AttributeError:\n- analysis_unit = None\n+ analysis_unit = \"None\"\n try:\n sub_type = variable.dataset.conceptual_dataset.name\n except AttributeError:\n- sub_type = None\n+ sub_type = \"None\"\n \n yield {\n \"_index\": settings.INDEX_NAME,\n@@ -177,7 +176,6 @@\n \"sub_type\": sub_type,\n \"analysis_unit\": analysis_unit,\n \"study\": variable.dataset.study.name,\n- \"namespace\": variable.dataset.study.name,\n \"categories\": variable.categories,\n },\n }\n", "issue": "Remove \"namespace\" fields in search index\nDo we need a \"namespace\" field in the search index?\r\n\r\nhttps://github.com/ddionrails/ddionrails/blob/f4f4c9356d2b23b596b02e9ca921106635a20282/templates/elastic/help.html#L13\r\nhttps://github.com/ddionrails/ddionrails/blob/4f50e614f95c26c0625243a66608ea8ea0c52d84/ddionrails/instruments/imports.py#L53\r\nhttps://github.com/ddionrails/ddionrails/blob/4f50e614f95c26c0625243a66608ea8ea0c52d84/ddionrails/data/imports.py#L66\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\" \"Index\" management command for ddionrails project \"\"\"\n\nimport json\nimport pathlib\n\nimport djclick as click\nfrom django.conf import settings\nfrom elasticsearch import Elasticsearch, helpers\n\nfrom ddionrails.concepts.models import Concept\nfrom ddionrails.data.models import Variable\nfrom ddionrails.instruments.models import Question\nfrom ddionrails.publications.models import Publication\nfrom ddionrails.studies.models import Study\n\nelasticsearch_client = Elasticsearch(hosts=[settings.INDEX_HOST])\n\n\ndef create(mapping_file: str) -> None:\n \"\"\" Create an Elasticsearch index\n\n using:\n - settings.INDEX_HOST\n - settings.INDEX_NAME\n - mapping_file\n \"\"\"\n if elasticsearch_client.indices.exists(settings.INDEX_NAME):\n click.secho(f'Index \"{settings.INDEX_NAME}\" already exists.', fg=\"red\")\n exit(1)\n else:\n if pathlib.Path(mapping_file).exists() is False:\n click.secho(f'Mapping file \"{mapping_file}\" not found.', fg=\"red\")\n exit(1)\n else:\n with open(mapping_file, \"r\") as infile:\n mapping = json.load(infile)\n click.secho(\n f'Creating index \"{settings.INDEX_NAME}\" with maping from \"{mapping_file}\"',\n fg=\"green\",\n )\n result = elasticsearch_client.indices.create(\n index=settings.INDEX_NAME, body=mapping\n )\n click.secho(str(result), fg=\"green\")\n\n\ndef delete() -> None:\n \"\"\" Delete an Elasticsearch index\n\n using:\n - settings.INDEX_HOST\n - settings.INDEX_NAME\n \"\"\"\n if elasticsearch_client.indices.exists(settings.INDEX_NAME):\n click.secho(f'Deleting index \"{settings.INDEX_NAME}\"', fg=\"green\")\n result = elasticsearch_client.indices.delete(index=settings.INDEX_NAME)\n click.secho(str(result), fg=\"green\")\n else:\n click.secho(f'Index \"{settings.INDEX_NAME}\" 
does not exist.', fg=\"red\")\n exit(1)\n\n\ndef reset(mapping_file: str) -> None:\n \"\"\" Reset an Elasticsearch index\n\n using:\n - settings.INDEX_HOST\n - settings.INDEX_NAME\n - mapping_file\n \"\"\"\n if pathlib.Path(mapping_file).exists() is False:\n click.secho(f'Mapping file \"{mapping_file}\" not found.', fg=\"red\")\n exit(1)\n delete()\n create(mapping_file)\n\n\ndef concepts():\n \"\"\" Iterate over all concepts in the database \"\"\"\n\n queryset = Concept.objects.prefetch_related(\"variables\").all()\n for concept in queryset:\n study = list(\n Study.objects.filter(datasets__variables__concept_id=concept.id)\n .values_list(\"name\", flat=True)\n .distinct()\n )\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": concept.DOC_TYPE,\n \"_id\": str(concept.id),\n \"_source\": {\"name\": concept.name, \"label\": concept.label, \"study\": study},\n }\n\n\ndef publications():\n \"\"\" Iterate over all publications in the database \"\"\"\n\n queryset = Publication.objects.select_related(\"study\").all()\n for publication in queryset:\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": publication.DOC_TYPE,\n \"_id\": str(publication.id),\n \"_source\": publication.to_elastic_dict(),\n }\n\n\ndef questions():\n \"\"\" Iterate over all questions in the database \"\"\"\n\n queryset = Question.objects.select_related(\n \"instrument\",\n \"instrument__analysis_unit\",\n \"instrument__period\",\n \"instrument__study\",\n ).all()\n for question in queryset:\n period = question.get_period(period_id=\"name\")\n try:\n analysis_unit = question.instrument.analysis_unit.name\n except AttributeError:\n analysis_unit = None\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": question.DOC_TYPE,\n \"_id\": str(question.id),\n \"_source\": {\n \"period\": period,\n \"analysis_unit\": analysis_unit,\n \"question\": question.name,\n \"name\": question.name,\n \"label\": question.label,\n \"items\": question.items,\n \"sort_id\": question.sort_id,\n \"instrument\": question.instrument.name,\n \"study\": question.instrument.study.name,\n \"namespace\": question.instrument.study.name,\n },\n }\n\n\ndef variables():\n \"\"\" Iterate over all variables in the database \"\"\"\n\n queryset = Variable.objects.select_related(\n \"dataset\",\n \"dataset__study\",\n \"dataset__analysis_unit\",\n \"dataset__conceptual_dataset\",\n \"dataset__period\",\n ).all()\n for variable in queryset:\n period = variable.get_period(period_id=\"name\")\n try:\n analysis_unit = variable.dataset.analysis_unit.name\n except AttributeError:\n analysis_unit = None\n try:\n sub_type = variable.dataset.conceptual_dataset.name\n except AttributeError:\n sub_type = None\n\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": variable.DOC_TYPE,\n \"_id\": str(variable.id),\n \"_source\": {\n \"name\": variable.name,\n \"variable\": variable.name,\n \"label\": variable.label,\n \"label_de\": variable.label_de,\n \"dataset\": variable.dataset.name,\n \"period\": period,\n \"sub_type\": sub_type,\n \"analysis_unit\": analysis_unit,\n \"study\": variable.dataset.study.name,\n \"namespace\": variable.dataset.study.name,\n \"categories\": variable.categories,\n },\n }\n\n\ndef populate():\n \"\"\" Workaround \"\"\"\n print(f\"Indexing {Publication.objects.count()} publications into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, publications())\n print(result)\n\n print(f\"Indexing {Concept.objects.count()} concepts into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, concepts())\n 
print(result)\n\n print(f\"Indexing {Question.objects.count()} questions into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, questions())\n print(result)\n\n print(f\"Indexing {Variable.objects.count()} variables into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, variables())\n print(result)\n\n\[email protected]()\ndef command():\n \"\"\"ddionrails: Elasticsearch index creation/deletion/reset tool.\"\"\"\n\n\[email protected](\n \"create\",\n short_help='Create the index defined in \"settings.INDEX_NAME\" and the given \"mapping_file\"',\n)\[email protected](\n \"-f\",\n \"--file\",\n \"mapping_file\",\n default=\"ddionrails/elastic/mapping.json\",\n help='Elasticsearch mapping file in JSON format (defaults to \"ddionrails/elastic/mapping.json\")',\n)\ndef create_command(mapping_file: str) -> None:\n \"\"\" Create an Elasticsearch index\n\n using:\\n\n - settings.INDEX_HOST\\n\n - settings.INDEX_NAME\\n\n - mapping_file\n \"\"\"\n create(mapping_file)\n\n\[email protected](\"delete\", short_help='Delete the index defined in \"settings.INDEX_NAME\"')\ndef delete_command():\n \"\"\" Delete an Elasticsearch index\n\n using:\\n\n - settings.INDEX_HOST\\n\n - settings.INDEX_NAME\n \"\"\"\n delete()\n\n\[email protected](\n \"reset\",\n short_help='Reset the index defined in \"settings.INDEX_NAME\" and the given \"mapping_file\"',\n)\[email protected](\n \"-f\",\n \"--file\",\n \"mapping_file\",\n default=\"ddionrails/elastic/mapping.json\",\n help='Elasticsearch mapping file in JSON format (defaults to \"ddionrails/elastic/mapping.json\")',\n)\ndef reset(mapping_file: str) -> None:\n \"\"\" Reset an Elasticsearch index\n\n using:\\n\n - settings.INDEX_HOST\\n\n - settings.INDEX_NAME\\n\n - mapping_file\n \"\"\"\n delete()\n create(mapping_file)\n\n\[email protected](\"populate\", short_help=\"Populate the Elasticsearch index\")\ndef populate_command() -> None:\n \"\"\" Populate the Elasticsearch index \"\"\"\n populate()\n\n\n# remove \"verbosity\", \"settings\", \"pythonpath\", \"traceback\", \"color\" options from django-click\ncommand.params = command.params[:2] + command.params[7:]\n", "path": "ddionrails/imports/management/commands/index.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\" \"Index\" management command for ddionrails project \"\"\"\n\nimport json\nimport pathlib\n\nimport djclick as click\nfrom django.conf import settings\nfrom elasticsearch import Elasticsearch, helpers\n\nfrom ddionrails.concepts.models import Concept\nfrom ddionrails.data.models import Variable\nfrom ddionrails.instruments.models import Question\nfrom ddionrails.publications.models import Publication\nfrom ddionrails.studies.models import Study\n\nelasticsearch_client = Elasticsearch(hosts=[settings.INDEX_HOST])\n\n\ndef create(mapping_file: str) -> None:\n \"\"\" Create an Elasticsearch index\n\n using:\n - settings.INDEX_HOST\n - settings.INDEX_NAME\n - mapping_file\n \"\"\"\n if elasticsearch_client.indices.exists(settings.INDEX_NAME):\n click.secho(f'Index \"{settings.INDEX_NAME}\" already exists.', fg=\"red\")\n exit(1)\n else:\n if pathlib.Path(mapping_file).exists() is False:\n click.secho(f'Mapping file \"{mapping_file}\" not found.', fg=\"red\")\n exit(1)\n else:\n with open(mapping_file, \"r\") as infile:\n mapping = json.load(infile)\n click.secho(\n f'Creating index \"{settings.INDEX_NAME}\" with maping from \"{mapping_file}\"',\n fg=\"green\",\n )\n result = elasticsearch_client.indices.create(\n index=settings.INDEX_NAME, body=mapping\n )\n 
click.secho(str(result), fg=\"green\")\n\n\ndef delete() -> None:\n \"\"\" Delete an Elasticsearch index\n\n using:\n - settings.INDEX_HOST\n - settings.INDEX_NAME\n \"\"\"\n if elasticsearch_client.indices.exists(settings.INDEX_NAME):\n click.secho(f'Deleting index \"{settings.INDEX_NAME}\"', fg=\"green\")\n result = elasticsearch_client.indices.delete(index=settings.INDEX_NAME)\n click.secho(str(result), fg=\"green\")\n else:\n click.secho(f'Index \"{settings.INDEX_NAME}\" does not exist.', fg=\"red\")\n exit(1)\n\n\ndef reset(mapping_file: str) -> None:\n \"\"\" Reset an Elasticsearch index\n\n using:\n - settings.INDEX_HOST\n - settings.INDEX_NAME\n - mapping_file\n \"\"\"\n if pathlib.Path(mapping_file).exists() is False:\n click.secho(f'Mapping file \"{mapping_file}\" not found.', fg=\"red\")\n exit(1)\n delete()\n create(mapping_file)\n\n\ndef concepts():\n \"\"\" Iterate over all concepts in the database \"\"\"\n\n queryset = Concept.objects.prefetch_related(\"variables\").all()\n for concept in queryset:\n study = list(\n Study.objects.filter(datasets__variables__concept_id=concept.id)\n .values_list(\"name\", flat=True)\n .distinct()\n )\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": concept.DOC_TYPE,\n \"_id\": str(concept.id),\n \"_source\": {\"name\": concept.name, \"label\": concept.label, \"study\": study},\n }\n\n\ndef publications():\n \"\"\" Iterate over all publications in the database \"\"\"\n\n queryset = Publication.objects.select_related(\"study\").all()\n for publication in queryset:\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": publication.DOC_TYPE,\n \"_id\": str(publication.id),\n \"_source\": publication.to_elastic_dict(),\n }\n\n\ndef questions():\n \"\"\" Iterate over all questions in the database \"\"\"\n\n queryset = Question.objects.select_related(\n \"instrument\",\n \"instrument__analysis_unit\",\n \"instrument__period\",\n \"instrument__study\",\n ).all()\n for question in queryset:\n period = question.get_period(period_id=\"name\")\n try:\n analysis_unit = question.instrument.analysis_unit.name\n except AttributeError:\n analysis_unit = \"None\"\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": question.DOC_TYPE,\n \"_id\": str(question.id),\n \"_source\": {\n \"period\": period,\n \"analysis_unit\": analysis_unit,\n \"question\": question.name,\n \"name\": question.name,\n \"label\": question.label,\n \"items\": question.items,\n \"sort_id\": question.sort_id,\n \"instrument\": question.instrument.name,\n \"study\": question.instrument.study.name,\n },\n }\n\n\ndef variables():\n \"\"\" Iterate over all variables in the database \"\"\"\n\n queryset = Variable.objects.select_related(\n \"dataset\",\n \"dataset__study\",\n \"dataset__analysis_unit\",\n \"dataset__conceptual_dataset\",\n \"dataset__period\",\n ).all()\n for variable in queryset:\n period = variable.get_period(period_id=\"name\")\n try:\n analysis_unit = variable.dataset.analysis_unit.name\n except AttributeError:\n analysis_unit = \"None\"\n try:\n sub_type = variable.dataset.conceptual_dataset.name\n except AttributeError:\n sub_type = \"None\"\n\n yield {\n \"_index\": settings.INDEX_NAME,\n \"_type\": variable.DOC_TYPE,\n \"_id\": str(variable.id),\n \"_source\": {\n \"name\": variable.name,\n \"variable\": variable.name,\n \"label\": variable.label,\n \"label_de\": variable.label_de,\n \"dataset\": variable.dataset.name,\n \"period\": period,\n \"sub_type\": sub_type,\n \"analysis_unit\": analysis_unit,\n \"study\": variable.dataset.study.name,\n 
\"categories\": variable.categories,\n },\n }\n\n\ndef populate():\n \"\"\" Workaround \"\"\"\n print(f\"Indexing {Publication.objects.count()} publications into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, publications())\n print(result)\n\n print(f\"Indexing {Concept.objects.count()} concepts into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, concepts())\n print(result)\n\n print(f\"Indexing {Question.objects.count()} questions into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, questions())\n print(result)\n\n print(f\"Indexing {Variable.objects.count()} variables into Elasticsearch\")\n result = helpers.bulk(elasticsearch_client, variables())\n print(result)\n\n\[email protected]()\ndef command():\n \"\"\"ddionrails: Elasticsearch index creation/deletion/reset tool.\"\"\"\n\n\[email protected](\n \"create\",\n short_help='Create the index defined in \"settings.INDEX_NAME\" and the given \"mapping_file\"',\n)\[email protected](\n \"-f\",\n \"--file\",\n \"mapping_file\",\n default=\"ddionrails/elastic/mapping.json\",\n help='Elasticsearch mapping file in JSON format (defaults to \"ddionrails/elastic/mapping.json\")',\n)\ndef create_command(mapping_file: str) -> None:\n \"\"\" Create an Elasticsearch index\n\n using:\\n\n - settings.INDEX_HOST\\n\n - settings.INDEX_NAME\\n\n - mapping_file\n \"\"\"\n create(mapping_file)\n\n\[email protected](\"delete\", short_help='Delete the index defined in \"settings.INDEX_NAME\"')\ndef delete_command():\n \"\"\" Delete an Elasticsearch index\n\n using:\\n\n - settings.INDEX_HOST\\n\n - settings.INDEX_NAME\n \"\"\"\n delete()\n\n\[email protected](\n \"reset\",\n short_help='Reset the index defined in \"settings.INDEX_NAME\" and the given \"mapping_file\"',\n)\[email protected](\n \"-f\",\n \"--file\",\n \"mapping_file\",\n default=\"ddionrails/elastic/mapping.json\",\n help='Elasticsearch mapping file in JSON format (defaults to \"ddionrails/elastic/mapping.json\")',\n)\ndef reset(mapping_file: str) -> None:\n \"\"\" Reset an Elasticsearch index\n\n using:\\n\n - settings.INDEX_HOST\\n\n - settings.INDEX_NAME\\n\n - mapping_file\n \"\"\"\n delete()\n create(mapping_file)\n\n\[email protected](\"populate\", short_help=\"Populate the Elasticsearch index\")\ndef populate_command() -> None:\n \"\"\" Populate the Elasticsearch index \"\"\"\n populate()\n\n\n# remove \"verbosity\", \"settings\", \"pythonpath\", \"traceback\", \"color\" options from django-click\ncommand.params = command.params[:2] + command.params[7:]\n", "path": "ddionrails/imports/management/commands/index.py"}]}
| 2,943 | 333 |
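A small follow-up to the namespace-removal patch above: once the index has been rebuilt with the patched command, the removal can be sanity-checked with an `exists` query. This is only a sketch — the host and index name are placeholders standing in for `settings.INDEX_HOST` and `settings.INDEX_NAME`, and it assumes the same pre-8.x `elasticsearch` Python client that the management command already imports.

```python
from elasticsearch import Elasticsearch

# Placeholder host/index; the project reads these from settings.INDEX_HOST / settings.INDEX_NAME.
client = Elasticsearch(hosts=["localhost:9200"])

# Count documents that still carry a "namespace" field; after re-indexing with
# the patched command this count is expected to be zero.
result = client.count(
    index="dor",
    body={"query": {"exists": {"field": "namespace"}}},
)
print(result["count"])
```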
gh_patches_debug_42716 | rasdani/github-patches | git_diff | pallets__click-1061 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New upcoming hidden option in Click.Option is not hidden from bash completion
Thanks for the wonderful package.
I don't know whether this is intended behavior or not, but while trying out the upcoming 7.0 release's hidden option and autocompletion for Click.Option I noticed that a hidden option is removed from the help message yet still shows up in bash completion.
It would be good if we could hide it from bash completion as well, so that a hidden option is actually hidden everywhere.
--- END ISSUE ---
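To make the report concrete before looking at the files below, here is a minimal reproduction sketch (written for illustration, not taken from the issue); it only assumes Click 7.0's `hidden=True` flag on `click.option`:

```python
import click


@click.command()
@click.option("--visible", help="Shown in --help and in completion.")
@click.option("--secret", hidden=True, help="Hidden from --help, but still completed.")
def cli(visible, secret):
    """Toy command used to reproduce the report."""
    click.echo("visible={} secret={}".format(visible, secret))


if __name__ == "__main__":
    cli()
```

Assuming the script is installed as a `cli` console script and completion is enabled with `eval "$(_CLI_COMPLETE=source cli)"`, typing `cli --<TAB>` offers both `--visible` and `--secret`; the expectation is that only `--visible` should be suggested.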
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `click/_bashcomplete.py`
Content:
```
1 import collections
2 import copy
3 import os
4 import re
5
6 from .utils import echo
7 from .parser import split_arg_string
8 from .core import MultiCommand, Option, Argument
9 from .types import Choice
10
11 WORDBREAK = '='
12
13 # Note, only BASH version 4.4 and later have the nosort option.
14 COMPLETION_SCRIPT_BASH = '''
15 %(complete_func)s() {
16 local IFS=$'\n'
17 COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
18 COMP_CWORD=$COMP_CWORD \\
19 %(autocomplete_var)s=complete $1 ) )
20 return 0
21 }
22
23 %(complete_func)setup() {
24 local COMPLETION_OPTIONS=""
25 local BASH_VERSION_ARR=(${BASH_VERSION//./ })
26 if [ ${BASH_VERSION_ARR[0]} -ge 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ];then
27 COMPLETION_OPTIONS="-o nosort"
28 fi
29
30 complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
31 }
32
33 %(complete_func)setup
34 '''
35
36 COMPLETION_SCRIPT_ZSH = '''
37 %(complete_func)s() {
38 local -a completions
39 local -a completions_with_descriptions
40 local -a response
41 response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
42 COMP_CWORD=$((CURRENT-1)) \\
43 %(autocomplete_var)s=\"complete_zsh\" \\
44 %(script_names)s )}")
45
46 for key descr in ${(kv)response}; do
47 if [[ "$descr" == "_" ]]; then
48 completions+=("$key")
49 else
50 completions_with_descriptions+=("$key":"$descr")
51 fi
52 done
53
54 if [ -n "$completions_with_descriptions" ]; then
55 _describe -V unsorted completions_with_descriptions -U -Q
56 fi
57
58 if [ -n "$completions" ]; then
59 compadd -U -V unsorted -Q -a completions
60 fi
61 compstate[insert]="automenu"
62 }
63
64 compdef %(complete_func)s %(script_names)s
65 '''
66
67 _invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')
68
69
70 def get_completion_script(prog_name, complete_var, shell):
71 cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))
72 script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH
73 return (script % {
74 'complete_func': '_%s_completion' % cf_name,
75 'script_names': prog_name,
76 'autocomplete_var': complete_var,
77 }).strip() + ';'
78
79
80 def resolve_ctx(cli, prog_name, args):
81 """
82 Parse into a hierarchy of contexts. Contexts are connected through the parent variable.
83 :param cli: command definition
84 :param prog_name: the program that is running
85 :param args: full list of args
86 :return: the final context/command parsed
87 """
88 ctx = cli.make_context(prog_name, args, resilient_parsing=True)
89 args = ctx.protected_args + ctx.args
90 while args:
91 if isinstance(ctx.command, MultiCommand):
92 if not ctx.command.chain:
93 cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
94 if cmd is None:
95 return ctx
96 ctx = cmd.make_context(cmd_name, args, parent=ctx,
97 resilient_parsing=True)
98 args = ctx.protected_args + ctx.args
99 else:
100 # Walk chained subcommand contexts saving the last one.
101 while args:
102 cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
103 if cmd is None:
104 return ctx
105 sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
106 allow_extra_args=True,
107 allow_interspersed_args=False,
108 resilient_parsing=True)
109 args = sub_ctx.args
110 ctx = sub_ctx
111 args = sub_ctx.protected_args + sub_ctx.args
112 else:
113 break
114 return ctx
115
116
117 def start_of_option(param_str):
118 """
119 :param param_str: param_str to check
120 :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--")
121 """
122 return param_str and param_str[:1] == '-'
123
124
125 def is_incomplete_option(all_args, cmd_param):
126 """
127 :param all_args: the full original list of args supplied
128 :param cmd_param: the current command paramter
129 :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and
130 corresponds to this cmd_param. In other words whether this cmd_param option can still accept
131 values
132 """
133 if not isinstance(cmd_param, Option):
134 return False
135 if cmd_param.is_flag:
136 return False
137 last_option = None
138 for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
139 if index + 1 > cmd_param.nargs:
140 break
141 if start_of_option(arg_str):
142 last_option = arg_str
143
144 return True if last_option and last_option in cmd_param.opts else False
145
146
147 def is_incomplete_argument(current_params, cmd_param):
148 """
149 :param current_params: the current params and values for this argument as already entered
150 :param cmd_param: the current command parameter
151 :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In
152 other words whether or not the this cmd_param argument can still accept values
153 """
154 if not isinstance(cmd_param, Argument):
155 return False
156 current_param_values = current_params[cmd_param.name]
157 if current_param_values is None:
158 return True
159 if cmd_param.nargs == -1:
160 return True
161 if isinstance(current_param_values, collections.Iterable) \
162 and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
163 return True
164 return False
165
166
167 def get_user_autocompletions(ctx, args, incomplete, cmd_param):
168 """
169 :param ctx: context associated with the parsed command
170 :param args: full list of args
171 :param incomplete: the incomplete text to autocomplete
172 :param cmd_param: command definition
173 :return: all the possible user-specified completions for the param
174 """
175 results = []
176 if isinstance(cmd_param.type, Choice):
177 # Choices don't support descriptions.
178 results = [(c, None)
179 for c in cmd_param.type.choices if c.startswith(incomplete)]
180 elif cmd_param.autocompletion is not None:
181 dynamic_completions = cmd_param.autocompletion(ctx=ctx,
182 args=args,
183 incomplete=incomplete)
184 results = [c if isinstance(c, tuple) else (c, None)
185 for c in dynamic_completions]
186 return results
187
188
189 def add_subcommand_completions(ctx, incomplete, completions_out):
190 # Add subcommand completions.
191 if isinstance(ctx.command, MultiCommand):
192 completions_out.extend(
193 [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in ctx.command.list_commands(ctx) if c.startswith(incomplete)])
194
195 # Walk up the context list and add any other completion possibilities from chained commands
196 while ctx.parent is not None:
197 ctx = ctx.parent
198 if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
199 remaining_commands = sorted(
200 set(ctx.command.list_commands(ctx)) - set(ctx.protected_args))
201 completions_out.extend(
202 [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in remaining_commands if c.startswith(incomplete)])
203
204
205 def get_choices(cli, prog_name, args, incomplete):
206 """
207 :param cli: command definition
208 :param prog_name: the program that is running
209 :param args: full list of args
210 :param incomplete: the incomplete text to autocomplete
211 :return: all the possible completions for the incomplete
212 """
213 all_args = copy.deepcopy(args)
214
215 ctx = resolve_ctx(cli, prog_name, args)
216 if ctx is None:
217 return []
218
219 # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse
220 # without the '='
221 if start_of_option(incomplete) and WORDBREAK in incomplete:
222 partition_incomplete = incomplete.partition(WORDBREAK)
223 all_args.append(partition_incomplete[0])
224 incomplete = partition_incomplete[2]
225 elif incomplete == WORDBREAK:
226 incomplete = ''
227
228 completions = []
229 if start_of_option(incomplete):
230 # completions for partial options
231 for param in ctx.command.params:
232 if isinstance(param, Option):
233 param_opts = [param_opt for param_opt in param.opts +
234 param.secondary_opts if param_opt not in all_args or param.multiple]
235 completions.extend(
236 [(o, param.help) for o in param_opts if o.startswith(incomplete)])
237 return completions
238 # completion for option values from user supplied values
239 for param in ctx.command.params:
240 if is_incomplete_option(all_args, param):
241 return get_user_autocompletions(ctx, all_args, incomplete, param)
242 # completion for argument values from user supplied values
243 for param in ctx.command.params:
244 if is_incomplete_argument(ctx.params, param):
245 return get_user_autocompletions(ctx, all_args, incomplete, param)
246
247 add_subcommand_completions(ctx, incomplete, completions)
248 # Sort before returning so that proper ordering can be enforced in custom types.
249 return sorted(completions)
250
251
252 def do_complete(cli, prog_name, include_descriptions):
253 cwords = split_arg_string(os.environ['COMP_WORDS'])
254 cword = int(os.environ['COMP_CWORD'])
255 args = cwords[1:cword]
256 try:
257 incomplete = cwords[cword]
258 except IndexError:
259 incomplete = ''
260
261 for item in get_choices(cli, prog_name, args, incomplete):
262 echo(item[0])
263 if include_descriptions:
264 # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present.
265 echo(item[1] if item[1] else '_')
266
267 return True
268
269
270 def bashcomplete(cli, prog_name, complete_var, complete_instr):
271 if complete_instr.startswith('source'):
272 shell = 'zsh' if complete_instr == 'source_zsh' else 'bash'
273 echo(get_completion_script(prog_name, complete_var, shell))
274 return True
275 elif complete_instr == 'complete' or complete_instr == 'complete_zsh':
276 return do_complete(cli, prog_name, complete_instr == 'complete_zsh')
277 return False
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/click/_bashcomplete.py b/click/_bashcomplete.py
--- a/click/_bashcomplete.py
+++ b/click/_bashcomplete.py
@@ -23,7 +23,8 @@
%(complete_func)setup() {
local COMPLETION_OPTIONS=""
local BASH_VERSION_ARR=(${BASH_VERSION//./ })
- if [ ${BASH_VERSION_ARR[0]} -ge 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ];then
+ # Only BASH version 4.4 and later have the nosort option.
+ if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
COMPLETION_OPTIONS="-o nosort"
fi
@@ -176,7 +177,7 @@
if isinstance(cmd_param.type, Choice):
# Choices don't support descriptions.
results = [(c, None)
- for c in cmd_param.type.choices if c.startswith(incomplete)]
+ for c in cmd_param.type.choices if str(c).startswith(incomplete)]
elif cmd_param.autocompletion is not None:
dynamic_completions = cmd_param.autocompletion(ctx=ctx,
args=args,
@@ -186,20 +187,32 @@
return results
+def get_visible_commands_starting_with(ctx, starts_with):
+ """
+ :param ctx: context associated with the parsed command
+ :starts_with: string that visible commands must start with.
+ :return: all visible (not hidden) commands that start with starts_with.
+ """
+ for c in ctx.command.list_commands(ctx):
+ if c.startswith(starts_with):
+ command = ctx.command.get_command(ctx, c)
+ if not command.hidden:
+ yield command
+
+
def add_subcommand_completions(ctx, incomplete, completions_out):
# Add subcommand completions.
if isinstance(ctx.command, MultiCommand):
completions_out.extend(
- [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in ctx.command.list_commands(ctx) if c.startswith(incomplete)])
+ [(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)])
# Walk up the context list and add any other completion possibilities from chained commands
while ctx.parent is not None:
ctx = ctx.parent
if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
- remaining_commands = sorted(
- set(ctx.command.list_commands(ctx)) - set(ctx.protected_args))
- completions_out.extend(
- [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in remaining_commands if c.startswith(incomplete)])
+ remaining_commands = [c for c in get_visible_commands_starting_with(ctx, incomplete)
+ if c.name not in ctx.protected_args]
+ completions_out.extend([(c.name, c.get_short_help_str()) for c in remaining_commands])
def get_choices(cli, prog_name, args, incomplete):
@@ -229,11 +242,10 @@
if start_of_option(incomplete):
# completions for partial options
for param in ctx.command.params:
- if isinstance(param, Option):
+ if isinstance(param, Option) and not param.hidden:
param_opts = [param_opt for param_opt in param.opts +
param.secondary_opts if param_opt not in all_args or param.multiple]
- completions.extend(
- [(o, param.help) for o in param_opts if o.startswith(incomplete)])
+ completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)])
return completions
# completion for option values from user supplied values
for param in ctx.command.params:
|
{"golden_diff": "diff --git a/click/_bashcomplete.py b/click/_bashcomplete.py\n--- a/click/_bashcomplete.py\n+++ b/click/_bashcomplete.py\n@@ -23,7 +23,8 @@\n %(complete_func)setup() {\n local COMPLETION_OPTIONS=\"\"\n local BASH_VERSION_ARR=(${BASH_VERSION//./ })\n- if [ ${BASH_VERSION_ARR[0]} -ge 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ];then\n+ # Only BASH version 4.4 and later have the nosort option.\n+ if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then\n COMPLETION_OPTIONS=\"-o nosort\"\n fi\n \n@@ -176,7 +177,7 @@\n if isinstance(cmd_param.type, Choice):\n # Choices don't support descriptions.\n results = [(c, None)\n- for c in cmd_param.type.choices if c.startswith(incomplete)]\n+ for c in cmd_param.type.choices if str(c).startswith(incomplete)]\n elif cmd_param.autocompletion is not None:\n dynamic_completions = cmd_param.autocompletion(ctx=ctx,\n args=args,\n@@ -186,20 +187,32 @@\n return results\n \n \n+def get_visible_commands_starting_with(ctx, starts_with):\n+ \"\"\"\n+ :param ctx: context associated with the parsed command\n+ :starts_with: string that visible commands must start with.\n+ :return: all visible (not hidden) commands that start with starts_with.\n+ \"\"\"\n+ for c in ctx.command.list_commands(ctx):\n+ if c.startswith(starts_with):\n+ command = ctx.command.get_command(ctx, c)\n+ if not command.hidden:\n+ yield command\n+\n+\n def add_subcommand_completions(ctx, incomplete, completions_out):\n # Add subcommand completions.\n if isinstance(ctx.command, MultiCommand):\n completions_out.extend(\n- [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in ctx.command.list_commands(ctx) if c.startswith(incomplete)])\n+ [(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)])\n \n # Walk up the context list and add any other completion possibilities from chained commands\n while ctx.parent is not None:\n ctx = ctx.parent\n if isinstance(ctx.command, MultiCommand) and ctx.command.chain:\n- remaining_commands = sorted(\n- set(ctx.command.list_commands(ctx)) - set(ctx.protected_args))\n- completions_out.extend(\n- [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in remaining_commands if c.startswith(incomplete)])\n+ remaining_commands = [c for c in get_visible_commands_starting_with(ctx, incomplete)\n+ if c.name not in ctx.protected_args]\n+ completions_out.extend([(c.name, c.get_short_help_str()) for c in remaining_commands])\n \n \n def get_choices(cli, prog_name, args, incomplete):\n@@ -229,11 +242,10 @@\n if start_of_option(incomplete):\n # completions for partial options\n for param in ctx.command.params:\n- if isinstance(param, Option):\n+ if isinstance(param, Option) and not param.hidden:\n param_opts = [param_opt for param_opt in param.opts +\n param.secondary_opts if param_opt not in all_args or param.multiple]\n- completions.extend(\n- [(o, param.help) for o in param_opts if o.startswith(incomplete)])\n+ completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)])\n return completions\n # completion for option values from user supplied values\n for param in ctx.command.params:\n", "issue": "New upcoming hidden option in Click.Option is not hidden from bash completion\nThanks for wonderful package.\r\n\r\nI don't know whether this is intended behavior or not but I was just trying out new upcoming release 7.0 for hidden option and autocompletion to Click.Option and hidden option hides option from help message but 
it shows up in bash completion.\r\nHowever it will be good if we can hide it from bash completion also, so that hidden option is actually hidden from everywhere.\n", "before_files": [{"content": "import collections\nimport copy\nimport os\nimport re\n\nfrom .utils import echo\nfrom .parser import split_arg_string\nfrom .core import MultiCommand, Option, Argument\nfrom .types import Choice\n\nWORDBREAK = '='\n\n# Note, only BASH version 4.4 and later have the nosort option.\nCOMPLETION_SCRIPT_BASH = '''\n%(complete_func)s() {\n local IFS=$'\\n'\n COMPREPLY=( $( env COMP_WORDS=\"${COMP_WORDS[*]}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n %(autocomplete_var)s=complete $1 ) )\n return 0\n}\n\n%(complete_func)setup() {\n local COMPLETION_OPTIONS=\"\"\n local BASH_VERSION_ARR=(${BASH_VERSION//./ })\n if [ ${BASH_VERSION_ARR[0]} -ge 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ];then\n COMPLETION_OPTIONS=\"-o nosort\"\n fi\n\n complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s\n}\n\n%(complete_func)setup\n'''\n\nCOMPLETION_SCRIPT_ZSH = '''\n%(complete_func)s() {\n local -a completions\n local -a completions_with_descriptions\n local -a response\n response=(\"${(@f)$( env COMP_WORDS=\\\"${words[*]}\\\" \\\\\n COMP_CWORD=$((CURRENT-1)) \\\\\n %(autocomplete_var)s=\\\"complete_zsh\\\" \\\\\n %(script_names)s )}\")\n\n for key descr in ${(kv)response}; do\n if [[ \"$descr\" == \"_\" ]]; then\n completions+=(\"$key\")\n else\n completions_with_descriptions+=(\"$key\":\"$descr\")\n fi\n done\n\n if [ -n \"$completions_with_descriptions\" ]; then\n _describe -V unsorted completions_with_descriptions -U -Q\n fi\n\n if [ -n \"$completions\" ]; then\n compadd -U -V unsorted -Q -a completions\n fi\n compstate[insert]=\"automenu\"\n}\n\ncompdef %(complete_func)s %(script_names)s\n'''\n\n_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')\n\n\ndef get_completion_script(prog_name, complete_var, shell):\n cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))\n script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH\n return (script % {\n 'complete_func': '_%s_completion' % cf_name,\n 'script_names': prog_name,\n 'autocomplete_var': complete_var,\n }).strip() + ';'\n\n\ndef resolve_ctx(cli, prog_name, args):\n \"\"\"\n Parse into a hierarchy of contexts. Contexts are connected through the parent variable.\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :return: the final context/command parsed\n \"\"\"\n ctx = cli.make_context(prog_name, args, resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n while args:\n if isinstance(ctx.command, MultiCommand):\n if not ctx.command.chain:\n cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)\n if cmd is None:\n return ctx\n ctx = cmd.make_context(cmd_name, args, parent=ctx,\n resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n else:\n # Walk chained subcommand contexts saving the last one.\n while args:\n cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)\n if cmd is None:\n return ctx\n sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,\n allow_extra_args=True,\n allow_interspersed_args=False,\n resilient_parsing=True)\n args = sub_ctx.args\n ctx = sub_ctx\n args = sub_ctx.protected_args + sub_ctx.args\n else:\n break\n return ctx\n\n\ndef start_of_option(param_str):\n \"\"\"\n :param param_str: param_str to check\n :return: whether or not this is the start of an option declaration (i.e. 
starts \"-\" or \"--\")\n \"\"\"\n return param_str and param_str[:1] == '-'\n\n\ndef is_incomplete_option(all_args, cmd_param):\n \"\"\"\n :param all_args: the full original list of args supplied\n :param cmd_param: the current command paramter\n :return: whether or not the last option declaration (i.e. starts \"-\" or \"--\") is incomplete and\n corresponds to this cmd_param. In other words whether this cmd_param option can still accept\n values\n \"\"\"\n if not isinstance(cmd_param, Option):\n return False\n if cmd_param.is_flag:\n return False\n last_option = None\n for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):\n if index + 1 > cmd_param.nargs:\n break\n if start_of_option(arg_str):\n last_option = arg_str\n\n return True if last_option and last_option in cmd_param.opts else False\n\n\ndef is_incomplete_argument(current_params, cmd_param):\n \"\"\"\n :param current_params: the current params and values for this argument as already entered\n :param cmd_param: the current command parameter\n :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In\n other words whether or not the this cmd_param argument can still accept values\n \"\"\"\n if not isinstance(cmd_param, Argument):\n return False\n current_param_values = current_params[cmd_param.name]\n if current_param_values is None:\n return True\n if cmd_param.nargs == -1:\n return True\n if isinstance(current_param_values, collections.Iterable) \\\n and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:\n return True\n return False\n\n\ndef get_user_autocompletions(ctx, args, incomplete, cmd_param):\n \"\"\"\n :param ctx: context associated with the parsed command\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :param cmd_param: command definition\n :return: all the possible user-specified completions for the param\n \"\"\"\n results = []\n if isinstance(cmd_param.type, Choice):\n # Choices don't support descriptions.\n results = [(c, None)\n for c in cmd_param.type.choices if c.startswith(incomplete)]\n elif cmd_param.autocompletion is not None:\n dynamic_completions = cmd_param.autocompletion(ctx=ctx,\n args=args,\n incomplete=incomplete)\n results = [c if isinstance(c, tuple) else (c, None)\n for c in dynamic_completions]\n return results\n\n\ndef add_subcommand_completions(ctx, incomplete, completions_out):\n # Add subcommand completions.\n if isinstance(ctx.command, MultiCommand):\n completions_out.extend(\n [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in ctx.command.list_commands(ctx) if c.startswith(incomplete)])\n\n # Walk up the context list and add any other completion possibilities from chained commands\n while ctx.parent is not None:\n ctx = ctx.parent\n if isinstance(ctx.command, MultiCommand) and ctx.command.chain:\n remaining_commands = sorted(\n set(ctx.command.list_commands(ctx)) - set(ctx.protected_args))\n completions_out.extend(\n [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in remaining_commands if c.startswith(incomplete)])\n\n\ndef get_choices(cli, prog_name, args, incomplete):\n \"\"\"\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :return: all the possible completions for the incomplete\n \"\"\"\n all_args = copy.deepcopy(args)\n\n ctx = resolve_ctx(cli, prog_name, args)\n if ctx is None:\n return []\n\n # In 
newer versions of bash long opts with '='s are partitioned, but it's easier to parse\n # without the '='\n if start_of_option(incomplete) and WORDBREAK in incomplete:\n partition_incomplete = incomplete.partition(WORDBREAK)\n all_args.append(partition_incomplete[0])\n incomplete = partition_incomplete[2]\n elif incomplete == WORDBREAK:\n incomplete = ''\n\n completions = []\n if start_of_option(incomplete):\n # completions for partial options\n for param in ctx.command.params:\n if isinstance(param, Option):\n param_opts = [param_opt for param_opt in param.opts +\n param.secondary_opts if param_opt not in all_args or param.multiple]\n completions.extend(\n [(o, param.help) for o in param_opts if o.startswith(incomplete)])\n return completions\n # completion for option values from user supplied values\n for param in ctx.command.params:\n if is_incomplete_option(all_args, param):\n return get_user_autocompletions(ctx, all_args, incomplete, param)\n # completion for argument values from user supplied values\n for param in ctx.command.params:\n if is_incomplete_argument(ctx.params, param):\n return get_user_autocompletions(ctx, all_args, incomplete, param)\n\n add_subcommand_completions(ctx, incomplete, completions)\n # Sort before returning so that proper ordering can be enforced in custom types.\n return sorted(completions)\n\n\ndef do_complete(cli, prog_name, include_descriptions):\n cwords = split_arg_string(os.environ['COMP_WORDS'])\n cword = int(os.environ['COMP_CWORD'])\n args = cwords[1:cword]\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = ''\n\n for item in get_choices(cli, prog_name, args, incomplete):\n echo(item[0])\n if include_descriptions:\n # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present.\n echo(item[1] if item[1] else '_')\n\n return True\n\n\ndef bashcomplete(cli, prog_name, complete_var, complete_instr):\n if complete_instr.startswith('source'):\n shell = 'zsh' if complete_instr == 'source_zsh' else 'bash'\n echo(get_completion_script(prog_name, complete_var, shell))\n return True\n elif complete_instr == 'complete' or complete_instr == 'complete_zsh':\n return do_complete(cli, prog_name, complete_instr == 'complete_zsh')\n return False\n", "path": "click/_bashcomplete.py"}], "after_files": [{"content": "import collections\nimport copy\nimport os\nimport re\n\nfrom .utils import echo\nfrom .parser import split_arg_string\nfrom .core import MultiCommand, Option, Argument\nfrom .types import Choice\n\nWORDBREAK = '='\n\n# Note, only BASH version 4.4 and later have the nosort option.\nCOMPLETION_SCRIPT_BASH = '''\n%(complete_func)s() {\n local IFS=$'\\n'\n COMPREPLY=( $( env COMP_WORDS=\"${COMP_WORDS[*]}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n %(autocomplete_var)s=complete $1 ) )\n return 0\n}\n\n%(complete_func)setup() {\n local COMPLETION_OPTIONS=\"\"\n local BASH_VERSION_ARR=(${BASH_VERSION//./ })\n # Only BASH version 4.4 and later have the nosort option.\n if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then\n COMPLETION_OPTIONS=\"-o nosort\"\n fi\n\n complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s\n}\n\n%(complete_func)setup\n'''\n\nCOMPLETION_SCRIPT_ZSH = '''\n%(complete_func)s() {\n local -a completions\n local -a completions_with_descriptions\n local -a response\n response=(\"${(@f)$( env COMP_WORDS=\\\"${words[*]}\\\" \\\\\n COMP_CWORD=$((CURRENT-1)) \\\\\n 
%(autocomplete_var)s=\\\"complete_zsh\\\" \\\\\n %(script_names)s )}\")\n\n for key descr in ${(kv)response}; do\n if [[ \"$descr\" == \"_\" ]]; then\n completions+=(\"$key\")\n else\n completions_with_descriptions+=(\"$key\":\"$descr\")\n fi\n done\n\n if [ -n \"$completions_with_descriptions\" ]; then\n _describe -V unsorted completions_with_descriptions -U -Q\n fi\n\n if [ -n \"$completions\" ]; then\n compadd -U -V unsorted -Q -a completions\n fi\n compstate[insert]=\"automenu\"\n}\n\ncompdef %(complete_func)s %(script_names)s\n'''\n\n_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')\n\n\ndef get_completion_script(prog_name, complete_var, shell):\n cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))\n script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH\n return (script % {\n 'complete_func': '_%s_completion' % cf_name,\n 'script_names': prog_name,\n 'autocomplete_var': complete_var,\n }).strip() + ';'\n\n\ndef resolve_ctx(cli, prog_name, args):\n \"\"\"\n Parse into a hierarchy of contexts. Contexts are connected through the parent variable.\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :return: the final context/command parsed\n \"\"\"\n ctx = cli.make_context(prog_name, args, resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n while args:\n if isinstance(ctx.command, MultiCommand):\n if not ctx.command.chain:\n cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)\n if cmd is None:\n return ctx\n ctx = cmd.make_context(cmd_name, args, parent=ctx,\n resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n else:\n # Walk chained subcommand contexts saving the last one.\n while args:\n cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)\n if cmd is None:\n return ctx\n sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,\n allow_extra_args=True,\n allow_interspersed_args=False,\n resilient_parsing=True)\n args = sub_ctx.args\n ctx = sub_ctx\n args = sub_ctx.protected_args + sub_ctx.args\n else:\n break\n return ctx\n\n\ndef start_of_option(param_str):\n \"\"\"\n :param param_str: param_str to check\n :return: whether or not this is the start of an option declaration (i.e. starts \"-\" or \"--\")\n \"\"\"\n return param_str and param_str[:1] == '-'\n\n\ndef is_incomplete_option(all_args, cmd_param):\n \"\"\"\n :param all_args: the full original list of args supplied\n :param cmd_param: the current command paramter\n :return: whether or not the last option declaration (i.e. starts \"-\" or \"--\") is incomplete and\n corresponds to this cmd_param. In other words whether this cmd_param option can still accept\n values\n \"\"\"\n if not isinstance(cmd_param, Option):\n return False\n if cmd_param.is_flag:\n return False\n last_option = None\n for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):\n if index + 1 > cmd_param.nargs:\n break\n if start_of_option(arg_str):\n last_option = arg_str\n\n return True if last_option and last_option in cmd_param.opts else False\n\n\ndef is_incomplete_argument(current_params, cmd_param):\n \"\"\"\n :param current_params: the current params and values for this argument as already entered\n :param cmd_param: the current command parameter\n :return: whether or not the last argument is incomplete and corresponds to this cmd_param. 
In\n other words whether or not the this cmd_param argument can still accept values\n \"\"\"\n if not isinstance(cmd_param, Argument):\n return False\n current_param_values = current_params[cmd_param.name]\n if current_param_values is None:\n return True\n if cmd_param.nargs == -1:\n return True\n if isinstance(current_param_values, collections.Iterable) \\\n and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:\n return True\n return False\n\n\ndef get_user_autocompletions(ctx, args, incomplete, cmd_param):\n \"\"\"\n :param ctx: context associated with the parsed command\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :param cmd_param: command definition\n :return: all the possible user-specified completions for the param\n \"\"\"\n results = []\n if isinstance(cmd_param.type, Choice):\n # Choices don't support descriptions.\n results = [(c, None)\n for c in cmd_param.type.choices if str(c).startswith(incomplete)]\n elif cmd_param.autocompletion is not None:\n dynamic_completions = cmd_param.autocompletion(ctx=ctx,\n args=args,\n incomplete=incomplete)\n results = [c if isinstance(c, tuple) else (c, None)\n for c in dynamic_completions]\n return results\n\n\ndef get_visible_commands_starting_with(ctx, starts_with):\n \"\"\"\n :param ctx: context associated with the parsed command\n :starts_with: string that visible commands must start with.\n :return: all visible (not hidden) commands that start with starts_with.\n \"\"\"\n for c in ctx.command.list_commands(ctx):\n if c.startswith(starts_with):\n command = ctx.command.get_command(ctx, c)\n if not command.hidden:\n yield command\n\n\ndef add_subcommand_completions(ctx, incomplete, completions_out):\n # Add subcommand completions.\n if isinstance(ctx.command, MultiCommand):\n completions_out.extend(\n [(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)])\n\n # Walk up the context list and add any other completion possibilities from chained commands\n while ctx.parent is not None:\n ctx = ctx.parent\n if isinstance(ctx.command, MultiCommand) and ctx.command.chain:\n remaining_commands = [c for c in get_visible_commands_starting_with(ctx, incomplete)\n if c.name not in ctx.protected_args]\n completions_out.extend([(c.name, c.get_short_help_str()) for c in remaining_commands])\n\n\ndef get_choices(cli, prog_name, args, incomplete):\n \"\"\"\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :return: all the possible completions for the incomplete\n \"\"\"\n all_args = copy.deepcopy(args)\n\n ctx = resolve_ctx(cli, prog_name, args)\n if ctx is None:\n return []\n\n # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse\n # without the '='\n if start_of_option(incomplete) and WORDBREAK in incomplete:\n partition_incomplete = incomplete.partition(WORDBREAK)\n all_args.append(partition_incomplete[0])\n incomplete = partition_incomplete[2]\n elif incomplete == WORDBREAK:\n incomplete = ''\n\n completions = []\n if start_of_option(incomplete):\n # completions for partial options\n for param in ctx.command.params:\n if isinstance(param, Option) and not param.hidden:\n param_opts = [param_opt for param_opt in param.opts +\n param.secondary_opts if param_opt not in all_args or param.multiple]\n completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)])\n return 
completions\n # completion for option values from user supplied values\n for param in ctx.command.params:\n if is_incomplete_option(all_args, param):\n return get_user_autocompletions(ctx, all_args, incomplete, param)\n # completion for argument values from user supplied values\n for param in ctx.command.params:\n if is_incomplete_argument(ctx.params, param):\n return get_user_autocompletions(ctx, all_args, incomplete, param)\n\n add_subcommand_completions(ctx, incomplete, completions)\n # Sort before returning so that proper ordering can be enforced in custom types.\n return sorted(completions)\n\n\ndef do_complete(cli, prog_name, include_descriptions):\n cwords = split_arg_string(os.environ['COMP_WORDS'])\n cword = int(os.environ['COMP_CWORD'])\n args = cwords[1:cword]\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = ''\n\n for item in get_choices(cli, prog_name, args, incomplete):\n echo(item[0])\n if include_descriptions:\n # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present.\n echo(item[1] if item[1] else '_')\n\n return True\n\n\ndef bashcomplete(cli, prog_name, complete_var, complete_instr):\n if complete_instr.startswith('source'):\n shell = 'zsh' if complete_instr == 'source_zsh' else 'bash'\n echo(get_completion_script(prog_name, complete_var, shell))\n return True\n elif complete_instr == 'complete' or complete_instr == 'complete_zsh':\n return do_complete(cli, prog_name, complete_instr == 'complete_zsh')\n return False\n", "path": "click/_bashcomplete.py"}]}
| 3,410 | 847 |
gh_patches_debug_16154
|
rasdani/github-patches
|
git_diff
|
bornhack__bornhack-website-378
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
backoffice: show quantity of products ordered after scanning qr code
backoffice: show quantity of products ordered after scanning qr code
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/tickets/models.py`
Content:
```
1 import io
2 import hashlib
3 import base64
4 import qrcode
5 from django.conf import settings
6 from django.urls import reverse_lazy
7 from django.utils.translation import ugettext_lazy as _
8
9 from shop.models import OrderProductRelation
10 from utils.models import UUIDModel, CampRelatedModel
11 from utils.pdf import generate_pdf_letter
12 from django.db import models
13 import logging
14
15 logger = logging.getLogger("bornhack.%s" % __name__)
16
17
18 # TicketType can be full week, one day. etc.
19 class TicketType(CampRelatedModel, UUIDModel):
20 name = models.TextField()
21 camp = models.ForeignKey("camps.Camp", on_delete=models.PROTECT)
22 includes_badge = models.BooleanField(default=False)
23 single_ticket_per_product = models.BooleanField(
24 default=False,
25 help_text=(
26 "Only create one ticket for a product/order pair no matter the quantity. "
27 "Useful for products which are bought in larger quantity (ie. village chairs)"
28 ),
29 )
30
31 def __str__(self):
32 return "{} ({})".format(self.name, self.camp.title)
33
34
35 def create_ticket_token(string):
36 return hashlib.sha256(string).hexdigest()
37
38
39 def qr_code_base64(token):
40 qr = qrcode.make(
41 token, version=1, error_correction=qrcode.constants.ERROR_CORRECT_H
42 ).resize((250, 250))
43 file_like = io.BytesIO()
44 qr.save(file_like, format="png")
45 qrcode_base64 = base64.b64encode(file_like.getvalue())
46 return qrcode_base64
47
48
49 class BaseTicket(CampRelatedModel, UUIDModel):
50 ticket_type = models.ForeignKey("TicketType", on_delete=models.PROTECT)
51 used = models.BooleanField(default=False)
52 badge_handed_out = models.BooleanField(default=False)
53 token = models.CharField(max_length=64, blank=True)
54 badge_token = models.CharField(max_length=64, blank=True)
55
56 class Meta:
57 abstract = True
58
59 @property
60 def camp(self):
61 return self.ticket_type.camp
62
63 def save(self, **kwargs):
64 self.token = self._get_token()
65 self.badge_token = self._get_badge_token()
66 super().save(**kwargs)
67
68 def _get_token(self):
69 return create_ticket_token(
70 "{_id}{secret_key}".format(
71 _id=self.uuid, secret_key=settings.SECRET_KEY
72 ).encode("utf-8")
73 )
74
75 def _get_badge_token(self):
76 return create_ticket_token(
77 "{_id}{secret_key}-badge".format(
78 _id=self.uuid, secret_key=settings.SECRET_KEY
79 ).encode("utf-8")
80 )
81
82 def get_qr_code_url(self):
83 return "data:image/png;base64,{}".format(
84 qr_code_base64(self._get_token()).decode("utf-8")
85 )
86
87 def get_qr_badge_code_url(self):
88 return "data:image/png;base64,{}".format(
89 qr_code_base64(self._get_badge_token()).decode("utf-8")
90 )
91
92 def generate_pdf(self):
93 formatdict = {"ticket": self}
94
95 if self.ticket_type.single_ticket_per_product and self.shortname == "shop":
96 orp = self.get_orp()
97 formatdict["quantity"] = orp.quantity
98
99 return generate_pdf_letter(
100 filename="{}_ticket_{}.pdf".format(self.shortname, self.pk),
101 formatdict=formatdict,
102 template="pdf/ticket.html",
103 )
104
105
106 class SponsorTicket(BaseTicket):
107 sponsor = models.ForeignKey("sponsors.Sponsor", on_delete=models.PROTECT)
108
109 def __str__(self):
110 return "SponsorTicket: {}".format(self.pk)
111
112 @property
113 def shortname(self):
114 return "sponsor"
115
116
117 class DiscountTicket(BaseTicket):
118 price = models.IntegerField(
119 help_text=_("Price of the discounted ticket (in DKK, including VAT).")
120 )
121
122 def __str__(self):
123 return "DiscountTicket: {}".format(self.pk)
124
125 @property
126 def shortname(self):
127 return "discount"
128
129
130 class ShopTicket(BaseTicket):
131 order = models.ForeignKey(
132 "shop.Order", related_name="shoptickets", on_delete=models.PROTECT
133 )
134 product = models.ForeignKey("shop.Product", on_delete=models.PROTECT)
135
136 name = models.CharField(
137 max_length=100,
138 help_text=(
139 "Name of the person this ticket belongs to. "
140 "This can be different from the buying user."
141 ),
142 null=True,
143 blank=True,
144 )
145
146 email = models.EmailField(null=True, blank=True)
147
148 # overwrite the _get_token method because old tickets use the user_id
149 def _get_token(self):
150 return hashlib.sha256(
151 "{_id}{user_id}{secret_key}".format(
152 _id=self.pk, user_id=self.order.user.pk, secret_key=settings.SECRET_KEY
153 ).encode("utf-8")
154 ).hexdigest()
155
156 def __str__(self):
157 return "Ticket {user} {product}".format(
158 user=self.order.user, product=self.product
159 )
160
161 def get_absolute_url(self):
162 return str(reverse_lazy("tickets:shopticket_edit", kwargs={"pk": self.pk}))
163
164 @property
165 def shortname(self):
166 return "shop"
167
168 def get_orp(self):
169 return OrderProductRelation.objects.get(product=self.product, order=self.order)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/tickets/models.py b/src/tickets/models.py
--- a/src/tickets/models.py
+++ b/src/tickets/models.py
@@ -93,8 +93,7 @@
formatdict = {"ticket": self}
if self.ticket_type.single_ticket_per_product and self.shortname == "shop":
- orp = self.get_orp()
- formatdict["quantity"] = orp.quantity
+ formatdict["quantity"] = self.orp.quantity
return generate_pdf_letter(
filename="{}_ticket_{}.pdf".format(self.shortname, self.pk),
@@ -165,5 +164,6 @@
def shortname(self):
return "shop"
- def get_orp(self):
+ @property
+ def orp(self):
return OrderProductRelation.objects.get(product=self.product, order=self.order)
|
{"golden_diff": "diff --git a/src/tickets/models.py b/src/tickets/models.py\n--- a/src/tickets/models.py\n+++ b/src/tickets/models.py\n@@ -93,8 +93,7 @@\n formatdict = {\"ticket\": self}\n \n if self.ticket_type.single_ticket_per_product and self.shortname == \"shop\":\n- orp = self.get_orp()\n- formatdict[\"quantity\"] = orp.quantity\n+ formatdict[\"quantity\"] = self.orp.quantity\n \n return generate_pdf_letter(\n filename=\"{}_ticket_{}.pdf\".format(self.shortname, self.pk),\n@@ -165,5 +164,6 @@\n def shortname(self):\n return \"shop\"\n \n- def get_orp(self):\n+ @property\n+ def orp(self):\n return OrderProductRelation.objects.get(product=self.product, order=self.order)\n", "issue": "backoffice: show quantity of products ordered after scanning qr code\n\nbackoffice: show quantity of products ordered after scanning qr code\n\n", "before_files": [{"content": "import io\nimport hashlib\nimport base64\nimport qrcode\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shop.models import OrderProductRelation\nfrom utils.models import UUIDModel, CampRelatedModel\nfrom utils.pdf import generate_pdf_letter\nfrom django.db import models\nimport logging\n\nlogger = logging.getLogger(\"bornhack.%s\" % __name__)\n\n\n# TicketType can be full week, one day. etc.\nclass TicketType(CampRelatedModel, UUIDModel):\n name = models.TextField()\n camp = models.ForeignKey(\"camps.Camp\", on_delete=models.PROTECT)\n includes_badge = models.BooleanField(default=False)\n single_ticket_per_product = models.BooleanField(\n default=False,\n help_text=(\n \"Only create one ticket for a product/order pair no matter the quantity. \"\n \"Useful for products which are bought in larger quantity (ie. 
village chairs)\"\n ),\n )\n\n def __str__(self):\n return \"{} ({})\".format(self.name, self.camp.title)\n\n\ndef create_ticket_token(string):\n return hashlib.sha256(string).hexdigest()\n\n\ndef qr_code_base64(token):\n qr = qrcode.make(\n token, version=1, error_correction=qrcode.constants.ERROR_CORRECT_H\n ).resize((250, 250))\n file_like = io.BytesIO()\n qr.save(file_like, format=\"png\")\n qrcode_base64 = base64.b64encode(file_like.getvalue())\n return qrcode_base64\n\n\nclass BaseTicket(CampRelatedModel, UUIDModel):\n ticket_type = models.ForeignKey(\"TicketType\", on_delete=models.PROTECT)\n used = models.BooleanField(default=False)\n badge_handed_out = models.BooleanField(default=False)\n token = models.CharField(max_length=64, blank=True)\n badge_token = models.CharField(max_length=64, blank=True)\n\n class Meta:\n abstract = True\n\n @property\n def camp(self):\n return self.ticket_type.camp\n\n def save(self, **kwargs):\n self.token = self._get_token()\n self.badge_token = self._get_badge_token()\n super().save(**kwargs)\n\n def _get_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def _get_badge_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}-badge\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def get_qr_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_token()).decode(\"utf-8\")\n )\n\n def get_qr_badge_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_badge_token()).decode(\"utf-8\")\n )\n\n def generate_pdf(self):\n formatdict = {\"ticket\": self}\n\n if self.ticket_type.single_ticket_per_product and self.shortname == \"shop\":\n orp = self.get_orp()\n formatdict[\"quantity\"] = orp.quantity\n\n return generate_pdf_letter(\n filename=\"{}_ticket_{}.pdf\".format(self.shortname, self.pk),\n formatdict=formatdict,\n template=\"pdf/ticket.html\",\n )\n\n\nclass SponsorTicket(BaseTicket):\n sponsor = models.ForeignKey(\"sponsors.Sponsor\", on_delete=models.PROTECT)\n\n def __str__(self):\n return \"SponsorTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"sponsor\"\n\n\nclass DiscountTicket(BaseTicket):\n price = models.IntegerField(\n help_text=_(\"Price of the discounted ticket (in DKK, including VAT).\")\n )\n\n def __str__(self):\n return \"DiscountTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"discount\"\n\n\nclass ShopTicket(BaseTicket):\n order = models.ForeignKey(\n \"shop.Order\", related_name=\"shoptickets\", on_delete=models.PROTECT\n )\n product = models.ForeignKey(\"shop.Product\", on_delete=models.PROTECT)\n\n name = models.CharField(\n max_length=100,\n help_text=(\n \"Name of the person this ticket belongs to. 
\"\n \"This can be different from the buying user.\"\n ),\n null=True,\n blank=True,\n )\n\n email = models.EmailField(null=True, blank=True)\n\n # overwrite the _get_token method because old tickets use the user_id\n def _get_token(self):\n return hashlib.sha256(\n \"{_id}{user_id}{secret_key}\".format(\n _id=self.pk, user_id=self.order.user.pk, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n ).hexdigest()\n\n def __str__(self):\n return \"Ticket {user} {product}\".format(\n user=self.order.user, product=self.product\n )\n\n def get_absolute_url(self):\n return str(reverse_lazy(\"tickets:shopticket_edit\", kwargs={\"pk\": self.pk}))\n\n @property\n def shortname(self):\n return \"shop\"\n\n def get_orp(self):\n return OrderProductRelation.objects.get(product=self.product, order=self.order)\n", "path": "src/tickets/models.py"}], "after_files": [{"content": "import io\nimport hashlib\nimport base64\nimport qrcode\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shop.models import OrderProductRelation\nfrom utils.models import UUIDModel, CampRelatedModel\nfrom utils.pdf import generate_pdf_letter\nfrom django.db import models\nimport logging\n\nlogger = logging.getLogger(\"bornhack.%s\" % __name__)\n\n\n# TicketType can be full week, one day. etc.\nclass TicketType(CampRelatedModel, UUIDModel):\n name = models.TextField()\n camp = models.ForeignKey(\"camps.Camp\", on_delete=models.PROTECT)\n includes_badge = models.BooleanField(default=False)\n single_ticket_per_product = models.BooleanField(\n default=False,\n help_text=(\n \"Only create one ticket for a product/order pair no matter the quantity. \"\n \"Useful for products which are bought in larger quantity (ie. 
village chairs)\"\n ),\n )\n\n def __str__(self):\n return \"{} ({})\".format(self.name, self.camp.title)\n\n\ndef create_ticket_token(string):\n return hashlib.sha256(string).hexdigest()\n\n\ndef qr_code_base64(token):\n qr = qrcode.make(\n token, version=1, error_correction=qrcode.constants.ERROR_CORRECT_H\n ).resize((250, 250))\n file_like = io.BytesIO()\n qr.save(file_like, format=\"png\")\n qrcode_base64 = base64.b64encode(file_like.getvalue())\n return qrcode_base64\n\n\nclass BaseTicket(CampRelatedModel, UUIDModel):\n ticket_type = models.ForeignKey(\"TicketType\", on_delete=models.PROTECT)\n used = models.BooleanField(default=False)\n badge_handed_out = models.BooleanField(default=False)\n token = models.CharField(max_length=64, blank=True)\n badge_token = models.CharField(max_length=64, blank=True)\n\n class Meta:\n abstract = True\n\n @property\n def camp(self):\n return self.ticket_type.camp\n\n def save(self, **kwargs):\n self.token = self._get_token()\n self.badge_token = self._get_badge_token()\n super().save(**kwargs)\n\n def _get_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def _get_badge_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}-badge\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def get_qr_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_token()).decode(\"utf-8\")\n )\n\n def get_qr_badge_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_badge_token()).decode(\"utf-8\")\n )\n\n def generate_pdf(self):\n formatdict = {\"ticket\": self}\n\n if self.ticket_type.single_ticket_per_product and self.shortname == \"shop\":\n formatdict[\"quantity\"] = self.orp.quantity\n\n return generate_pdf_letter(\n filename=\"{}_ticket_{}.pdf\".format(self.shortname, self.pk),\n formatdict=formatdict,\n template=\"pdf/ticket.html\",\n )\n\n\nclass SponsorTicket(BaseTicket):\n sponsor = models.ForeignKey(\"sponsors.Sponsor\", on_delete=models.PROTECT)\n\n def __str__(self):\n return \"SponsorTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"sponsor\"\n\n\nclass DiscountTicket(BaseTicket):\n price = models.IntegerField(\n help_text=_(\"Price of the discounted ticket (in DKK, including VAT).\")\n )\n\n def __str__(self):\n return \"DiscountTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"discount\"\n\n\nclass ShopTicket(BaseTicket):\n order = models.ForeignKey(\n \"shop.Order\", related_name=\"shoptickets\", on_delete=models.PROTECT\n )\n product = models.ForeignKey(\"shop.Product\", on_delete=models.PROTECT)\n\n name = models.CharField(\n max_length=100,\n help_text=(\n \"Name of the person this ticket belongs to. 
\"\n \"This can be different from the buying user.\"\n ),\n null=True,\n blank=True,\n )\n\n email = models.EmailField(null=True, blank=True)\n\n # overwrite the _get_token method because old tickets use the user_id\n def _get_token(self):\n return hashlib.sha256(\n \"{_id}{user_id}{secret_key}\".format(\n _id=self.pk, user_id=self.order.user.pk, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n ).hexdigest()\n\n def __str__(self):\n return \"Ticket {user} {product}\".format(\n user=self.order.user, product=self.product\n )\n\n def get_absolute_url(self):\n return str(reverse_lazy(\"tickets:shopticket_edit\", kwargs={\"pk\": self.pk}))\n\n @property\n def shortname(self):\n return \"shop\"\n\n @property\n def orp(self):\n return OrderProductRelation.objects.get(product=self.product, order=self.order)\n", "path": "src/tickets/models.py"}]}
| 1,873 | 190 |