Dataset columns:

| Column | Type | Range |
| --- | --- | --- |
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.1k-25.4k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 582-39.1k |
| num_tokens | int64 | 271-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
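The columns above map directly onto the usual `datasets` row dictionaries, so the data can be loaded and inspected with a few lines of Python. The sketch below is a minimal example; the Hub path `rasdani/github-patches` is taken from the `source` column and the `train` split name is an assumption, so adjust both if the dataset lives elsewhere.

```python
# Minimal sketch for loading and inspecting the dataset.
# Assumptions: the dataset is hosted on the Hugging Face Hub under
# "rasdani/github-patches" (inferred from the `source` column) and has a
# "train" split; both are assumptions, not confirmed by the schema above.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"])         # e.g. "gh_patches_debug_37491"
print(row["in_source_id"])       # e.g. "mitmproxy__mitmproxy-3404"
print(row["num_tokens"], row["num_tokens_diff"])
print(row["prompt"][:300])       # issue text plus the relevant file contents
print(row["golden_diff"][:300])  # reference patch in unified diff format
```

The rows that follow show complete example records.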
problem_id: gh_patches_debug_37491
source: rasdani/github-patches
task_type: git_diff
in_source_id: mitmproxy__mitmproxy-3404

prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- query string arrays are not fully displayed ##### Steps to reproduce the problem: 1. visit through mitmproxy/mitmdump e.g. http://example.com/?first=value&arr[]=foo+bar&arr[]=baz 2. Check the query parameters in the request 3. Notice that they contain more data than mitmproxy/mitmdump shows ##### Any other comments? What have you tried so far? The following script shows all the data: ``` #!/usr/bin/env python3 from urllib.parse import urlparse, parse_qs url = "http://example.com/?first=value&arr[]=foo+bar&arr[]=baz" parts = urlparse(url) print(parse_qs(parts.query)) ``` Output: `{'first': ['value'], 'arr[]': ['foo bar', 'baz']}` But mitmproxy/mitmdump only shows: ``` first: value arr[]: foo bar ``` ##### System information <!-- Paste the output of "mitmproxy --version" here. --> Mitmproxy: 3.0.4 Python: 3.5.2 OpenSSL: OpenSSL 1.0.2g 1 Mar 2016 Platform: Linux-4.10.0-42-generic-x86_64-with-Ubuntu-16.04-xenial <!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! :) --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/contentviews/base.py` Content: ``` 1 # Default view cutoff *in lines* 2 import typing 3 4 KEY_MAX = 30 5 6 TTextType = typing.Union[str, bytes] # FIXME: This should be either bytes or str ultimately. 7 TViewLine = typing.List[typing.Tuple[str, TTextType]] 8 TViewResult = typing.Tuple[str, typing.Iterator[TViewLine]] 9 10 11 class View: 12 name: str = None 13 content_types: typing.List[str] = [] 14 15 def __call__(self, data: bytes, **metadata) -> TViewResult: 16 """ 17 Transform raw data into human-readable output. 18 19 Args: 20 data: the data to decode/format. 21 metadata: optional keyword-only arguments for metadata. Implementations must not 22 rely on a given argument being present. 23 24 Returns: 25 A (description, content generator) tuple. 26 27 The content generator yields lists of (style, text) tuples, where each list represents 28 a single line. ``text`` is a unfiltered byte string which may need to be escaped, 29 depending on the used output. 30 31 Caveats: 32 The content generator must not yield tuples of tuples, 33 because urwid cannot process that. You have to yield a *list* of tuples per line. 34 """ 35 raise NotImplementedError() # pragma: no cover 36 37 38 def format_dict( 39 d: typing.Mapping[TTextType, TTextType] 40 ) -> typing.Iterator[TViewLine]: 41 """ 42 Helper function that transforms the given dictionary into a list of 43 [ 44 ("key", key ) 45 ("value", value) 46 ] 47 entries, where key is padded to a uniform width. 48 """ 49 50 max_key_len = max((len(k) for k in d.keys()), default=0) 51 max_key_len = min((max_key_len, KEY_MAX), default=0) 52 for key, value in d.items(): 53 if isinstance(key, bytes): 54 key += b":" 55 else: 56 key += ":" 57 key = key.ljust(max_key_len + 2) 58 yield [ 59 ("header", key), 60 ("text", value) 61 ] 62 63 64 def format_text(text: TTextType) -> typing.Iterator[TViewLine]: 65 """ 66 Helper function that transforms bytes into the view output format. 67 """ 68 for line in text.splitlines(): 69 yield [("text", line)] 70 ``` Path: `mitmproxy/contentviews/urlencoded.py` Content: ``` 1 from mitmproxy.net.http import url 2 from mitmproxy.coretypes import multidict 3 from . 
import base 4 5 6 class ViewURLEncoded(base.View): 7 name = "URL-encoded" 8 content_types = ["application/x-www-form-urlencoded"] 9 10 def __call__(self, data, **metadata): 11 try: 12 data = data.decode("ascii", "strict") 13 except ValueError: 14 return None 15 d = url.decode(data) 16 return "URLEncoded form", base.format_dict(multidict.MultiDict(d)) 17 ``` Path: `mitmproxy/contentviews/query.py` Content: ``` 1 from typing import List # noqa 2 3 from . import base 4 5 6 class ViewQuery(base.View): 7 name = "Query" 8 9 def __call__(self, data, **metadata): 10 query = metadata.get("query") 11 if query: 12 return "Query", base.format_dict(query) 13 else: 14 return "Query", base.format_text("") 15 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/contentviews/base.py b/mitmproxy/contentviews/base.py --- a/mitmproxy/contentviews/base.py +++ b/mitmproxy/contentviews/base.py @@ -35,32 +35,52 @@ raise NotImplementedError() # pragma: no cover -def format_dict( - d: typing.Mapping[TTextType, TTextType] -) -> typing.Iterator[TViewLine]: +def format_pairs( + items: typing.Iterable[typing.Tuple[TTextType, TTextType]] +)-> typing.Iterator[TViewLine]: + """ - Helper function that transforms the given dictionary into a list of + Helper function that accepts a list of (k,v) pairs into a list of [ - ("key", key ) + ("key", key ) ("value", value) ] - entries, where key is padded to a uniform width. + where key is padded to a uniform width """ - max_key_len = max((len(k) for k in d.keys()), default=0) + max_key_len = max((len(k[0]) for k in items), default=0) max_key_len = min((max_key_len, KEY_MAX), default=0) - for key, value in d.items(): + + for key, value in items: if isinstance(key, bytes): + key += b":" else: key += ":" + key = key.ljust(max_key_len + 2) + yield [ ("header", key), ("text", value) ] +def format_dict( + d: typing.Mapping[TTextType, TTextType] +) -> typing.Iterator[TViewLine]: + """ + Helper function that transforms the given dictionary into a list of + [ + ("key", key ) + ("value", value) + ] + entries, where key is padded to a uniform width. + """ + + return format_pairs(d.items()) + + def format_text(text: TTextType) -> typing.Iterator[TViewLine]: """ Helper function that transforms bytes into the view output format. diff --git a/mitmproxy/contentviews/query.py b/mitmproxy/contentviews/query.py --- a/mitmproxy/contentviews/query.py +++ b/mitmproxy/contentviews/query.py @@ -9,6 +9,6 @@ def __call__(self, data, **metadata): query = metadata.get("query") if query: - return "Query", base.format_dict(query) + return "Query", base.format_pairs(query.items(multi=True)) else: return "Query", base.format_text("") diff --git a/mitmproxy/contentviews/urlencoded.py b/mitmproxy/contentviews/urlencoded.py --- a/mitmproxy/contentviews/urlencoded.py +++ b/mitmproxy/contentviews/urlencoded.py @@ -1,5 +1,4 @@ from mitmproxy.net.http import url -from mitmproxy.coretypes import multidict from . import base @@ -13,4 +12,4 @@ except ValueError: return None d = url.decode(data) - return "URLEncoded form", base.format_dict(multidict.MultiDict(d)) + return "URLEncoded form", base.format_pairs(d)
{"golden_diff": "diff --git a/mitmproxy/contentviews/base.py b/mitmproxy/contentviews/base.py\n--- a/mitmproxy/contentviews/base.py\n+++ b/mitmproxy/contentviews/base.py\n@@ -35,32 +35,52 @@\n raise NotImplementedError() # pragma: no cover\n \n \n-def format_dict(\n- d: typing.Mapping[TTextType, TTextType]\n-) -> typing.Iterator[TViewLine]:\n+def format_pairs(\n+ items: typing.Iterable[typing.Tuple[TTextType, TTextType]]\n+)-> typing.Iterator[TViewLine]:\n+\n \"\"\"\n- Helper function that transforms the given dictionary into a list of\n+ Helper function that accepts a list of (k,v) pairs into a list of\n [\n- (\"key\", key )\n+ (\"key\", key )\n (\"value\", value)\n ]\n- entries, where key is padded to a uniform width.\n+ where key is padded to a uniform width\n \"\"\"\n \n- max_key_len = max((len(k) for k in d.keys()), default=0)\n+ max_key_len = max((len(k[0]) for k in items), default=0)\n max_key_len = min((max_key_len, KEY_MAX), default=0)\n- for key, value in d.items():\n+\n+ for key, value in items:\n if isinstance(key, bytes):\n+\n key += b\":\"\n else:\n key += \":\"\n+\n key = key.ljust(max_key_len + 2)\n+\n yield [\n (\"header\", key),\n (\"text\", value)\n ]\n \n \n+def format_dict(\n+ d: typing.Mapping[TTextType, TTextType]\n+) -> typing.Iterator[TViewLine]:\n+ \"\"\"\n+ Helper function that transforms the given dictionary into a list of\n+ [\n+ (\"key\", key )\n+ (\"value\", value)\n+ ]\n+ entries, where key is padded to a uniform width.\n+ \"\"\"\n+\n+ return format_pairs(d.items())\n+\n+\n def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms bytes into the view output format.\ndiff --git a/mitmproxy/contentviews/query.py b/mitmproxy/contentviews/query.py\n--- a/mitmproxy/contentviews/query.py\n+++ b/mitmproxy/contentviews/query.py\n@@ -9,6 +9,6 @@\n def __call__(self, data, **metadata):\n query = metadata.get(\"query\")\n if query:\n- return \"Query\", base.format_dict(query)\n+ return \"Query\", base.format_pairs(query.items(multi=True))\n else:\n return \"Query\", base.format_text(\"\")\ndiff --git a/mitmproxy/contentviews/urlencoded.py b/mitmproxy/contentviews/urlencoded.py\n--- a/mitmproxy/contentviews/urlencoded.py\n+++ b/mitmproxy/contentviews/urlencoded.py\n@@ -1,5 +1,4 @@\n from mitmproxy.net.http import url\n-from mitmproxy.coretypes import multidict\n from . import base\n \n \n@@ -13,4 +12,4 @@\n except ValueError:\n return None\n d = url.decode(data)\n- return \"URLEncoded form\", base.format_dict(multidict.MultiDict(d))\n+ return \"URLEncoded form\", base.format_pairs(d)\n", "issue": "query string arrays are not fully displayed\n##### Steps to reproduce the problem:\r\n\r\n1. visit through mitmproxy/mitmdump e.g. http://example.com/?first=value&arr[]=foo+bar&arr[]=baz\r\n2. Check the query parameters in the request\r\n3. Notice that they contain more data than mitmproxy/mitmdump shows\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nThe following script shows all the data:\r\n\r\n```\r\n#!/usr/bin/env python3\r\n\r\nfrom urllib.parse import urlparse, parse_qs\r\n\r\nurl = \"http://example.com/?first=value&arr[]=foo+bar&arr[]=baz\"\r\nparts = urlparse(url)\r\nprint(parse_qs(parts.query))\r\n```\r\n\r\nOutput:\r\n`{'first': ['value'], 'arr[]': ['foo bar', 'baz']}`\r\n\r\nBut mitmproxy/mitmdump only shows:\r\n```\r\n first: value\r\n arr[]: foo bar\r\n```\r\n\r\n##### System information\r\n\r\n<!-- Paste the output of \"mitmproxy --version\" here. 
-->\r\n\r\nMitmproxy: 3.0.4\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.0.2g 1 Mar 2016\r\nPlatform: Linux-4.10.0-42-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\r\n<!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! :) -->\r\n\n", "before_files": [{"content": "# Default view cutoff *in lines*\nimport typing\n\nKEY_MAX = 30\n\nTTextType = typing.Union[str, bytes] # FIXME: This should be either bytes or str ultimately.\nTViewLine = typing.List[typing.Tuple[str, TTextType]]\nTViewResult = typing.Tuple[str, typing.Iterator[TViewLine]]\n\n\nclass View:\n name: str = None\n content_types: typing.List[str] = []\n\n def __call__(self, data: bytes, **metadata) -> TViewResult:\n \"\"\"\n Transform raw data into human-readable output.\n\n Args:\n data: the data to decode/format.\n metadata: optional keyword-only arguments for metadata. Implementations must not\n rely on a given argument being present.\n\n Returns:\n A (description, content generator) tuple.\n\n The content generator yields lists of (style, text) tuples, where each list represents\n a single line. ``text`` is a unfiltered byte string which may need to be escaped,\n depending on the used output.\n\n Caveats:\n The content generator must not yield tuples of tuples,\n because urwid cannot process that. You have to yield a *list* of tuples per line.\n \"\"\"\n raise NotImplementedError() # pragma: no cover\n\n\ndef format_dict(\n d: typing.Mapping[TTextType, TTextType]\n) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms the given dictionary into a list of\n [\n (\"key\", key )\n (\"value\", value)\n ]\n entries, where key is padded to a uniform width.\n \"\"\"\n\n max_key_len = max((len(k) for k in d.keys()), default=0)\n max_key_len = min((max_key_len, KEY_MAX), default=0)\n for key, value in d.items():\n if isinstance(key, bytes):\n key += b\":\"\n else:\n key += \":\"\n key = key.ljust(max_key_len + 2)\n yield [\n (\"header\", key),\n (\"text\", value)\n ]\n\n\ndef format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms bytes into the view output format.\n \"\"\"\n for line in text.splitlines():\n yield [(\"text\", line)]\n", "path": "mitmproxy/contentviews/base.py"}, {"content": "from mitmproxy.net.http import url\nfrom mitmproxy.coretypes import multidict\nfrom . import base\n\n\nclass ViewURLEncoded(base.View):\n name = \"URL-encoded\"\n content_types = [\"application/x-www-form-urlencoded\"]\n\n def __call__(self, data, **metadata):\n try:\n data = data.decode(\"ascii\", \"strict\")\n except ValueError:\n return None\n d = url.decode(data)\n return \"URLEncoded form\", base.format_dict(multidict.MultiDict(d))\n", "path": "mitmproxy/contentviews/urlencoded.py"}, {"content": "from typing import List # noqa\n\nfrom . 
import base\n\n\nclass ViewQuery(base.View):\n name = \"Query\"\n\n def __call__(self, data, **metadata):\n query = metadata.get(\"query\")\n if query:\n return \"Query\", base.format_dict(query)\n else:\n return \"Query\", base.format_text(\"\")\n", "path": "mitmproxy/contentviews/query.py"}], "after_files": [{"content": "# Default view cutoff *in lines*\nimport typing\n\nKEY_MAX = 30\n\nTTextType = typing.Union[str, bytes] # FIXME: This should be either bytes or str ultimately.\nTViewLine = typing.List[typing.Tuple[str, TTextType]]\nTViewResult = typing.Tuple[str, typing.Iterator[TViewLine]]\n\n\nclass View:\n name: str = None\n content_types: typing.List[str] = []\n\n def __call__(self, data: bytes, **metadata) -> TViewResult:\n \"\"\"\n Transform raw data into human-readable output.\n\n Args:\n data: the data to decode/format.\n metadata: optional keyword-only arguments for metadata. Implementations must not\n rely on a given argument being present.\n\n Returns:\n A (description, content generator) tuple.\n\n The content generator yields lists of (style, text) tuples, where each list represents\n a single line. ``text`` is a unfiltered byte string which may need to be escaped,\n depending on the used output.\n\n Caveats:\n The content generator must not yield tuples of tuples,\n because urwid cannot process that. You have to yield a *list* of tuples per line.\n \"\"\"\n raise NotImplementedError() # pragma: no cover\n\n\ndef format_pairs(\n items: typing.Iterable[typing.Tuple[TTextType, TTextType]]\n)-> typing.Iterator[TViewLine]:\n\n \"\"\"\n Helper function that accepts a list of (k,v) pairs into a list of\n [\n (\"key\", key )\n (\"value\", value)\n ]\n where key is padded to a uniform width\n \"\"\"\n\n max_key_len = max((len(k[0]) for k in items), default=0)\n max_key_len = min((max_key_len, KEY_MAX), default=0)\n\n for key, value in items:\n if isinstance(key, bytes):\n\n key += b\":\"\n else:\n key += \":\"\n\n key = key.ljust(max_key_len + 2)\n\n yield [\n (\"header\", key),\n (\"text\", value)\n ]\n\n\ndef format_dict(\n d: typing.Mapping[TTextType, TTextType]\n) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms the given dictionary into a list of\n [\n (\"key\", key )\n (\"value\", value)\n ]\n entries, where key is padded to a uniform width.\n \"\"\"\n\n return format_pairs(d.items())\n\n\ndef format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms bytes into the view output format.\n \"\"\"\n for line in text.splitlines():\n yield [(\"text\", line)]\n", "path": "mitmproxy/contentviews/base.py"}, {"content": "from mitmproxy.net.http import url\nfrom . import base\n\n\nclass ViewURLEncoded(base.View):\n name = \"URL-encoded\"\n content_types = [\"application/x-www-form-urlencoded\"]\n\n def __call__(self, data, **metadata):\n try:\n data = data.decode(\"ascii\", \"strict\")\n except ValueError:\n return None\n d = url.decode(data)\n return \"URLEncoded form\", base.format_pairs(d)\n", "path": "mitmproxy/contentviews/urlencoded.py"}, {"content": "from typing import List # noqa\n\nfrom . import base\n\n\nclass ViewQuery(base.View):\n name = \"Query\"\n\n def __call__(self, data, **metadata):\n query = metadata.get(\"query\")\n if query:\n return \"Query\", base.format_pairs(query.items(multi=True))\n else:\n return \"Query\", base.format_text(\"\")\n", "path": "mitmproxy/contentviews/query.py"}]}
num_tokens: 1,485
num_tokens_diff: 727
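As the row above shows, `verification_info` is a JSON string bundling the golden diff, the issue text, and `before_files`/`after_files` snapshots, where each file entry carries a `path` and its full `content`. A small parsing sketch, assuming `row` is a dataset row loaded as in the earlier example:

```python
import json

# Assumes `row` is a dataset row obtained as in the loading sketch above.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))  # ['after_files', 'before_files', 'golden_diff', 'issue']

# Each snapshot pairs a repository-relative path with the full file content,
# so the expected post-patch state is available for direct comparison.
for f in info["before_files"]:
    print("before:", f["path"], len(f["content"]), "chars")
for f in info["after_files"]:
    print("after: ", f["path"], len(f["content"]), "chars")

# In the rows shown here the embedded diff appears to match the top-level
# golden_diff column; this is not guaranteed for every row.
print(info["golden_diff"] == row["golden_diff"])
```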
problem_id: gh_patches_debug_18641
source: rasdani/github-patches
task_type: git_diff
in_source_id: litestar-org__litestar-1009

prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: Structlog example no longer works Me again. Sorry 🙈 **Describe the bug** Running the structlog example [here](https://starlite-api.github.io/starlite/1.48/usage/0-the-starlite-app/?h=structlog#using-structlog) results in an internal server error as of v1.45 (I think) ``` {"status_code":500,"detail":"TypeError(\"encode_json() got an unexpected keyword argument 'default'\")"} ``` The default encoder was changed [here](https://github.com/starlite-api/starlite/pull/891/files#diff-6b2294023eb60948cd9f742e4930255a72254daf74f9e3157df8d479a685b123R213) Which doesn't accept the `default` argument given [here](https://github.com/hynek/structlog/blob/main/src/structlog/processors.py#L318) I'm not sure if it's a structlog problem or a starlite problem. Maybe the solution is to rename `enc_hook` to `default` then it mirrors the signature of `json.dumps`? I'm not sure, to be honest. **To Reproduce** Run the structlog example in the documentation: ```python from starlite import Starlite, StructLoggingConfig, Request, get @get("/") def my_router_handler(request: Request) -> None: request.logger.info("inside a request") return None logging_config = StructLoggingConfig() app = Starlite(route_handlers=[my_router_handler], logging_config=logging_config) ``` **Additional context** Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `starlite/utils/serialization.py` Content: ``` 1 from pathlib import PurePosixPath 2 from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union 3 4 import msgspec 5 from pydantic import ( 6 AnyUrl, 7 BaseModel, 8 ByteSize, 9 ConstrainedBytes, 10 ConstrainedDate, 11 ConstrainedDecimal, 12 ConstrainedFloat, 13 ConstrainedFrozenSet, 14 ConstrainedInt, 15 ConstrainedList, 16 ConstrainedSet, 17 ConstrainedStr, 18 EmailStr, 19 NameEmail, 20 PaymentCardNumber, 21 SecretField, 22 StrictBool, 23 ) 24 from pydantic.color import Color 25 26 if TYPE_CHECKING: 27 from starlite.types import TypeEncodersMap 28 29 DEFAULT_TYPE_ENCODERS: "TypeEncodersMap" = { 30 PurePosixPath: str, 31 # pydantic specific types 32 BaseModel: lambda m: m.dict(), 33 ByteSize: lambda b: b.real, 34 EmailStr: str, 35 NameEmail: str, 36 Color: str, 37 AnyUrl: str, 38 SecretField: str, 39 ConstrainedInt: int, 40 ConstrainedFloat: float, 41 ConstrainedStr: str, 42 ConstrainedBytes: lambda b: b.decode("utf-8"), 43 ConstrainedList: list, 44 ConstrainedSet: set, 45 ConstrainedFrozenSet: frozenset, 46 ConstrainedDecimal: float, 47 ConstrainedDate: lambda d: d.isoformat(), 48 PaymentCardNumber: str, 49 StrictBool: int, # pydantic compatibility 50 } 51 52 53 def default_serializer(value: Any, type_encoders: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any: 54 """Transform values non-natively supported by `msgspec` 55 56 Args: 57 value: A value to serialize# 58 type_encoders: Mapping of types to callables to transforming types 59 Returns: 60 A serialized value 61 Raises: 62 TypeError: if value is not supported 63 """ 64 if type_encoders is None: 65 type_encoders = DEFAULT_TYPE_ENCODERS 66 for base in value.__class__.__mro__[:-1]: 67 try: 68 encoder = type_encoders[base] 69 except KeyError: 70 continue 71 return encoder(value) 72 raise TypeError(f"Unsupported type: {type(value)!r}") 73 74 75 def dec_hook(type_: Any, value: Any) -> Any: # pragma: no cover 76 
"""Transform values non-natively supported by `msgspec` 77 78 Args: 79 type_: Encountered type 80 value: Value to coerce 81 82 Returns: 83 A `msgspec`-supported type 84 """ 85 if issubclass(type_, BaseModel): 86 return type_(**value) 87 raise TypeError(f"Unsupported type: {type(value)!r}") 88 89 90 _msgspec_json_encoder = msgspec.json.Encoder(enc_hook=default_serializer) 91 _msgspec_json_decoder = msgspec.json.Decoder(dec_hook=dec_hook) 92 _msgspec_msgpack_encoder = msgspec.msgpack.Encoder(enc_hook=default_serializer) 93 _msgspec_msgpack_decoder = msgspec.msgpack.Decoder(dec_hook=dec_hook) 94 95 96 def encode_json(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes: 97 """Encode a value into JSON. 98 99 Args: 100 obj: Value to encode 101 enc_hook: Optional callable to support non-natively supported types 102 103 Returns: 104 JSON as bytes 105 """ 106 if enc_hook is None or enc_hook is default_serializer: 107 return _msgspec_json_encoder.encode(obj) 108 return msgspec.json.encode(obj, enc_hook=enc_hook) 109 110 111 def decode_json(raw: Union[str, bytes]) -> Any: 112 """Decode a JSON string/bytes into an object. 113 114 Args: 115 raw: Value to decode 116 117 Returns: 118 An object 119 """ 120 return _msgspec_json_decoder.decode(raw) 121 122 123 def encode_msgpack(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes: 124 """Encode a value into MessagePack. 125 126 Args: 127 obj: Value to encode 128 enc_hook: Optional callable to support non-natively supported types 129 130 Returns: 131 MessagePack as bytes 132 """ 133 if enc_hook is None or enc_hook is default_serializer: 134 return _msgspec_msgpack_encoder.encode(obj) 135 return msgspec.msgpack.encode(obj, enc_hook=enc_hook) 136 137 138 def decode_msgpack(raw: bytes) -> Any: 139 """Decode a MessagePack string/bytes into an object. 140 141 Args: 142 raw: Value to decode 143 144 Returns: 145 An object 146 """ 147 return _msgspec_msgpack_decoder.decode(raw) 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/starlite/utils/serialization.py b/starlite/utils/serialization.py --- a/starlite/utils/serialization.py +++ b/starlite/utils/serialization.py @@ -93,19 +93,19 @@ _msgspec_msgpack_decoder = msgspec.msgpack.Decoder(dec_hook=dec_hook) -def encode_json(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes: +def encode_json(obj: Any, default: Optional[Callable[[Any], Any]] = default_serializer) -> bytes: """Encode a value into JSON. Args: obj: Value to encode - enc_hook: Optional callable to support non-natively supported types + default: Optional callable to support non-natively supported types. Returns: JSON as bytes """ - if enc_hook is None or enc_hook is default_serializer: + if default is None or default is default_serializer: return _msgspec_json_encoder.encode(obj) - return msgspec.json.encode(obj, enc_hook=enc_hook) + return msgspec.json.encode(obj, enc_hook=default) def decode_json(raw: Union[str, bytes]) -> Any:
{"golden_diff": "diff --git a/starlite/utils/serialization.py b/starlite/utils/serialization.py\n--- a/starlite/utils/serialization.py\n+++ b/starlite/utils/serialization.py\n@@ -93,19 +93,19 @@\n _msgspec_msgpack_decoder = msgspec.msgpack.Decoder(dec_hook=dec_hook)\n \n \n-def encode_json(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n+def encode_json(obj: Any, default: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n \"\"\"Encode a value into JSON.\n \n Args:\n obj: Value to encode\n- enc_hook: Optional callable to support non-natively supported types\n+ default: Optional callable to support non-natively supported types.\n \n Returns:\n JSON as bytes\n \"\"\"\n- if enc_hook is None or enc_hook is default_serializer:\n+ if default is None or default is default_serializer:\n return _msgspec_json_encoder.encode(obj)\n- return msgspec.json.encode(obj, enc_hook=enc_hook)\n+ return msgspec.json.encode(obj, enc_hook=default)\n \n \n def decode_json(raw: Union[str, bytes]) -> Any:\n", "issue": "Bug: Structlog example no longer works\nMe again. Sorry \ud83d\ude48 \r\n\r\n**Describe the bug**\r\nRunning the structlog example [here](https://starlite-api.github.io/starlite/1.48/usage/0-the-starlite-app/?h=structlog#using-structlog) results in an internal server error as of v1.45 (I think)\r\n\r\n```\r\n{\"status_code\":500,\"detail\":\"TypeError(\\\"encode_json() got an unexpected keyword argument 'default'\\\")\"}\r\n```\r\n\r\nThe default encoder was changed [here](https://github.com/starlite-api/starlite/pull/891/files#diff-6b2294023eb60948cd9f742e4930255a72254daf74f9e3157df8d479a685b123R213)\r\nWhich doesn't accept the `default` argument given [here](https://github.com/hynek/structlog/blob/main/src/structlog/processors.py#L318)\r\n\r\nI'm not sure if it's a structlog problem or a starlite problem.\r\n\r\nMaybe the solution is to rename `enc_hook` to `default` then it mirrors the signature of `json.dumps`? 
I'm not sure, to be honest.\r\n\r\n\r\n**To Reproduce**\r\nRun the structlog example in the documentation:\r\n```python\r\nfrom starlite import Starlite, StructLoggingConfig, Request, get\r\n\r\n\r\n@get(\"/\")\r\ndef my_router_handler(request: Request) -> None:\r\n request.logger.info(\"inside a request\")\r\n return None\r\n\r\n\r\nlogging_config = StructLoggingConfig()\r\n\r\napp = Starlite(route_handlers=[my_router_handler], logging_config=logging_config)\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from pathlib import PurePosixPath\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union\n\nimport msgspec\nfrom pydantic import (\n AnyUrl,\n BaseModel,\n ByteSize,\n ConstrainedBytes,\n ConstrainedDate,\n ConstrainedDecimal,\n ConstrainedFloat,\n ConstrainedFrozenSet,\n ConstrainedInt,\n ConstrainedList,\n ConstrainedSet,\n ConstrainedStr,\n EmailStr,\n NameEmail,\n PaymentCardNumber,\n SecretField,\n StrictBool,\n)\nfrom pydantic.color import Color\n\nif TYPE_CHECKING:\n from starlite.types import TypeEncodersMap\n\nDEFAULT_TYPE_ENCODERS: \"TypeEncodersMap\" = {\n PurePosixPath: str,\n # pydantic specific types\n BaseModel: lambda m: m.dict(),\n ByteSize: lambda b: b.real,\n EmailStr: str,\n NameEmail: str,\n Color: str,\n AnyUrl: str,\n SecretField: str,\n ConstrainedInt: int,\n ConstrainedFloat: float,\n ConstrainedStr: str,\n ConstrainedBytes: lambda b: b.decode(\"utf-8\"),\n ConstrainedList: list,\n ConstrainedSet: set,\n ConstrainedFrozenSet: frozenset,\n ConstrainedDecimal: float,\n ConstrainedDate: lambda d: d.isoformat(),\n PaymentCardNumber: str,\n StrictBool: int, # pydantic compatibility\n}\n\n\ndef default_serializer(value: Any, type_encoders: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:\n \"\"\"Transform values non-natively supported by `msgspec`\n\n Args:\n value: A value to serialize#\n type_encoders: Mapping of types to callables to transforming types\n Returns:\n A serialized value\n Raises:\n TypeError: if value is not supported\n \"\"\"\n if type_encoders is None:\n type_encoders = DEFAULT_TYPE_ENCODERS\n for base in value.__class__.__mro__[:-1]:\n try:\n encoder = type_encoders[base]\n except KeyError:\n continue\n return encoder(value)\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\n\n\ndef dec_hook(type_: Any, value: Any) -> Any: # pragma: no cover\n \"\"\"Transform values non-natively supported by `msgspec`\n\n Args:\n type_: Encountered type\n value: Value to coerce\n\n Returns:\n A `msgspec`-supported type\n \"\"\"\n if issubclass(type_, BaseModel):\n return type_(**value)\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\n\n\n_msgspec_json_encoder = msgspec.json.Encoder(enc_hook=default_serializer)\n_msgspec_json_decoder = msgspec.json.Decoder(dec_hook=dec_hook)\n_msgspec_msgpack_encoder = msgspec.msgpack.Encoder(enc_hook=default_serializer)\n_msgspec_msgpack_decoder = msgspec.msgpack.Decoder(dec_hook=dec_hook)\n\n\ndef encode_json(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n \"\"\"Encode a value into JSON.\n\n Args:\n obj: Value to encode\n enc_hook: Optional callable to support non-natively supported types\n\n Returns:\n JSON as bytes\n \"\"\"\n if enc_hook is None or enc_hook is default_serializer:\n return _msgspec_json_encoder.encode(obj)\n return msgspec.json.encode(obj, enc_hook=enc_hook)\n\n\ndef decode_json(raw: Union[str, bytes]) -> Any:\n \"\"\"Decode a JSON string/bytes into an 
object.\n\n Args:\n raw: Value to decode\n\n Returns:\n An object\n \"\"\"\n return _msgspec_json_decoder.decode(raw)\n\n\ndef encode_msgpack(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n \"\"\"Encode a value into MessagePack.\n\n Args:\n obj: Value to encode\n enc_hook: Optional callable to support non-natively supported types\n\n Returns:\n MessagePack as bytes\n \"\"\"\n if enc_hook is None or enc_hook is default_serializer:\n return _msgspec_msgpack_encoder.encode(obj)\n return msgspec.msgpack.encode(obj, enc_hook=enc_hook)\n\n\ndef decode_msgpack(raw: bytes) -> Any:\n \"\"\"Decode a MessagePack string/bytes into an object.\n\n Args:\n raw: Value to decode\n\n Returns:\n An object\n \"\"\"\n return _msgspec_msgpack_decoder.decode(raw)\n", "path": "starlite/utils/serialization.py"}], "after_files": [{"content": "from pathlib import PurePosixPath\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union\n\nimport msgspec\nfrom pydantic import (\n AnyUrl,\n BaseModel,\n ByteSize,\n ConstrainedBytes,\n ConstrainedDate,\n ConstrainedDecimal,\n ConstrainedFloat,\n ConstrainedFrozenSet,\n ConstrainedInt,\n ConstrainedList,\n ConstrainedSet,\n ConstrainedStr,\n EmailStr,\n NameEmail,\n PaymentCardNumber,\n SecretField,\n StrictBool,\n)\nfrom pydantic.color import Color\n\nif TYPE_CHECKING:\n from starlite.types import TypeEncodersMap\n\nDEFAULT_TYPE_ENCODERS: \"TypeEncodersMap\" = {\n PurePosixPath: str,\n # pydantic specific types\n BaseModel: lambda m: m.dict(),\n ByteSize: lambda b: b.real,\n EmailStr: str,\n NameEmail: str,\n Color: str,\n AnyUrl: str,\n SecretField: str,\n ConstrainedInt: int,\n ConstrainedFloat: float,\n ConstrainedStr: str,\n ConstrainedBytes: lambda b: b.decode(\"utf-8\"),\n ConstrainedList: list,\n ConstrainedSet: set,\n ConstrainedFrozenSet: frozenset,\n ConstrainedDecimal: float,\n ConstrainedDate: lambda d: d.isoformat(),\n PaymentCardNumber: str,\n StrictBool: int, # pydantic compatibility\n}\n\n\ndef default_serializer(value: Any, type_encoders: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:\n \"\"\"Transform values non-natively supported by `msgspec`\n\n Args:\n value: A value to serialize#\n type_encoders: Mapping of types to callables to transforming types\n Returns:\n A serialized value\n Raises:\n TypeError: if value is not supported\n \"\"\"\n if type_encoders is None:\n type_encoders = DEFAULT_TYPE_ENCODERS\n for base in value.__class__.__mro__[:-1]:\n try:\n encoder = type_encoders[base]\n except KeyError:\n continue\n return encoder(value)\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\n\n\ndef dec_hook(type_: Any, value: Any) -> Any: # pragma: no cover\n \"\"\"Transform values non-natively supported by `msgspec`\n\n Args:\n type_: Encountered type\n value: Value to coerce\n\n Returns:\n A `msgspec`-supported type\n \"\"\"\n if issubclass(type_, BaseModel):\n return type_(**value)\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\n\n\n_msgspec_json_encoder = msgspec.json.Encoder(enc_hook=default_serializer)\n_msgspec_json_decoder = msgspec.json.Decoder(dec_hook=dec_hook)\n_msgspec_msgpack_encoder = msgspec.msgpack.Encoder(enc_hook=default_serializer)\n_msgspec_msgpack_decoder = msgspec.msgpack.Decoder(dec_hook=dec_hook)\n\n\ndef encode_json(obj: Any, default: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n \"\"\"Encode a value into JSON.\n\n Args:\n obj: Value to encode\n default: Optional callable to support non-natively supported types.\n\n Returns:\n 
JSON as bytes\n \"\"\"\n if default is None or default is default_serializer:\n return _msgspec_json_encoder.encode(obj)\n return msgspec.json.encode(obj, enc_hook=default)\n\n\ndef decode_json(raw: Union[str, bytes]) -> Any:\n \"\"\"Decode a JSON string/bytes into an object.\n\n Args:\n raw: Value to decode\n\n Returns:\n An object\n \"\"\"\n return _msgspec_json_decoder.decode(raw)\n\n\ndef encode_msgpack(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n \"\"\"Encode a value into MessagePack.\n\n Args:\n obj: Value to encode\n enc_hook: Optional callable to support non-natively supported types\n\n Returns:\n MessagePack as bytes\n \"\"\"\n if enc_hook is None or enc_hook is default_serializer:\n return _msgspec_msgpack_encoder.encode(obj)\n return msgspec.msgpack.encode(obj, enc_hook=enc_hook)\n\n\ndef decode_msgpack(raw: bytes) -> Any:\n \"\"\"Decode a MessagePack string/bytes into an object.\n\n Args:\n raw: Value to decode\n\n Returns:\n An object\n \"\"\"\n return _msgspec_msgpack_decoder.decode(raw)\n", "path": "starlite/utils/serialization.py"}]}
num_tokens: 1,977
num_tokens_diff: 257
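Because every row carries both the reference patch and before/after file snapshots, one straightforward (if strict) way to check a candidate patch is to apply it to `before_files` and compare the result with `after_files`. The sketch below is only an illustration, not an official scoring procedure for this dataset; it shells out to the system `patch` utility and assumes the candidate diff uses the usual `a/<path>` and `b/<path>` prefixes, as the golden diffs here do.

```python
import json
import subprocess
import tempfile
from pathlib import Path

def patch_matches(row: dict, candidate_diff: str) -> bool:
    """Apply candidate_diff to before_files and compare against after_files.

    Illustrative sketch only: relies on the system `patch` tool with -p1,
    which strips the a/ and b/ path prefixes used by git-style diffs.
    """
    info = json.loads(row["verification_info"])
    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        # Materialise the pre-patch repository snapshot.
        for f in info["before_files"]:
            target = root / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        # Apply the candidate diff; a non-zero exit code means it did not apply.
        proc = subprocess.run(
            ["patch", "-p1"],
            input=candidate_diff,
            text=True,
            cwd=root,
            capture_output=True,
        )
        if proc.returncode != 0:
            return False
        # The patched files must match the stored post-patch contents exactly.
        return all(
            (root / f["path"]).read_text() == f["content"]
            for f in info["after_files"]
        )

# Sanity check: the golden diff itself should reproduce after_files.
# print(patch_matches(row, row["golden_diff"]))
```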
problem_id: gh_patches_debug_8798
source: rasdani/github-patches
task_type: git_diff
in_source_id: apache__airflow-16392

prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Consider and add common sensitive names **Description** Since sensitive informations in the connection object (specifically the extras field) are now being masked based on sensitive key names, we should consider adding some common sensitive key names. `private_key` from [ssh connection](https://airflow.apache.org/docs/apache-airflow-providers-ssh/stable/connections/ssh.html) is an examples. **Use case / motivation** Extras field used to be blocked out entirely before the sensitive value masking feature (#15599). [Before in 2.0.2](https://github.com/apache/airflow/blob/2.0.2/airflow/hooks/base.py#L78 ) and [after in 2.1.0](https://github.com/apache/airflow/blob/2.1.0/airflow/hooks/base.py#L78 ). Extras field containing sensitive information now shown unless the key contains sensitive names. **Are you willing to submit a PR?** @ashb has expressed interest in adding this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/utils/log/secrets_masker.py` Content: ``` 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 17 """Mask sensitive information from logs""" 18 import collections 19 import io 20 import logging 21 import re 22 from typing import TYPE_CHECKING, Iterable, Optional, Set, TypeVar, Union 23 24 from airflow.compat.functools import cache, cached_property 25 26 if TYPE_CHECKING: 27 from airflow.typing_compat import RePatternType 28 29 RedactableItem = TypeVar('RedactableItem') 30 31 32 log = logging.getLogger(__name__) 33 34 35 DEFAULT_SENSITIVE_FIELDS = frozenset( 36 { 37 'password', 38 'secret', 39 'passwd', 40 'authorization', 41 'api_key', 42 'apikey', 43 'access_token', 44 } 45 ) 46 """Names of fields (Connection extra, Variable key name etc.) 
that are deemed sensitive""" 47 48 49 @cache 50 def get_sensitive_variables_fields(): 51 """Get comma-separated sensitive Variable Fields from airflow.cfg.""" 52 from airflow.configuration import conf 53 54 sensitive_fields = DEFAULT_SENSITIVE_FIELDS.copy() 55 sensitive_variable_fields = conf.get('core', 'sensitive_var_conn_names') 56 if sensitive_variable_fields: 57 sensitive_fields |= frozenset({field.strip() for field in sensitive_variable_fields.split(',')}) 58 return sensitive_fields 59 60 61 def should_hide_value_for_key(name): 62 """Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden""" 63 from airflow import settings 64 65 if name and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS: 66 name = name.strip().lower() 67 return any(s in name for s in get_sensitive_variables_fields()) 68 return False 69 70 71 def mask_secret(secret: Union[str, dict, Iterable], name: str = None) -> None: 72 """ 73 Mask a secret from appearing in the task logs. 74 75 If ``name`` is provided, then it will only be masked if the name matches 76 one of the configured "sensitive" names. 77 78 If ``secret`` is a dict or a iterable (excluding str) then it will be 79 recursively walked and keys with sensitive names will be hidden. 80 """ 81 # Delay import 82 from airflow import settings 83 84 # Filtering all log messages is not a free process, so we only do it when 85 # running tasks 86 if not settings.MASK_SECRETS_IN_LOGS or not secret: 87 return 88 89 _secrets_masker().add_mask(secret, name) 90 91 92 def redact(value: "RedactableItem", name: str = None) -> "RedactableItem": 93 """Redact any secrets found in ``value``.""" 94 return _secrets_masker().redact(value, name) 95 96 97 @cache 98 def _secrets_masker() -> "SecretsMasker": 99 100 for flt in logging.getLogger('airflow.task').filters: 101 if isinstance(flt, SecretsMasker): 102 return flt 103 raise RuntimeError("No SecretsMasker found!") 104 105 106 class SecretsMasker(logging.Filter): 107 """Redact secrets from logs""" 108 109 replacer: Optional["RePatternType"] = None 110 patterns: Set[str] 111 112 ALREADY_FILTERED_FLAG = "__SecretsMasker_filtered" 113 114 def __init__(self): 115 super().__init__() 116 self.patterns = set() 117 118 @cached_property 119 def _record_attrs_to_ignore(self) -> Iterable[str]: 120 # Doing log.info(..., extra={'foo': 2}) sets extra properties on 121 # record, i.e. record.foo. And we need to filter those too. Fun 122 # 123 # Create a record, and look at what attributes are on it, and ignore 124 # all the default ones! 125 126 record = logging.getLogRecordFactory()( 127 # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None, 128 "x", 129 logging.INFO, 130 __file__, 131 1, 132 "", 133 tuple(), 134 exc_info=None, 135 func="funcname", 136 ) 137 return frozenset(record.__dict__).difference({'msg', 'args'}) 138 139 def filter(self, record) -> bool: 140 if self.ALREADY_FILTERED_FLAG in record.__dict__: 141 # Filters are attached to multiple handlers and logs, keep a 142 # "private" flag that stops us needing to process it more than once 143 return True 144 145 if self.replacer: 146 for k, v in record.__dict__.items(): 147 if k in self._record_attrs_to_ignore: 148 continue 149 record.__dict__[k] = self.redact(v) 150 if record.exc_info and record.exc_info[1] is not None: 151 exc = record.exc_info[1] 152 # I'm not sure if this is a good idea! 
153 exc.args = (self.redact(v) for v in exc.args) 154 record.__dict__[self.ALREADY_FILTERED_FLAG] = True 155 156 return True 157 158 def _redact_all(self, item: "RedactableItem") -> "RedactableItem": 159 if isinstance(item, dict): 160 return {dict_key: self._redact_all(subval) for dict_key, subval in item.items()} 161 elif isinstance(item, str): 162 return '***' 163 elif isinstance(item, (tuple, set)): 164 # Turn set in to tuple! 165 return tuple(self._redact_all(subval) for subval in item) 166 elif isinstance(item, Iterable): 167 return list(self._redact_all(subval) for subval in item) 168 else: 169 return item 170 171 # pylint: disable=too-many-return-statements 172 def redact(self, item: "RedactableItem", name: str = None) -> "RedactableItem": 173 """ 174 Redact an any secrets found in ``item``, if it is a string. 175 176 If ``name`` is given, and it's a "sensitive" name (see 177 :func:`should_hide_value_for_key`) then all string values in the item 178 is redacted. 179 180 """ 181 try: 182 if name and should_hide_value_for_key(name): 183 return self._redact_all(item) 184 185 if isinstance(item, dict): 186 return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()} 187 elif isinstance(item, str): 188 if self.replacer: 189 # We can't replace specific values, but the key-based redacting 190 # can still happen, so we can't short-circuit, we need to walk 191 # the structure. 192 return self.replacer.sub('***', item) 193 return item 194 elif isinstance(item, (tuple, set)): 195 # Turn set in to tuple! 196 return tuple(self.redact(subval) for subval in item) 197 elif isinstance(item, io.IOBase): 198 return item 199 elif isinstance(item, Iterable): 200 return list(self.redact(subval) for subval in item) 201 else: 202 return item 203 except Exception as e: # pylint: disable=broad-except 204 log.warning( 205 "Unable to redact %r, please report this via <https://github.com/apache/airflow/issues>. " 206 "Error was: %s: %s", 207 item, 208 type(e).__name__, 209 str(e), 210 ) 211 return item 212 213 # pylint: enable=too-many-return-statements 214 215 def add_mask(self, secret: Union[str, dict, Iterable], name: str = None): 216 """Add a new secret to be masked to this filter instance.""" 217 if isinstance(secret, dict): 218 for k, v in secret.items(): 219 self.add_mask(v, k) 220 elif isinstance(secret, str): 221 if not secret: 222 return 223 pattern = re.escape(secret) 224 if pattern not in self.patterns and (not name or should_hide_value_for_key(name)): 225 self.patterns.add(pattern) 226 self.replacer = re.compile('|'.join(self.patterns)) 227 elif isinstance(secret, collections.abc.Iterable): 228 for v in secret: 229 self.add_mask(v, name) 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/utils/log/secrets_masker.py b/airflow/utils/log/secrets_masker.py --- a/airflow/utils/log/secrets_masker.py +++ b/airflow/utils/log/secrets_masker.py @@ -34,13 +34,15 @@ DEFAULT_SENSITIVE_FIELDS = frozenset( { - 'password', - 'secret', - 'passwd', - 'authorization', + 'access_token', 'api_key', 'apikey', - 'access_token', + 'authorization', + 'passphrase', + 'passwd', + 'password', + 'private_key', + 'secret', } ) """Names of fields (Connection extra, Variable key name etc.) that are deemed sensitive"""
{"golden_diff": "diff --git a/airflow/utils/log/secrets_masker.py b/airflow/utils/log/secrets_masker.py\n--- a/airflow/utils/log/secrets_masker.py\n+++ b/airflow/utils/log/secrets_masker.py\n@@ -34,13 +34,15 @@\n \n DEFAULT_SENSITIVE_FIELDS = frozenset(\n {\n- 'password',\n- 'secret',\n- 'passwd',\n- 'authorization',\n+ 'access_token',\n 'api_key',\n 'apikey',\n- 'access_token',\n+ 'authorization',\n+ 'passphrase',\n+ 'passwd',\n+ 'password',\n+ 'private_key',\n+ 'secret',\n }\n )\n \"\"\"Names of fields (Connection extra, Variable key name etc.) that are deemed sensitive\"\"\"\n", "issue": "Consider and add common sensitive names\n**Description** \r\n\r\nSince sensitive informations in the connection object (specifically the extras field) are now being masked based on sensitive key names, we should consider adding some common sensitive key names.\r\n\r\n`private_key` from [ssh connection](https://airflow.apache.org/docs/apache-airflow-providers-ssh/stable/connections/ssh.html) is an examples.\r\n\r\n**Use case / motivation**\r\n\r\nExtras field used to be blocked out entirely before the sensitive value masking feature (#15599).\r\n\r\n[Before in 2.0.2](https://github.com/apache/airflow/blob/2.0.2/airflow/hooks/base.py#L78\r\n) and [after in 2.1.0](https://github.com/apache/airflow/blob/2.1.0/airflow/hooks/base.py#L78\r\n).\r\n\r\nExtras field containing sensitive information now shown unless the key contains sensitive names.\r\n\r\n**Are you willing to submit a PR?** \r\n\r\n@ashb has expressed interest in adding this.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Mask sensitive information from logs\"\"\"\nimport collections\nimport io\nimport logging\nimport re\nfrom typing import TYPE_CHECKING, Iterable, Optional, Set, TypeVar, Union\n\nfrom airflow.compat.functools import cache, cached_property\n\nif TYPE_CHECKING:\n from airflow.typing_compat import RePatternType\n\n RedactableItem = TypeVar('RedactableItem')\n\n\nlog = logging.getLogger(__name__)\n\n\nDEFAULT_SENSITIVE_FIELDS = frozenset(\n {\n 'password',\n 'secret',\n 'passwd',\n 'authorization',\n 'api_key',\n 'apikey',\n 'access_token',\n }\n)\n\"\"\"Names of fields (Connection extra, Variable key name etc.) 
that are deemed sensitive\"\"\"\n\n\n@cache\ndef get_sensitive_variables_fields():\n \"\"\"Get comma-separated sensitive Variable Fields from airflow.cfg.\"\"\"\n from airflow.configuration import conf\n\n sensitive_fields = DEFAULT_SENSITIVE_FIELDS.copy()\n sensitive_variable_fields = conf.get('core', 'sensitive_var_conn_names')\n if sensitive_variable_fields:\n sensitive_fields |= frozenset({field.strip() for field in sensitive_variable_fields.split(',')})\n return sensitive_fields\n\n\ndef should_hide_value_for_key(name):\n \"\"\"Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden\"\"\"\n from airflow import settings\n\n if name and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:\n name = name.strip().lower()\n return any(s in name for s in get_sensitive_variables_fields())\n return False\n\n\ndef mask_secret(secret: Union[str, dict, Iterable], name: str = None) -> None:\n \"\"\"\n Mask a secret from appearing in the task logs.\n\n If ``name`` is provided, then it will only be masked if the name matches\n one of the configured \"sensitive\" names.\n\n If ``secret`` is a dict or a iterable (excluding str) then it will be\n recursively walked and keys with sensitive names will be hidden.\n \"\"\"\n # Delay import\n from airflow import settings\n\n # Filtering all log messages is not a free process, so we only do it when\n # running tasks\n if not settings.MASK_SECRETS_IN_LOGS or not secret:\n return\n\n _secrets_masker().add_mask(secret, name)\n\n\ndef redact(value: \"RedactableItem\", name: str = None) -> \"RedactableItem\":\n \"\"\"Redact any secrets found in ``value``.\"\"\"\n return _secrets_masker().redact(value, name)\n\n\n@cache\ndef _secrets_masker() -> \"SecretsMasker\":\n\n for flt in logging.getLogger('airflow.task').filters:\n if isinstance(flt, SecretsMasker):\n return flt\n raise RuntimeError(\"No SecretsMasker found!\")\n\n\nclass SecretsMasker(logging.Filter):\n \"\"\"Redact secrets from logs\"\"\"\n\n replacer: Optional[\"RePatternType\"] = None\n patterns: Set[str]\n\n ALREADY_FILTERED_FLAG = \"__SecretsMasker_filtered\"\n\n def __init__(self):\n super().__init__()\n self.patterns = set()\n\n @cached_property\n def _record_attrs_to_ignore(self) -> Iterable[str]:\n # Doing log.info(..., extra={'foo': 2}) sets extra properties on\n # record, i.e. record.foo. And we need to filter those too. 
Fun\n #\n # Create a record, and look at what attributes are on it, and ignore\n # all the default ones!\n\n record = logging.getLogRecordFactory()(\n # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,\n \"x\",\n logging.INFO,\n __file__,\n 1,\n \"\",\n tuple(),\n exc_info=None,\n func=\"funcname\",\n )\n return frozenset(record.__dict__).difference({'msg', 'args'})\n\n def filter(self, record) -> bool:\n if self.ALREADY_FILTERED_FLAG in record.__dict__:\n # Filters are attached to multiple handlers and logs, keep a\n # \"private\" flag that stops us needing to process it more than once\n return True\n\n if self.replacer:\n for k, v in record.__dict__.items():\n if k in self._record_attrs_to_ignore:\n continue\n record.__dict__[k] = self.redact(v)\n if record.exc_info and record.exc_info[1] is not None:\n exc = record.exc_info[1]\n # I'm not sure if this is a good idea!\n exc.args = (self.redact(v) for v in exc.args)\n record.__dict__[self.ALREADY_FILTERED_FLAG] = True\n\n return True\n\n def _redact_all(self, item: \"RedactableItem\") -> \"RedactableItem\":\n if isinstance(item, dict):\n return {dict_key: self._redact_all(subval) for dict_key, subval in item.items()}\n elif isinstance(item, str):\n return '***'\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact_all(subval) for subval in item)\n elif isinstance(item, Iterable):\n return list(self._redact_all(subval) for subval in item)\n else:\n return item\n\n # pylint: disable=too-many-return-statements\n def redact(self, item: \"RedactableItem\", name: str = None) -> \"RedactableItem\":\n \"\"\"\n Redact an any secrets found in ``item``, if it is a string.\n\n If ``name`` is given, and it's a \"sensitive\" name (see\n :func:`should_hide_value_for_key`) then all string values in the item\n is redacted.\n\n \"\"\"\n try:\n if name and should_hide_value_for_key(name):\n return self._redact_all(item)\n\n if isinstance(item, dict):\n return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}\n elif isinstance(item, str):\n if self.replacer:\n # We can't replace specific values, but the key-based redacting\n # can still happen, so we can't short-circuit, we need to walk\n # the structure.\n return self.replacer.sub('***', item)\n return item\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self.redact(subval) for subval in item)\n elif isinstance(item, io.IOBase):\n return item\n elif isinstance(item, Iterable):\n return list(self.redact(subval) for subval in item)\n else:\n return item\n except Exception as e: # pylint: disable=broad-except\n log.warning(\n \"Unable to redact %r, please report this via <https://github.com/apache/airflow/issues>. 
\"\n \"Error was: %s: %s\",\n item,\n type(e).__name__,\n str(e),\n )\n return item\n\n # pylint: enable=too-many-return-statements\n\n def add_mask(self, secret: Union[str, dict, Iterable], name: str = None):\n \"\"\"Add a new secret to be masked to this filter instance.\"\"\"\n if isinstance(secret, dict):\n for k, v in secret.items():\n self.add_mask(v, k)\n elif isinstance(secret, str):\n if not secret:\n return\n pattern = re.escape(secret)\n if pattern not in self.patterns and (not name or should_hide_value_for_key(name)):\n self.patterns.add(pattern)\n self.replacer = re.compile('|'.join(self.patterns))\n elif isinstance(secret, collections.abc.Iterable):\n for v in secret:\n self.add_mask(v, name)\n", "path": "airflow/utils/log/secrets_masker.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Mask sensitive information from logs\"\"\"\nimport collections\nimport io\nimport logging\nimport re\nfrom typing import TYPE_CHECKING, Iterable, Optional, Set, TypeVar, Union\n\nfrom airflow.compat.functools import cache, cached_property\n\nif TYPE_CHECKING:\n from airflow.typing_compat import RePatternType\n\n RedactableItem = TypeVar('RedactableItem')\n\n\nlog = logging.getLogger(__name__)\n\n\nDEFAULT_SENSITIVE_FIELDS = frozenset(\n {\n 'access_token',\n 'api_key',\n 'apikey',\n 'authorization',\n 'passphrase',\n 'passwd',\n 'password',\n 'private_key',\n 'secret',\n }\n)\n\"\"\"Names of fields (Connection extra, Variable key name etc.) 
that are deemed sensitive\"\"\"\n\n\n@cache\ndef get_sensitive_variables_fields():\n \"\"\"Get comma-separated sensitive Variable Fields from airflow.cfg.\"\"\"\n from airflow.configuration import conf\n\n sensitive_fields = DEFAULT_SENSITIVE_FIELDS.copy()\n sensitive_variable_fields = conf.get('core', 'sensitive_var_conn_names')\n if sensitive_variable_fields:\n sensitive_fields |= frozenset({field.strip() for field in sensitive_variable_fields.split(',')})\n return sensitive_fields\n\n\ndef should_hide_value_for_key(name):\n \"\"\"Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden\"\"\"\n from airflow import settings\n\n if name and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:\n name = name.strip().lower()\n return any(s in name for s in get_sensitive_variables_fields())\n return False\n\n\ndef mask_secret(secret: Union[str, dict, Iterable], name: str = None) -> None:\n \"\"\"\n Mask a secret from appearing in the task logs.\n\n If ``name`` is provided, then it will only be masked if the name matches\n one of the configured \"sensitive\" names.\n\n If ``secret`` is a dict or a iterable (excluding str) then it will be\n recursively walked and keys with sensitive names will be hidden.\n \"\"\"\n # Delay import\n from airflow import settings\n\n # Filtering all log messages is not a free process, so we only do it when\n # running tasks\n if not settings.MASK_SECRETS_IN_LOGS or not secret:\n return\n\n _secrets_masker().add_mask(secret, name)\n\n\ndef redact(value: \"RedactableItem\", name: str = None) -> \"RedactableItem\":\n \"\"\"Redact any secrets found in ``value``.\"\"\"\n return _secrets_masker().redact(value, name)\n\n\n@cache\ndef _secrets_masker() -> \"SecretsMasker\":\n\n for flt in logging.getLogger('airflow.task').filters:\n if isinstance(flt, SecretsMasker):\n return flt\n raise RuntimeError(\"No SecretsMasker found!\")\n\n\nclass SecretsMasker(logging.Filter):\n \"\"\"Redact secrets from logs\"\"\"\n\n replacer: Optional[\"RePatternType\"] = None\n patterns: Set[str]\n\n ALREADY_FILTERED_FLAG = \"__SecretsMasker_filtered\"\n\n def __init__(self):\n super().__init__()\n self.patterns = set()\n\n @cached_property\n def _record_attrs_to_ignore(self) -> Iterable[str]:\n # Doing log.info(..., extra={'foo': 2}) sets extra properties on\n # record, i.e. record.foo. And we need to filter those too. 
Fun\n #\n # Create a record, and look at what attributes are on it, and ignore\n # all the default ones!\n\n record = logging.getLogRecordFactory()(\n # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,\n \"x\",\n logging.INFO,\n __file__,\n 1,\n \"\",\n tuple(),\n exc_info=None,\n func=\"funcname\",\n )\n return frozenset(record.__dict__).difference({'msg', 'args'})\n\n def filter(self, record) -> bool:\n if self.ALREADY_FILTERED_FLAG in record.__dict__:\n # Filters are attached to multiple handlers and logs, keep a\n # \"private\" flag that stops us needing to process it more than once\n return True\n\n if self.replacer:\n for k, v in record.__dict__.items():\n if k in self._record_attrs_to_ignore:\n continue\n record.__dict__[k] = self.redact(v)\n if record.exc_info and record.exc_info[1] is not None:\n exc = record.exc_info[1]\n # I'm not sure if this is a good idea!\n exc.args = (self.redact(v) for v in exc.args)\n record.__dict__[self.ALREADY_FILTERED_FLAG] = True\n\n return True\n\n def _redact_all(self, item: \"RedactableItem\") -> \"RedactableItem\":\n if isinstance(item, dict):\n return {dict_key: self._redact_all(subval) for dict_key, subval in item.items()}\n elif isinstance(item, str):\n return '***'\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact_all(subval) for subval in item)\n elif isinstance(item, Iterable):\n return list(self._redact_all(subval) for subval in item)\n else:\n return item\n\n # pylint: disable=too-many-return-statements\n def redact(self, item: \"RedactableItem\", name: str = None) -> \"RedactableItem\":\n \"\"\"\n Redact an any secrets found in ``item``, if it is a string.\n\n If ``name`` is given, and it's a \"sensitive\" name (see\n :func:`should_hide_value_for_key`) then all string values in the item\n is redacted.\n\n \"\"\"\n try:\n if name and should_hide_value_for_key(name):\n return self._redact_all(item)\n\n if isinstance(item, dict):\n return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}\n elif isinstance(item, str):\n if self.replacer:\n # We can't replace specific values, but the key-based redacting\n # can still happen, so we can't short-circuit, we need to walk\n # the structure.\n return self.replacer.sub('***', item)\n return item\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self.redact(subval) for subval in item)\n elif isinstance(item, io.IOBase):\n return item\n elif isinstance(item, Iterable):\n return list(self.redact(subval) for subval in item)\n else:\n return item\n except Exception as e: # pylint: disable=broad-except\n log.warning(\n \"Unable to redact %r, please report this via <https://github.com/apache/airflow/issues>. \"\n \"Error was: %s: %s\",\n item,\n type(e).__name__,\n str(e),\n )\n return item\n\n # pylint: enable=too-many-return-statements\n\n def add_mask(self, secret: Union[str, dict, Iterable], name: str = None):\n \"\"\"Add a new secret to be masked to this filter instance.\"\"\"\n if isinstance(secret, dict):\n for k, v in secret.items():\n self.add_mask(v, k)\n elif isinstance(secret, str):\n if not secret:\n return\n pattern = re.escape(secret)\n if pattern not in self.patterns and (not name or should_hide_value_for_key(name)):\n self.patterns.add(pattern)\n self.replacer = re.compile('|'.join(self.patterns))\n elif isinstance(secret, collections.abc.Iterable):\n for v in secret:\n self.add_mask(v, name)\n", "path": "airflow/utils/log/secrets_masker.py"}]}
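A minimal standalone sketch of the masking mechanism shown in the record above (the escaped-pattern alternation built by `add_mask` and applied by `redact`); the secret values are made up for illustration and this is not Airflow's actual `SecretsMasker` class:

```python
import re

# Escape each secret so regex metacharacters inside passwords are treated
# literally, then build one alternation pattern and blank out every occurrence.
patterns = {re.escape("s3cr3t"), re.escape("p@ss[word]")}
replacer = re.compile("|".join(patterns))

print(replacer.sub("***", "conn uses p@ss[word] and token s3cr3t"))
# -> conn uses *** and token ***
```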
2,934
172
gh_patches_debug_427
rasdani/github-patches
git_diff
python__python-docs-es-1787
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Translate 'using/unix.po' This needs to reach 100% translated. The rendered version of this file will be available at https://docs.python.org/es/3.10/using/unix.html once translated. Meanwhile, the English version is shown. Current stats for `using/unix.po`: * Fuzzy: 1 * Percent translated: 88.9% * Entries: 40 / 45 * Untranslated: 5 Please, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it. Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/translate.py` Content: ``` 1 import os 2 import re 3 import sys 4 from typing import Dict, Tuple 5 6 import polib 7 8 VERBOSE = False 9 DEBUG = False 10 SKIP_TRANSLATED_ENTRIES = True 11 12 try: 13 from deep_translator import GoogleTranslator 14 except ImportError: 15 print("Error: This util script needs `deep_translator` to be installed") 16 sys.exit(1) 17 18 _patterns = [ 19 ":c:func:`[^`]+`", 20 ":c:type:`[^`]+`", 21 ":c:macro:`[^`]+`", 22 ":c:member:`[^`]+`", 23 ":c:data:`[^`]+`", 24 ":py:data:`[^`]+`", 25 ":py:mod:`[^`]+`", 26 ":func:`[^`]+`", 27 ":mod:`[^`]+`", 28 ":ref:`[^`]+`", 29 ":class:`[^`]+`", 30 ":pep:`[^`]+`", 31 ":data:`[^`]+`", 32 ":exc:`[^`]+`", 33 ":term:`[^`]+`", 34 ":meth:`[^`]+`", 35 ":envvar:`[^`]+`", 36 ":file:`[^`]+`", 37 ":attr:`[^`]+`", 38 ":const:`[^`]+`", 39 ":issue:`[^`]+`", 40 ":opcode:`[^`]+`", 41 ":option:`[^`]+`", 42 ":program:`[^`]+`", 43 ":keyword:`[^`]+`", 44 ":RFC:`[^`]+`", 45 ":rfc:`[^`]+`", 46 ":doc:`[^`]+`", 47 ":manpage:`[^`]+`", 48 ":sup:`[^`]+`", 49 "``[^`]+``", 50 "`[^`]+`__", 51 "`[^`]+`_", 52 "\*\*[^\*]+\*\*", # bold text between ** 53 "\*[^\*]+\*", # italic text between * 54 ] 55 56 _exps = [re.compile(e) for e in _patterns] 57 58 def protect_sphinx_directives(s: str) -> Tuple[dict, str]: 59 """ 60 Parameters: 61 string containing the text to translate 62 63 Returns: 64 dictionary containing all the placeholder text as keys 65 and the correct value. 66 """ 67 68 i = 0 69 d: Dict[str, str] = {} 70 for exp in _exps: 71 matches = exp.findall(s) 72 if DEBUG: 73 print(exp, matches) 74 for match in matches: 75 ph = f"XASDF{str(i).zfill(2)}" 76 s = s.replace(match, ph) 77 if ph in d and VERBOSE: 78 print(f"Error: {ph} is already in the dictionary") 79 print("new", match) 80 print("old", d[ph]) 81 d[ph] = match 82 i += 1 83 return d, s 84 85 86 def undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str: 87 for ph, value in placeholders.items(): 88 translated_text = translated_text.replace(ph, value) 89 if DEBUG: 90 print(ph, value) 91 print(translated_text) 92 return translated_text 93 94 95 if __name__ == "__main__": 96 filename = sys.argv[1] 97 if not os.path.isfile(filename): 98 print(f"File not found: '{filename}'") 99 sys.exit(-1) 100 101 po = polib.pofile(filename) 102 translator = GoogleTranslator(source="en", target="es") 103 104 for entry in po: 105 # If the entry has already a translation, skip. 
106 if SKIP_TRANSLATED_ENTRIES and entry.msgstr: 107 continue 108 109 print("\nEN|", entry.msgid) 110 placeholders, temp_text = protect_sphinx_directives(entry.msgid) 111 if VERBOSE: 112 print(temp_text) 113 print(placeholders) 114 115 # Translate the temporary text without sphinx statements 116 translated_text = translator.translate(temp_text) 117 118 # Recover sphinx statements 119 real_text = undo_sphinx_directives_protection(placeholders, translated_text) 120 print("ES|", real_text) 121 122 # Replace the po file translated entry 123 entry.msgstr = real_text 124 125 # Save the file after all the entries are translated 126 po.save() 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/translate.py b/scripts/translate.py --- a/scripts/translate.py +++ b/scripts/translate.py @@ -44,6 +44,7 @@ ":RFC:`[^`]+`", ":rfc:`[^`]+`", ":doc:`[^`]+`", + ":source:`[^`]+`", ":manpage:`[^`]+`", ":sup:`[^`]+`", "``[^`]+``",
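As a quick check on the fix, a standalone sketch (not the project script, and using a made-up sentence) of how the new `:source:` pattern keeps that role out of the text sent to the translator, with the placeholder round-trip restoring it afterwards:

```python
import re

# Subset of the pattern list, including the ":source:" role added by the patch.
exps = [re.compile(p) for p in (r":source:`[^`]+`", r"``[^`]+``")]

def protect(text):
    placeholders, i = {}, 0
    for exp in exps:
        for match in exp.findall(text):
            placeholder = f"XASDF{str(i).zfill(2)}"
            text = text.replace(match, placeholder)
            placeholders[placeholder] = match
            i += 1
    return placeholders, text

placeholders, masked = protect("See :source:`Lib/os.py` and ``os.fork``.")
print(masked)  # Sphinx roles are replaced by placeholders before translation
for placeholder, original in placeholders.items():
    masked = masked.replace(placeholder, original)
print(masked)  # round-trips back to the original sentence
```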
{"golden_diff": "diff --git a/scripts/translate.py b/scripts/translate.py\n--- a/scripts/translate.py\n+++ b/scripts/translate.py\n@@ -44,6 +44,7 @@\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n+ \":source:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n", "issue": "Translate 'using/unix.po'\nThis needs to reach 100% translated.\n\nThe rendered version of this file will be available at https://docs.python.org/es/3.10/using/unix.html once translated.\nMeanwhile, the English version is shown.\n\nCurrent stats for `using/unix.po`:\n\n* Fuzzy: 1\n* Percent translated: 88.9%\n* Entries: 40 / 45\n* Untranslated: 5\n\nPlease, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it.\n\nRemember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).\n", "before_files": [{"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n 
print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n", "path": "scripts/translate.py"}], "after_files": [{"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \":source:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n", "path": "scripts/translate.py"}]}
1,612
105
gh_patches_debug_32832
rasdani/github-patches
git_diff
cisagov__manage.get.gov-426
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Don't display redundant form field labels - Don't display redundant field labels - Retain field labels for screenreaders --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/registrar/templatetags/field_helpers.py` Content: ``` 1 """Custom field helpers for our inputs.""" 2 import re 3 4 from django import template 5 6 register = template.Library() 7 8 9 @register.inclusion_tag("includes/input_with_errors.html", takes_context=True) 10 def input_with_errors(context, field=None): # noqa: C901 11 """Make an input field along with error handling. 12 13 Args: 14 field: The field instance. 15 16 In addition to the explicit `field` argument, this inclusion_tag takes the 17 following "widget-tweak-esque" parameters from the surrounding context. 18 19 Context args: 20 add_class: append to input element's `class` attribute 21 add_error_class: like `add_class` but only if field.errors is not empty 22 add_required_class: like `add_class` but only if field is required 23 add_label_class: append to input element's label's `class` attribute 24 add_group_class: append to input element's surrounding tag's `class` attribute 25 attr_* - adds or replaces any single html attribute for the input 26 add_error_attr_* - like `attr_*` but only if field.errors is not empty 27 28 Example usage: 29 ``` 30 {% for form in forms.0 %} 31 {% with add_class="usa-input--medium" %} 32 {% with attr_required=True attr_disabled=False %} 33 {% input_with_errors form.street_address1 %} 34 {% endwith %} 35 {% endwith %} 36 {% endfor } 37 38 There are a few edge cases to keep in mind: 39 - a "maxlength" attribute will cause the input to use USWDS Character counter 40 - the field's `use_fieldset` controls whether the output is label/field or 41 fieldset/legend/field 42 - checkbox label styling is different (this is handled, don't worry about it) 43 """ 44 context = context.flatten() 45 context["field"] = field 46 47 # get any attributes specified in the field's definition 48 attrs = dict(field.field.widget.attrs) 49 50 # these will be converted to CSS strings 51 classes = [] 52 label_classes = [] 53 group_classes = [] 54 55 # this will be converted to an attribute string 56 described_by = [] 57 58 if "class" in attrs: 59 classes.append(attrs.pop("class")) 60 61 # parse context for field attributes and classes 62 # --- 63 # here we loop through all items in the context dictionary 64 # (this is the context which was being used to render the 65 # outer template in which this {% input_with_errors %} appeared!) 
66 # and look for "magic" keys -- these are used to modify the 67 # appearance and behavior of the final HTML 68 for key, value in context.items(): 69 if key.startswith("attr_"): 70 attr_name = re.sub("_", "-", key[5:]) 71 attrs[attr_name] = value 72 elif key.startswith("add_error_attr_") and field.errors: 73 attr_name = re.sub("_", "-", key[15:]) 74 attrs[attr_name] = value 75 76 elif key == "add_class": 77 classes.append(value) 78 elif key == "add_required_class" and field.required: 79 classes.append(value) 80 elif key == "add_error_class" and field.errors: 81 classes.append(value) 82 83 elif key == "add_label_class": 84 label_classes.append(value) 85 86 elif key == "add_group_class": 87 group_classes.append(value) 88 89 attrs["id"] = field.auto_id 90 91 # do some work for various edge cases 92 93 if "maxlength" in attrs: 94 # associate the field programmatically with its hint text 95 described_by.append(f"{attrs['id']}__message") 96 97 if field.use_fieldset: 98 context["label_tag"] = "legend" 99 else: 100 context["label_tag"] = "label" 101 102 if field.use_fieldset: 103 label_classes.append("usa-legend") 104 105 if field.widget_type == "checkbox": 106 label_classes.append("usa-checkbox__label") 107 elif not field.use_fieldset: 108 label_classes.append("usa-label") 109 110 if field.errors: 111 # associate the field programmatically with its error message 112 message_div_id = f"{attrs['id']}__error-message" 113 described_by.append(message_div_id) 114 115 # set the field invalid 116 # due to weirdness, this must be a string, not a boolean 117 attrs["aria-invalid"] = "true" 118 119 # style the invalid field 120 classes.append("usa-input--error") 121 label_classes.append("usa-label--error") 122 group_classes.append("usa-form-group--error") 123 124 # convert lists into strings 125 126 if classes: 127 context["classes"] = " ".join(classes) 128 129 if label_classes: 130 context["label_classes"] = " ".join(label_classes) 131 132 if group_classes: 133 context["group_classes"] = " ".join(group_classes) 134 135 if described_by: 136 # ensure we don't overwrite existing attribute value 137 if "aria-describedby" in attrs: 138 described_by.append(attrs["aria-describedby"]) 139 attrs["aria-describedby"] = " ".join(described_by) 140 141 # ask Django to give us the widget dict 142 # see Widget.get_context() on 143 # https://docs.djangoproject.com/en/4.1/ref/forms/widgets 144 widget = field.field.widget.get_context( 145 field.html_name, field.value() or field.initial, field.build_widget_attrs(attrs) 146 ) # -> {"widget": {"name": ...}} 147 148 context["widget"] = widget["widget"] 149 150 return context 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/registrar/templatetags/field_helpers.py b/src/registrar/templatetags/field_helpers.py --- a/src/registrar/templatetags/field_helpers.py +++ b/src/registrar/templatetags/field_helpers.py @@ -21,6 +21,7 @@ add_error_class: like `add_class` but only if field.errors is not empty add_required_class: like `add_class` but only if field is required add_label_class: append to input element's label's `class` attribute + add_legend_class: append to input element's legend's `class` attribute add_group_class: append to input element's surrounding tag's `class` attribute attr_* - adds or replaces any single html attribute for the input add_error_attr_* - like `attr_*` but only if field.errors is not empty @@ -50,6 +51,7 @@ # these will be converted to CSS strings classes = [] label_classes = [] + legend_classes = [] group_classes = [] # this will be converted to an attribute string @@ -82,6 +84,8 @@ elif key == "add_label_class": label_classes.append(value) + elif key == "add_legend_class": + legend_classes.append(value) elif key == "add_group_class": group_classes.append(value) @@ -129,6 +133,9 @@ if label_classes: context["label_classes"] = " ".join(label_classes) + if legend_classes: + context["legend_classes"] = " ".join(legend_classes) + if group_classes: context["group_classes"] = " ".join(group_classes)
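A hedged sketch of the class-collection logic the patch adds, not the Django template tag itself; the `usa-sr-only` utility class is an assumption about what a caller would pass to hide the visible legend while keeping it available to screen readers:

```python
# Simulate the context loop: legend classes are now gathered separately from
# label classes, so templates can style (or visually hide) legends without
# touching labels.
context = {"add_label_class": "usa-label", "add_legend_class": "usa-sr-only"}

label_classes, legend_classes = [], []
for key, value in context.items():
    if key == "add_label_class":
        label_classes.append(value)
    elif key == "add_legend_class":
        legend_classes.append(value)

print(" ".join(label_classes))   # -> usa-label
print(" ".join(legend_classes))  # -> usa-sr-only
```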
{"golden_diff": "diff --git a/src/registrar/templatetags/field_helpers.py b/src/registrar/templatetags/field_helpers.py\n--- a/src/registrar/templatetags/field_helpers.py\n+++ b/src/registrar/templatetags/field_helpers.py\n@@ -21,6 +21,7 @@\n add_error_class: like `add_class` but only if field.errors is not empty\n add_required_class: like `add_class` but only if field is required\n add_label_class: append to input element's label's `class` attribute\n+ add_legend_class: append to input element's legend's `class` attribute\n add_group_class: append to input element's surrounding tag's `class` attribute\n attr_* - adds or replaces any single html attribute for the input\n add_error_attr_* - like `attr_*` but only if field.errors is not empty\n@@ -50,6 +51,7 @@\n # these will be converted to CSS strings\n classes = []\n label_classes = []\n+ legend_classes = []\n group_classes = []\n \n # this will be converted to an attribute string\n@@ -82,6 +84,8 @@\n \n elif key == \"add_label_class\":\n label_classes.append(value)\n+ elif key == \"add_legend_class\":\n+ legend_classes.append(value)\n \n elif key == \"add_group_class\":\n group_classes.append(value)\n@@ -129,6 +133,9 @@\n if label_classes:\n context[\"label_classes\"] = \" \".join(label_classes)\n \n+ if legend_classes:\n+ context[\"legend_classes\"] = \" \".join(legend_classes)\n+\n if group_classes:\n context[\"group_classes\"] = \" \".join(group_classes)\n", "issue": "Don't display redundant form field labels\n- Don't display redundant field labels \n- Retain field labels for screenreaders\n", "before_files": [{"content": "\"\"\"Custom field helpers for our inputs.\"\"\"\nimport re\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/input_with_errors.html\", takes_context=True)\ndef input_with_errors(context, field=None): # noqa: C901\n \"\"\"Make an input field along with error handling.\n\n Args:\n field: The field instance.\n\n In addition to the explicit `field` argument, this inclusion_tag takes the\n following \"widget-tweak-esque\" parameters from the surrounding context.\n\n Context args:\n add_class: append to input element's `class` attribute\n add_error_class: like `add_class` but only if field.errors is not empty\n add_required_class: like `add_class` but only if field is required\n add_label_class: append to input element's label's `class` attribute\n add_group_class: append to input element's surrounding tag's `class` attribute\n attr_* - adds or replaces any single html attribute for the input\n add_error_attr_* - like `attr_*` but only if field.errors is not empty\n\n Example usage:\n ```\n {% for form in forms.0 %}\n {% with add_class=\"usa-input--medium\" %}\n {% with attr_required=True attr_disabled=False %}\n {% input_with_errors form.street_address1 %}\n {% endwith %}\n {% endwith %}\n {% endfor }\n\n There are a few edge cases to keep in mind:\n - a \"maxlength\" attribute will cause the input to use USWDS Character counter\n - the field's `use_fieldset` controls whether the output is label/field or\n fieldset/legend/field\n - checkbox label styling is different (this is handled, don't worry about it)\n \"\"\"\n context = context.flatten()\n context[\"field\"] = field\n\n # get any attributes specified in the field's definition\n attrs = dict(field.field.widget.attrs)\n\n # these will be converted to CSS strings\n classes = []\n label_classes = []\n group_classes = []\n\n # this will be converted to an attribute string\n described_by = []\n\n if \"class\" in 
attrs:\n classes.append(attrs.pop(\"class\"))\n\n # parse context for field attributes and classes\n # ---\n # here we loop through all items in the context dictionary\n # (this is the context which was being used to render the\n # outer template in which this {% input_with_errors %} appeared!)\n # and look for \"magic\" keys -- these are used to modify the\n # appearance and behavior of the final HTML\n for key, value in context.items():\n if key.startswith(\"attr_\"):\n attr_name = re.sub(\"_\", \"-\", key[5:])\n attrs[attr_name] = value\n elif key.startswith(\"add_error_attr_\") and field.errors:\n attr_name = re.sub(\"_\", \"-\", key[15:])\n attrs[attr_name] = value\n\n elif key == \"add_class\":\n classes.append(value)\n elif key == \"add_required_class\" and field.required:\n classes.append(value)\n elif key == \"add_error_class\" and field.errors:\n classes.append(value)\n\n elif key == \"add_label_class\":\n label_classes.append(value)\n\n elif key == \"add_group_class\":\n group_classes.append(value)\n\n attrs[\"id\"] = field.auto_id\n\n # do some work for various edge cases\n\n if \"maxlength\" in attrs:\n # associate the field programmatically with its hint text\n described_by.append(f\"{attrs['id']}__message\")\n\n if field.use_fieldset:\n context[\"label_tag\"] = \"legend\"\n else:\n context[\"label_tag\"] = \"label\"\n\n if field.use_fieldset:\n label_classes.append(\"usa-legend\")\n\n if field.widget_type == \"checkbox\":\n label_classes.append(\"usa-checkbox__label\")\n elif not field.use_fieldset:\n label_classes.append(\"usa-label\")\n\n if field.errors:\n # associate the field programmatically with its error message\n message_div_id = f\"{attrs['id']}__error-message\"\n described_by.append(message_div_id)\n\n # set the field invalid\n # due to weirdness, this must be a string, not a boolean\n attrs[\"aria-invalid\"] = \"true\"\n\n # style the invalid field\n classes.append(\"usa-input--error\")\n label_classes.append(\"usa-label--error\")\n group_classes.append(\"usa-form-group--error\")\n\n # convert lists into strings\n\n if classes:\n context[\"classes\"] = \" \".join(classes)\n\n if label_classes:\n context[\"label_classes\"] = \" \".join(label_classes)\n\n if group_classes:\n context[\"group_classes\"] = \" \".join(group_classes)\n\n if described_by:\n # ensure we don't overwrite existing attribute value\n if \"aria-describedby\" in attrs:\n described_by.append(attrs[\"aria-describedby\"])\n attrs[\"aria-describedby\"] = \" \".join(described_by)\n\n # ask Django to give us the widget dict\n # see Widget.get_context() on\n # https://docs.djangoproject.com/en/4.1/ref/forms/widgets\n widget = field.field.widget.get_context(\n field.html_name, field.value() or field.initial, field.build_widget_attrs(attrs)\n ) # -> {\"widget\": {\"name\": ...}}\n\n context[\"widget\"] = widget[\"widget\"]\n\n return context\n", "path": "src/registrar/templatetags/field_helpers.py"}], "after_files": [{"content": "\"\"\"Custom field helpers for our inputs.\"\"\"\nimport re\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/input_with_errors.html\", takes_context=True)\ndef input_with_errors(context, field=None): # noqa: C901\n \"\"\"Make an input field along with error handling.\n\n Args:\n field: The field instance.\n\n In addition to the explicit `field` argument, this inclusion_tag takes the\n following \"widget-tweak-esque\" parameters from the surrounding context.\n\n Context args:\n add_class: append to input element's `class` 
attribute\n add_error_class: like `add_class` but only if field.errors is not empty\n add_required_class: like `add_class` but only if field is required\n add_label_class: append to input element's label's `class` attribute\n add_legend_class: append to input element's legend's `class` attribute\n add_group_class: append to input element's surrounding tag's `class` attribute\n attr_* - adds or replaces any single html attribute for the input\n add_error_attr_* - like `attr_*` but only if field.errors is not empty\n\n Example usage:\n ```\n {% for form in forms.0 %}\n {% with add_class=\"usa-input--medium\" %}\n {% with attr_required=True attr_disabled=False %}\n {% input_with_errors form.street_address1 %}\n {% endwith %}\n {% endwith %}\n {% endfor }\n\n There are a few edge cases to keep in mind:\n - a \"maxlength\" attribute will cause the input to use USWDS Character counter\n - the field's `use_fieldset` controls whether the output is label/field or\n fieldset/legend/field\n - checkbox label styling is different (this is handled, don't worry about it)\n \"\"\"\n context = context.flatten()\n context[\"field\"] = field\n\n # get any attributes specified in the field's definition\n attrs = dict(field.field.widget.attrs)\n\n # these will be converted to CSS strings\n classes = []\n label_classes = []\n legend_classes = []\n group_classes = []\n\n # this will be converted to an attribute string\n described_by = []\n\n if \"class\" in attrs:\n classes.append(attrs.pop(\"class\"))\n\n # parse context for field attributes and classes\n # ---\n # here we loop through all items in the context dictionary\n # (this is the context which was being used to render the\n # outer template in which this {% input_with_errors %} appeared!)\n # and look for \"magic\" keys -- these are used to modify the\n # appearance and behavior of the final HTML\n for key, value in context.items():\n if key.startswith(\"attr_\"):\n attr_name = re.sub(\"_\", \"-\", key[5:])\n attrs[attr_name] = value\n elif key.startswith(\"add_error_attr_\") and field.errors:\n attr_name = re.sub(\"_\", \"-\", key[15:])\n attrs[attr_name] = value\n\n elif key == \"add_class\":\n classes.append(value)\n elif key == \"add_required_class\" and field.required:\n classes.append(value)\n elif key == \"add_error_class\" and field.errors:\n classes.append(value)\n\n elif key == \"add_label_class\":\n label_classes.append(value)\n elif key == \"add_legend_class\":\n legend_classes.append(value)\n\n elif key == \"add_group_class\":\n group_classes.append(value)\n\n attrs[\"id\"] = field.auto_id\n\n # do some work for various edge cases\n\n if \"maxlength\" in attrs:\n # associate the field programmatically with its hint text\n described_by.append(f\"{attrs['id']}__message\")\n\n if field.use_fieldset:\n context[\"label_tag\"] = \"legend\"\n else:\n context[\"label_tag\"] = \"label\"\n\n if field.use_fieldset:\n label_classes.append(\"usa-legend\")\n\n if field.widget_type == \"checkbox\":\n label_classes.append(\"usa-checkbox__label\")\n elif not field.use_fieldset:\n label_classes.append(\"usa-label\")\n\n if field.errors:\n # associate the field programmatically with its error message\n message_div_id = f\"{attrs['id']}__error-message\"\n described_by.append(message_div_id)\n\n # set the field invalid\n # due to weirdness, this must be a string, not a boolean\n attrs[\"aria-invalid\"] = \"true\"\n\n # style the invalid field\n classes.append(\"usa-input--error\")\n label_classes.append(\"usa-label--error\")\n 
group_classes.append(\"usa-form-group--error\")\n\n # convert lists into strings\n\n if classes:\n context[\"classes\"] = \" \".join(classes)\n\n if label_classes:\n context[\"label_classes\"] = \" \".join(label_classes)\n\n if legend_classes:\n context[\"legend_classes\"] = \" \".join(legend_classes)\n\n if group_classes:\n context[\"group_classes\"] = \" \".join(group_classes)\n\n if described_by:\n # ensure we don't overwrite existing attribute value\n if \"aria-describedby\" in attrs:\n described_by.append(attrs[\"aria-describedby\"])\n attrs[\"aria-describedby\"] = \" \".join(described_by)\n\n # ask Django to give us the widget dict\n # see Widget.get_context() on\n # https://docs.djangoproject.com/en/4.1/ref/forms/widgets\n widget = field.field.widget.get_context(\n field.html_name, field.value() or field.initial, field.build_widget_attrs(attrs)\n ) # -> {\"widget\": {\"name\": ...}}\n\n context[\"widget\"] = widget[\"widget\"]\n\n return context\n", "path": "src/registrar/templatetags/field_helpers.py"}]}
1,812
384
gh_patches_debug_56800
rasdani/github-patches
git_diff
wright-group__WrightTools-522
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- hide fit functionality --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `WrightTools/__init__.py` Content: ``` 1 """WrightTools init.""" 2 # flake8: noqa 3 4 5 # --- import -------------------------------------------------------------------------------------- 6 7 8 import sys as _sys 9 10 from .__version__ import * 11 from . import artists 12 from . import collection 13 from . import data 14 from . import diagrams 15 from . import fit 16 from . import kit 17 from . import units 18 from . import exceptions 19 20 from ._open import * 21 from .collection._collection import * 22 from .data._data import * 23 24 25 # --- rcparams ------------------------------------------------------------------------------------ 26 27 28 if int(_sys.version.split('.')[0]) > 2: 29 artists.apply_rcparams('fast') 30 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/WrightTools/__init__.py b/WrightTools/__init__.py --- a/WrightTools/__init__.py +++ b/WrightTools/__init__.py @@ -12,7 +12,6 @@ from . import collection from . import data from . import diagrams -from . import fit from . import kit from . import units from . import exceptions
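After this change the submodule is no longer part of the package's public surface; a small hedged sketch (assuming the `fit` subpackage still ships on disk) of how a caller could still reach it explicitly:

```python
import importlib

try:
    fit = importlib.import_module("WrightTools.fit")  # no longer re-exported
except ImportError:
    fit = None  # WrightTools not installed, or fit removed entirely
```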
{"golden_diff": "diff --git a/WrightTools/__init__.py b/WrightTools/__init__.py\n--- a/WrightTools/__init__.py\n+++ b/WrightTools/__init__.py\n@@ -12,7 +12,6 @@\n from . import collection\n from . import data\n from . import diagrams\n-from . import fit\n from . import kit\n from . import units\n from . import exceptions\n", "issue": "hide fit functionality\n\n", "before_files": [{"content": "\"\"\"WrightTools init.\"\"\"\n# flake8: noqa\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport sys as _sys\n\nfrom .__version__ import *\nfrom . import artists\nfrom . import collection\nfrom . import data\nfrom . import diagrams\nfrom . import fit\nfrom . import kit\nfrom . import units\nfrom . import exceptions\n\nfrom ._open import *\nfrom .collection._collection import *\nfrom .data._data import *\n\n\n# --- rcparams ------------------------------------------------------------------------------------\n\n\nif int(_sys.version.split('.')[0]) > 2:\n artists.apply_rcparams('fast')\n", "path": "WrightTools/__init__.py"}], "after_files": [{"content": "\"\"\"WrightTools init.\"\"\"\n# flake8: noqa\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport sys as _sys\n\nfrom .__version__ import *\nfrom . import artists\nfrom . import collection\nfrom . import data\nfrom . import diagrams\nfrom . import kit\nfrom . import units\nfrom . import exceptions\n\nfrom ._open import *\nfrom .collection._collection import *\nfrom .data._data import *\n\n\n# --- rcparams ------------------------------------------------------------------------------------\n\n\nif int(_sys.version.split('.')[0]) > 2:\n artists.apply_rcparams('fast')\n", "path": "WrightTools/__init__.py"}]}
443
87
gh_patches_debug_7839
rasdani/github-patches
git_diff
getsentry__sentry-25
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Limitations on SENTRY_KEY not documented I assumed that SENTRY_KEY was just any random string that should be unique to a deployment and kept secret (ie, serving a similar purpose to django's SECRET_KEY) so I generated a random string to use there. It turns out that it's used in a URL for the feeds: ``` url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'), ``` (in sentry.urls) Mine happened to have an '[' in it which caused a very confusing error about an unterminated regex. It should be documented that SENTRY_KEY will make it into a URL (and further that django will try to then parse it as a regexp when trying to do reverse lookups) and thus can't have most punctuation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sentry/urls.py` Content: ``` 1 import os 2 3 from django.conf.urls.defaults import * 4 5 from sentry.conf import KEY 6 from sentry import views 7 from sentry.feeds import MessageFeed, SummaryFeed 8 9 SENTRY_ROOT = os.path.dirname(__file__) 10 11 urlpatterns = patterns('', 12 url(r'^_media/(?P<path>.*)$', 'django.views.static.serve', 13 {'document_root': os.path.join(SENTRY_ROOT, 'media')}, name='sentry-media'), 14 15 # Feeds 16 17 url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'), 18 url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'), 19 20 # JS and API 21 22 url(r'^jsapi/$', views.ajax_handler, name='sentry-ajax'), 23 url(r'^store/$', views.store, name='sentry-store'), 24 25 # Normal views 26 27 url(r'^login$', views.login, name='sentry-login'), 28 url(r'^logout$', views.logout, name='sentry-logout'), 29 url(r'^group/(\d+)$', views.group, name='sentry-group'), 30 url(r'^group/(\d+)/messages$', views.group_message_list, name='sentry-group-messages'), 31 url(r'^group/(\d+)/messages/(\d+)$', views.group_message_details, name='sentry-group-message'), 32 url(r'^group/(\d+)/actions/([\w_-]+)', views.group_plugin_action, name='sentry-group-plugin-action'), 33 34 url(r'^$', views.index, name='sentry'), 35 ) 36 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry/urls.py b/sentry/urls.py --- a/sentry/urls.py +++ b/sentry/urls.py @@ -1,4 +1,5 @@ import os +import re from django.conf.urls.defaults import * @@ -14,8 +15,8 @@ # Feeds - url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'), - url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'), + url(r'^feeds/%s/messages.xml$' % re.escape(KEY), MessageFeed(), name='sentry-feed-messages'), + url(r'^feeds/%s/summaries.xml$' % re.escape(KEY), SummaryFeed(), name='sentry-feed-summaries'), # JS and API
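A standalone illustration (not Sentry code, with a hypothetical key) of why the key must be escaped before being interpolated into the URL regex: characters such as `[` are regex metacharacters and break pattern compilation and reverse lookups:

```python
import re

KEY = "abc[123"  # hypothetical key containing a metacharacter

try:
    re.compile(r"^feeds/%s/messages.xml$" % KEY)
except re.error as exc:
    print("unescaped key breaks the pattern:", exc)

pattern = re.compile(r"^feeds/%s/messages.xml$" % re.escape(KEY))
print(bool(pattern.match("feeds/abc[123/messages.xml")))  # True
```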
{"golden_diff": "diff --git a/sentry/urls.py b/sentry/urls.py\n--- a/sentry/urls.py\n+++ b/sentry/urls.py\n@@ -1,4 +1,5 @@\n import os\n+import re\n \n from django.conf.urls.defaults import *\n \n@@ -14,8 +15,8 @@\n \n # Feeds\n \n- url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),\n- url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'),\n+ url(r'^feeds/%s/messages.xml$' % re.escape(KEY), MessageFeed(), name='sentry-feed-messages'),\n+ url(r'^feeds/%s/summaries.xml$' % re.escape(KEY), SummaryFeed(), name='sentry-feed-summaries'),\n \n # JS and API\n", "issue": "Limitations on SENTRY_KEY not documented\nI assumed that SENTRY_KEY was just any random string that should be unique to a deployment and kept secret (ie, serving a similar purpose to django's SECRET_KEY) so I generated a random string to use there. \n\nIt turns out that it's used in a URL for the feeds:\n\n```\nurl(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),\n```\n\n(in sentry.urls)\n\nMine happened to have an '[' in it which caused a very confusing error about an unterminated regex. \n\nIt should be documented that SENTRY_KEY will make it into a URL (and further that django will try to then parse it as a regexp when trying to do reverse lookups) and thus can't have most punctuation. \n\n", "before_files": [{"content": "import os\n\nfrom django.conf.urls.defaults import *\n\nfrom sentry.conf import KEY\nfrom sentry import views\nfrom sentry.feeds import MessageFeed, SummaryFeed\n\nSENTRY_ROOT = os.path.dirname(__file__) \n\nurlpatterns = patterns('',\n url(r'^_media/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': os.path.join(SENTRY_ROOT, 'media')}, name='sentry-media'),\n\n # Feeds\n\n url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),\n url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'),\n\n # JS and API\n\n url(r'^jsapi/$', views.ajax_handler, name='sentry-ajax'),\n url(r'^store/$', views.store, name='sentry-store'),\n \n # Normal views\n\n url(r'^login$', views.login, name='sentry-login'),\n url(r'^logout$', views.logout, name='sentry-logout'),\n url(r'^group/(\\d+)$', views.group, name='sentry-group'),\n url(r'^group/(\\d+)/messages$', views.group_message_list, name='sentry-group-messages'),\n url(r'^group/(\\d+)/messages/(\\d+)$', views.group_message_details, name='sentry-group-message'),\n url(r'^group/(\\d+)/actions/([\\w_-]+)', views.group_plugin_action, name='sentry-group-plugin-action'),\n\n url(r'^$', views.index, name='sentry'),\n)\n", "path": "sentry/urls.py"}], "after_files": [{"content": "import os\nimport re\n\nfrom django.conf.urls.defaults import *\n\nfrom sentry.conf import KEY\nfrom sentry import views\nfrom sentry.feeds import MessageFeed, SummaryFeed\n\nSENTRY_ROOT = os.path.dirname(__file__) \n\nurlpatterns = patterns('',\n url(r'^_media/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': os.path.join(SENTRY_ROOT, 'media')}, name='sentry-media'),\n\n # Feeds\n\n url(r'^feeds/%s/messages.xml$' % re.escape(KEY), MessageFeed(), name='sentry-feed-messages'),\n url(r'^feeds/%s/summaries.xml$' % re.escape(KEY), SummaryFeed(), name='sentry-feed-summaries'),\n\n # JS and API\n\n url(r'^jsapi/$', views.ajax_handler, name='sentry-ajax'),\n url(r'^store/$', views.store, name='sentry-store'),\n \n # Normal views\n\n url(r'^login$', views.login, name='sentry-login'),\n url(r'^logout$', views.logout, name='sentry-logout'),\n 
url(r'^group/(\\d+)$', views.group, name='sentry-group'),\n url(r'^group/(\\d+)/messages$', views.group_message_list, name='sentry-group-messages'),\n url(r'^group/(\\d+)/messages/(\\d+)$', views.group_message_details, name='sentry-group-message'),\n url(r'^group/(\\d+)/urls$', views.group_urls, name='sentry-group-urls'),\n url(r'^group/(\\d+)/servers$', views.group_servers, name='sentry-group-servers'),\n url(r'^group/(\\d+)/actions/([\\w_-]+)', views.group_plugin_action, name='sentry-group-plugin-action'),\n\n url(r'^$', views.index, name='sentry'),\n)\n", "path": "sentry/urls.py"}]}
836
194
gh_patches_debug_21879
rasdani/github-patches
git_diff
ansible__ansible-17489
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Delegation of service action fails when using different init systems Hello ansible devs, ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME `service` action, as determined by `lib/ansible/plugins/action/service.py` ##### ANSIBLE VERSION ``` ansible 2.2.0 (devel 1c33b5a9f0) last updated 2016/08/30 09:23:53 (GMT +200) lib/ansible/modules/core: (detached HEAD 5310bab12f) last updated 2016/08/30 09:23:59 (GMT +200) lib/ansible/modules/extras: (detached HEAD 2ef4a34eee) last updated 2016/08/30 09:23:59 (GMT +200) config file = configured module search path = Default w/o overrides ``` ##### CONFIGURATION nothing specific to this bug ##### OS / ENVIRONMENT Debian stretch/testing on amd64, clients are wheezy systems running sysvinit and jessie systems running systemd. ##### SUMMARY Depending on the gathered host systems facts, ansible decides which `service_mgr` to use. On systemd systems, this is going to be `systemd`, on sysvinit systems, it's `service` and so on. The problem is, when delegating a service action from a system running systemd to a system running something else, ansible still uses the systemd service_mgr, which obviously fails. A workaround is forcing the action to `service` with the un(der)documented `use` option as shown in the third play. ##### STEPS TO REPRODUCE consider the following playbook. on some_sysvinit_host, the icinga service needs to be restarted. This fails when that action is delegated and no special care is taken to work around the bug: ``` - hosts: monitoring_running_sysvinit name: Verify that direct service restart works tasks: - service: name=icinga state=restarted - hosts: client_with_systemd name: Check whether it falls back to generic action w/o facts gather_facts: false tasks: - service: name=icinga state=restarted delegate_to: monitoring_running_sysvinit - hosts: client_with_systemd name: Delegated to host running sysvinit, force 'service' handler tasks: - service: name=icinga state=restarted use=service delegate_to: monitoring_running_sysvinit - hosts: client_with_systemd name: Delegated to host running sysvinit tasks: - service: name=icinga state=restarted delegate_to: monitoring_running_sysvinit ``` ##### EXPECTED RESULTS All four actions should restart the icinga service on monitoring_running_sysvinit successfully. ##### ACTUAL RESULTS The first action, running directly on the monitoring_running_sysvinit host restarts correctly. Delegation works when we're working around the bug by either not gathering facts (and so, forcing client_with_systemd's service_mgr to the fallback `service`) or by overwriting the helper action to `service` by setting the `use` action for the delegated service action. 
When just delegating from the systemd host to the sysvinit (or any other combination), the incompatible action fails: ``` $ ansible-playbook test-delegation-service.yml --sudo --verbose --diff PLAY [Verify that direct service restart works] ******************************** TASK [setup] ******************************************************************* ok: [monitoring_running_sysvinit] TASK [service] ***************************************************************** changed: [monitoring_running_sysvinit] => {"changed": true, "name": "icinga", "state": "started"} PLAY [Check whether it falls back to generic action w/o facts] ***************** TASK [service] ***************************************************************** changed: [client_with_systemd -> monitoring_running_sysvinit] => {"changed": true, "name": "icinga", "state": "started"} PLAY [Delegated to host running sysvinit, force 'service' handler] ************* TASK [setup] ******************************************************************* ok: [client_with_systemd] TASK [service] ***************************************************************** changed: [client_with_systemd -> monitoring_running_sysvinit] => {"changed": true, "name": "icinga", "state": "started"} PLAY [Delegated to host running sysvinit] ************************************** TASK [setup] ******************************************************************* ok: [client_with_systemd] TASK [service] ***************************************************************** fatal: [client_with_systemd -> monitoring_running_sysvinit]: FAILED! => {"changed": false, "cmd": "None show icinga", "failed": true, "msg": "[Errno 2] No such file or directory", "rc": 2} to retry, use: --limit @test-delegation-service.retry PLAY RECAP ********************************************************************* client_with_systemd : ok=4 changed=2 unreachable=0 failed=1 monitoring_running_sysvinit : ok=2 changed=1 unreachable=0 failed=0 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/plugins/action/service.py` Content: ``` 1 # (c) 2015, Ansible Inc, 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
17 from __future__ import (absolute_import, division, print_function) 18 __metaclass__ = type 19 20 21 from ansible.plugins.action import ActionBase 22 23 24 class ActionModule(ActionBase): 25 26 TRANSFERS_FILES = False 27 28 UNUSED_PARAMS = { 29 'systemd': ['pattern', 'runlevels', 'sleep', 'arguments'], 30 } 31 32 def run(self, tmp=None, task_vars=None): 33 ''' handler for package operations ''' 34 if task_vars is None: 35 task_vars = dict() 36 37 result = super(ActionModule, self).run(tmp, task_vars) 38 39 module = self._task.args.get('use', 'auto').lower() 40 41 if module == 'auto': 42 try: 43 module = self._templar.template('{{ansible_service_mgr}}') 44 except: 45 pass # could not get it from template! 46 47 if module == 'auto': 48 facts = self._execute_module(module_name='setup', module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars) 49 self._display.debug("Facts %s" % facts) 50 if 'ansible_facts' in facts and 'ansible_service_mgr' in facts['ansible_facts']: 51 module = facts['ansible_facts']['ansible_service_mgr'] 52 53 if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader: 54 module = 'service' 55 56 if module != 'auto': 57 # run the 'service' module 58 new_module_args = self._task.args.copy() 59 if 'use' in new_module_args: 60 del new_module_args['use'] 61 62 # for backwards compatibility 63 if 'state' in new_module_args and new_module_args['state'] == 'running': 64 new_module_args['state'] = 'started' 65 66 if module in self.UNUSED_PARAMS: 67 for unused in self.UNUSED_PARAMS[module]: 68 if unused in new_module_args: 69 del new_module_args[unused] 70 self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module)) 71 72 self._display.vvvv("Running %s" % module) 73 result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars)) 74 else: 75 result['failed'] = True 76 result['msg'] = 'Could not detect which service manager to use. Try gathering facts or setting the "use" option.' 77 78 return result 79 ``` Path: `lib/ansible/plugins/action/package.py` Content: ``` 1 # (c) 2015, Ansible Inc, 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
17 from __future__ import (absolute_import, division, print_function) 18 __metaclass__ = type 19 20 from ansible.plugins.action import ActionBase 21 22 try: 23 from __main__ import display 24 except ImportError: 25 from ansible.utils.display import Display 26 display = Display() 27 28 29 class ActionModule(ActionBase): 30 31 TRANSFERS_FILES = False 32 33 def run(self, tmp=None, task_vars=None): 34 ''' handler for package operations ''' 35 if task_vars is None: 36 task_vars = dict() 37 38 result = super(ActionModule, self).run(tmp, task_vars) 39 40 module = self._task.args.get('use', 'auto') 41 42 if module == 'auto': 43 try: 44 module = self._templar.template('{{ansible_pkg_mgr}}') 45 except: 46 pass # could not get it from template! 47 48 if module == 'auto': 49 facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'), task_vars=task_vars) 50 display.debug("Facts %s" % facts) 51 if 'ansible_facts' in facts and 'ansible_pkg_mgr' in facts['ansible_facts']: 52 module = getattr(facts['ansible_facts'], 'ansible_pkg_mgr', 'auto') 53 54 if module != 'auto': 55 56 if module not in self._shared_loader_obj.module_loader: 57 result['failed'] = True 58 result['msg'] = 'Could not find a module for %s.' % module 59 return result 60 61 # run the 'package' module 62 new_module_args = self._task.args.copy() 63 if 'use' in new_module_args: 64 del new_module_args['use'] 65 66 display.vvvv("Running %s" % module) 67 result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars)) 68 return result 69 else: 70 result['failed'] = True 71 result['msg'] = 'Could not detect which package manager to use. Try gathering facts or setting the "use" option.' 72 return result 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py --- a/lib/ansible/plugins/action/package.py +++ b/lib/ansible/plugins/action/package.py @@ -41,7 +41,10 @@ if module == 'auto': try: - module = self._templar.template('{{ansible_pkg_mgr}}') + if self._task.delegate_to: # if we delegate, we should use delegated host's facts + module = self._templar.template("{{hostvars['%s']['ansible_pkg_mgr']}}" % self._task.delegate_to) + else: + module = self._templar.template('{{ansible_pkg_mgr}}') except: pass # could not get it from template! diff --git a/lib/ansible/plugins/action/service.py b/lib/ansible/plugins/action/service.py --- a/lib/ansible/plugins/action/service.py +++ b/lib/ansible/plugins/action/service.py @@ -40,7 +40,10 @@ if module == 'auto': try: - module = self._templar.template('{{ansible_service_mgr}}') + if self._task.delegate_to: # if we delegate, we should use delegated host's facts + module = self._templar.template("{{hostvars['%s']['ansible_service_mgr']}}" % self._task.delegate_to) + else: + module = self._templar.template('{{ansible_service_mgr}}') except: pass # could not get it from template!
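For context, a minimal plain-Python sketch of the behaviour this diff changes. The `hostvars` mapping and host names below are made-up assumptions, and the lookup is shown as a plain dict access rather than the Jinja2 `self._templar.template(...)` call the patch actually uses.

```python
# Hypothetical inventory facts, not real Ansible data.
hostvars = {
    "client_with_systemd": {"ansible_service_mgr": "systemd"},
    "monitoring_running_sysvinit": {"ansible_service_mgr": "service"},
}

def pick_service_module(current_host, delegate_to=None):
    # Post-patch selection: when delegate_to is set, read the delegated
    # host's fact; the pre-patch code effectively ignored delegate_to and
    # always used the current host's fact.
    target = delegate_to if delegate_to else current_host
    return hostvars[target]["ansible_service_mgr"]

print(pick_service_module("client_with_systemd"))
# systemd -- the wrong module when the task actually runs on the sysvinit host
print(pick_service_module("client_with_systemd",
                          delegate_to="monitoring_running_sysvinit"))
# service -- matches the init system of the host the task is delegated to
```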
{"golden_diff": "diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py\n--- a/lib/ansible/plugins/action/package.py\n+++ b/lib/ansible/plugins/action/package.py\n@@ -41,7 +41,10 @@\n \n if module == 'auto':\n try:\n- module = self._templar.template('{{ansible_pkg_mgr}}')\n+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts\n+ module = self._templar.template(\"{{hostvars['%s']['ansible_pkg_mgr']}}\" % self._task.delegate_to)\n+ else:\n+ module = self._templar.template('{{ansible_pkg_mgr}}')\n except:\n pass # could not get it from template!\n \ndiff --git a/lib/ansible/plugins/action/service.py b/lib/ansible/plugins/action/service.py\n--- a/lib/ansible/plugins/action/service.py\n+++ b/lib/ansible/plugins/action/service.py\n@@ -40,7 +40,10 @@\n \n if module == 'auto':\n try:\n- module = self._templar.template('{{ansible_service_mgr}}')\n+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts\n+ module = self._templar.template(\"{{hostvars['%s']['ansible_service_mgr']}}\" % self._task.delegate_to)\n+ else:\n+ module = self._templar.template('{{ansible_service_mgr}}')\n except:\n pass # could not get it from template!\n", "issue": "Delegation of service action fails when using different init systems\nHello ansible devs,\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\n`service` action, as determined by `lib/ansible/plugins/action/service.py`\n##### ANSIBLE VERSION\n\n```\nansible 2.2.0 (devel 1c33b5a9f0) last updated 2016/08/30 09:23:53 (GMT +200)\n lib/ansible/modules/core: (detached HEAD 5310bab12f) last updated 2016/08/30 09:23:59 (GMT +200)\n lib/ansible/modules/extras: (detached HEAD 2ef4a34eee) last updated 2016/08/30 09:23:59 (GMT +200)\n config file = \n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nnothing specific to this bug\n##### OS / ENVIRONMENT\n\nDebian stretch/testing on amd64, clients are wheezy systems running sysvinit and jessie systems running systemd.\n##### SUMMARY\n\nDepending on the gathered host systems facts, ansible decides which `service_mgr` to use. On systemd systems, this is going to be `systemd`, on sysvinit systems, it's `service` and so on. The problem is, when delegating a service action from a system running systemd to a system running something else, ansible still uses the systemd service_mgr, which obviously fails.\n\nA workaround is forcing the action to `service` with the un(der)documented `use` option as shown in the third play.\n##### STEPS TO REPRODUCE\n\nconsider the following playbook. on some_sysvinit_host, the icinga service needs to be restarted. 
This fails when that action is delegated and no special care is taken to work around the bug:\n\n```\n- hosts: monitoring_running_sysvinit\n name: Verify that direct service restart works\n tasks:\n - service: name=icinga state=restarted\n\n- hosts: client_with_systemd\n name: Check whether it falls back to generic action w/o facts\n gather_facts: false\n tasks:\n - service: name=icinga state=restarted\n delegate_to: monitoring_running_sysvinit\n\n- hosts: client_with_systemd\n name: Delegated to host running sysvinit, force 'service' handler\n tasks:\n - service: name=icinga state=restarted use=service\n delegate_to: monitoring_running_sysvinit\n\n- hosts: client_with_systemd\n name: Delegated to host running sysvinit\n tasks:\n - service: name=icinga state=restarted\n delegate_to: monitoring_running_sysvinit\n```\n##### EXPECTED RESULTS\n\nAll four actions should restart the icinga service on monitoring_running_sysvinit successfully.\n##### ACTUAL RESULTS\n\nThe first action, running directly on the monitoring_running_sysvinit host restarts correctly.\n\nDelegation works when we're working around the bug by either not gathering facts (and so, forcing client_with_systemd's service_mgr to the fallback `service`) or by overwriting the helper action to `service` by setting the `use` action for the delegated service action.\n\nWhen just delegating from the systemd host to the sysvinit (or any other combination), the incompatible action fails:\n\n```\n$ ansible-playbook test-delegation-service.yml --sudo --verbose --diff\nPLAY [Verify that direct service restart works] ********************************\n\nTASK [setup] *******************************************************************\nok: [monitoring_running_sysvinit]\n\nTASK [service] *****************************************************************\nchanged: [monitoring_running_sysvinit] => {\"changed\": true, \"name\": \"icinga\", \"state\": \"started\"}\n\nPLAY [Check whether it falls back to generic action w/o facts] *****************\n\nTASK [service] *****************************************************************\nchanged: [client_with_systemd -> monitoring_running_sysvinit] => {\"changed\": true, \"name\": \"icinga\", \"state\": \"started\"}\n\nPLAY [Delegated to host running sysvinit, force 'service' handler] *************\n\nTASK [setup] *******************************************************************\nok: [client_with_systemd]\n\nTASK [service] *****************************************************************\nchanged: [client_with_systemd -> monitoring_running_sysvinit] => {\"changed\": true, \"name\": \"icinga\", \"state\": \"started\"}\n\nPLAY [Delegated to host running sysvinit] **************************************\n\nTASK [setup] *******************************************************************\nok: [client_with_systemd]\n\nTASK [service] *****************************************************************\nfatal: [client_with_systemd -> monitoring_running_sysvinit]: FAILED! 
=> {\"changed\": false, \"cmd\": \"None show icinga\", \"failed\": true, \"msg\": \"[Errno 2] No such file or directory\", \"rc\": 2}\n to retry, use: --limit @test-delegation-service.retry\n\nPLAY RECAP *********************************************************************\nclient_with_systemd : ok=4 changed=2 unreachable=0 failed=1 \nmonitoring_running_sysvinit : ok=2 changed=1 unreachable=0 failed=0 \n\n```\n\n", "before_files": [{"content": "# (c) 2015, Ansible Inc,\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n UNUSED_PARAMS = {\n 'systemd': ['pattern', 'runlevels', 'sleep', 'arguments'],\n }\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for package operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n\n module = self._task.args.get('use', 'auto').lower()\n\n if module == 'auto':\n try:\n module = self._templar.template('{{ansible_service_mgr}}')\n except:\n pass # could not get it from template!\n\n if module == 'auto':\n facts = self._execute_module(module_name='setup', module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)\n self._display.debug(\"Facts %s\" % facts)\n if 'ansible_facts' in facts and 'ansible_service_mgr' in facts['ansible_facts']:\n module = facts['ansible_facts']['ansible_service_mgr']\n\n if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader:\n module = 'service'\n\n if module != 'auto':\n # run the 'service' module\n new_module_args = self._task.args.copy()\n if 'use' in new_module_args:\n del new_module_args['use']\n\n # for backwards compatibility\n if 'state' in new_module_args and new_module_args['state'] == 'running':\n new_module_args['state'] = 'started'\n\n if module in self.UNUSED_PARAMS:\n for unused in self.UNUSED_PARAMS[module]:\n if unused in new_module_args:\n del new_module_args[unused]\n self._display.warning('Ignoring \"%s\" as it is not used in \"%s\"' % (unused, module))\n\n self._display.vvvv(\"Running %s\" % module)\n result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars))\n else:\n result['failed'] = True\n result['msg'] = 'Could not detect which service manager to use. 
Try gathering facts or setting the \"use\" option.'\n\n return result\n", "path": "lib/ansible/plugins/action/service.py"}, {"content": "# (c) 2015, Ansible Inc,\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.plugins.action import ActionBase\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for package operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n\n module = self._task.args.get('use', 'auto')\n\n if module == 'auto':\n try:\n module = self._templar.template('{{ansible_pkg_mgr}}')\n except:\n pass # could not get it from template!\n\n if module == 'auto':\n facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'), task_vars=task_vars)\n display.debug(\"Facts %s\" % facts)\n if 'ansible_facts' in facts and 'ansible_pkg_mgr' in facts['ansible_facts']:\n module = getattr(facts['ansible_facts'], 'ansible_pkg_mgr', 'auto')\n\n if module != 'auto':\n\n if module not in self._shared_loader_obj.module_loader:\n result['failed'] = True\n result['msg'] = 'Could not find a module for %s.' % module\n return result\n\n # run the 'package' module\n new_module_args = self._task.args.copy()\n if 'use' in new_module_args:\n del new_module_args['use']\n\n display.vvvv(\"Running %s\" % module)\n result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars))\n return result\n else:\n result['failed'] = True\n result['msg'] = 'Could not detect which package manager to use. Try gathering facts or setting the \"use\" option.'\n return result\n", "path": "lib/ansible/plugins/action/package.py"}], "after_files": [{"content": "# (c) 2015, Ansible Inc,\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n UNUSED_PARAMS = {\n 'systemd': ['pattern', 'runlevels', 'sleep', 'arguments'],\n }\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for package operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n\n module = self._task.args.get('use', 'auto').lower()\n\n if module == 'auto':\n try:\n if self._task.delegate_to: # if we delegate, we should use delegated host's facts\n module = self._templar.template(\"{{hostvars['%s']['ansible_service_mgr']}}\" % self._task.delegate_to)\n else:\n module = self._templar.template('{{ansible_service_mgr}}')\n except:\n pass # could not get it from template!\n\n if module == 'auto':\n facts = self._execute_module(module_name='setup', module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)\n self._display.debug(\"Facts %s\" % facts)\n if 'ansible_facts' in facts and 'ansible_service_mgr' in facts['ansible_facts']:\n module = facts['ansible_facts']['ansible_service_mgr']\n\n if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader:\n module = 'service'\n\n if module != 'auto':\n # run the 'service' module\n new_module_args = self._task.args.copy()\n if 'use' in new_module_args:\n del new_module_args['use']\n\n # for backwards compatibility\n if 'state' in new_module_args and new_module_args['state'] == 'running':\n new_module_args['state'] = 'started'\n\n if module in self.UNUSED_PARAMS:\n for unused in self.UNUSED_PARAMS[module]:\n if unused in new_module_args:\n del new_module_args[unused]\n self._display.warning('Ignoring \"%s\" as it is not used in \"%s\"' % (unused, module))\n\n self._display.vvvv(\"Running %s\" % module)\n result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars))\n else:\n result['failed'] = True\n result['msg'] = 'Could not detect which service manager to use. Try gathering facts or setting the \"use\" option.'\n\n return result\n", "path": "lib/ansible/plugins/action/service.py"}, {"content": "# (c) 2015, Ansible Inc,\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.plugins.action import ActionBase\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for package operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n\n module = self._task.args.get('use', 'auto')\n\n if module == 'auto':\n try:\n if self._task.delegate_to: # if we delegate, we should use delegated host's facts\n module = self._templar.template(\"{{hostvars['%s']['ansible_pkg_mgr']}}\" % self._task.delegate_to)\n else:\n module = self._templar.template('{{ansible_pkg_mgr}}')\n except:\n pass # could not get it from template!\n\n if module == 'auto':\n facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'), task_vars=task_vars)\n display.debug(\"Facts %s\" % facts)\n if 'ansible_facts' in facts and 'ansible_pkg_mgr' in facts['ansible_facts']:\n module = getattr(facts['ansible_facts'], 'ansible_pkg_mgr', 'auto')\n\n if module != 'auto':\n\n if module not in self._shared_loader_obj.module_loader:\n result['failed'] = True\n result['msg'] = 'Could not find a module for %s.' % module\n return result\n\n # run the 'package' module\n new_module_args = self._task.args.copy()\n if 'use' in new_module_args:\n del new_module_args['use']\n\n display.vvvv(\"Running %s\" % module)\n result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars))\n return result\n else:\n result['failed'] = True\n result['msg'] = 'Could not detect which package manager to use. Try gathering facts or setting the \"use\" option.'\n return result\n", "path": "lib/ansible/plugins/action/package.py"}]}
2,984
325
gh_patches_debug_14750
rasdani/github-patches
git_diff
Qiskit__qiskit-4721
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- circuit -> schedule raises exception <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: master - **Python version**: - **Operating system**: ### What is the current behavior? ```python ghz = QuantumCircuit(5, 5) ghz.h(0) ghz.cx(range(4), range(1,5)) ghz.barrier() ghz.measure(range(5), range(5)) sch = schedule(ghz, backend) ``` gives: AttributeError: 'NoneType' object has no attribute 'instruction_schedule_map' This works on older versions. ### Steps to reproduce the problem ### What is the expected behavior? ### Suggested solutions --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qiskit/compiler/schedule.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # This code is part of Qiskit. 4 # 5 # (C) Copyright IBM 2019. 6 # 7 # This code is licensed under the Apache License, Version 2.0. You may 8 # obtain a copy of this license in the LICENSE.txt file in the root directory 9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 10 # 11 # Any modifications or derivative works of this code must retain this 12 # copyright notice, and modified files need to carry a notice indicating 13 # that they have been altered from the originals. 14 15 """ 16 Convenience entry point into pulse scheduling, requiring only a circuit and a backend. For more 17 control over pulse scheduling, look at `qiskit.scheduler.schedule_circuit`. 18 """ 19 import logging 20 21 from time import time 22 from typing import List, Optional, Union 23 24 from qiskit.circuit.quantumcircuit import QuantumCircuit 25 from qiskit.exceptions import QiskitError 26 from qiskit.pulse import InstructionScheduleMap, Schedule 27 from qiskit.providers import BaseBackend 28 from qiskit.scheduler import ScheduleConfig 29 from qiskit.scheduler.schedule_circuit import schedule_circuit 30 31 LOG = logging.getLogger(__name__) 32 33 34 def _log_schedule_time(start_time, end_time): 35 log_msg = "Total Scheduling Time - %.5f (ms)" % ((end_time - start_time) * 1000) 36 LOG.info(log_msg) 37 38 39 def schedule(circuits: Union[QuantumCircuit, List[QuantumCircuit]], 40 backend: Optional[BaseBackend] = None, 41 inst_map: Optional[InstructionScheduleMap] = None, 42 meas_map: Optional[List[List[int]]] = None, 43 method: Optional[Union[str, List[str]]] = None) -> Union[Schedule, List[Schedule]]: 44 """ 45 Schedule a circuit to a pulse ``Schedule``, using the backend, according to any specified 46 methods. Supported methods are documented in :py:mod:`qiskit.scheduler.schedule_circuit`. 47 48 Args: 49 circuits: The quantum circuit or circuits to translate 50 backend: A backend instance, which contains hardware-specific data required for scheduling 51 inst_map: Mapping of circuit operations to pulse schedules. If ``None``, defaults to the 52 ``backend``\'s ``instruction_schedule_map`` 53 meas_map: List of sets of qubits that must be measured together. 
If ``None``, defaults to 54 the ``backend``\'s ``meas_map`` 55 method: Optionally specify a particular scheduling method 56 57 Returns: 58 A pulse ``Schedule`` that implements the input circuit 59 60 Raises: 61 QiskitError: If ``inst_map`` and ``meas_map`` are not passed and ``backend`` is not passed 62 """ 63 start_time = time() 64 if inst_map is None: 65 if backend is None: 66 raise QiskitError("Must supply either a backend or InstructionScheduleMap for " 67 "scheduling passes.") 68 inst_map = backend.defaults().instruction_schedule_map 69 if meas_map is None: 70 if backend is None: 71 raise QiskitError("Must supply either a backend or a meas_map for scheduling passes.") 72 meas_map = backend.configuration().meas_map 73 74 schedule_config = ScheduleConfig(inst_map=inst_map, meas_map=meas_map) 75 circuits = circuits if isinstance(circuits, list) else [circuits] 76 schedules = [schedule_circuit(circuit, schedule_config, method) for circuit in circuits] 77 end_time = time() 78 _log_schedule_time(start_time, end_time) 79 return schedules[0] if len(schedules) == 1 else schedules 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qiskit/compiler/schedule.py b/qiskit/compiler/schedule.py --- a/qiskit/compiler/schedule.py +++ b/qiskit/compiler/schedule.py @@ -65,7 +65,11 @@ if backend is None: raise QiskitError("Must supply either a backend or InstructionScheduleMap for " "scheduling passes.") - inst_map = backend.defaults().instruction_schedule_map + defaults = backend.defaults() + if defaults is None: + raise QiskitError("The backend defaults are unavailable. The backend may not " + "support pulse.") + inst_map = defaults.instruction_schedule_map if meas_map is None: if backend is None: raise QiskitError("Must supply either a backend or a meas_map for scheduling passes.")
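A small stand-in sketch of the guard this diff introduces; `FakeBackend` and `FakeDefaults` are invented placeholders, not Qiskit classes, and only mimic the one behaviour that matters here (`defaults()` returning `None` on a backend without pulse support).

```python
class FakeDefaults:
    # Stand-in for the defaults object; the real one exposes this attribute.
    instruction_schedule_map = "inst_map"

class FakeBackend:
    def __init__(self, has_pulse):
        self._has_pulse = has_pulse

    def defaults(self):
        # A backend without pulse support yields no defaults, which is what
        # produced "'NoneType' object has no attribute
        # 'instruction_schedule_map'" before the patch.
        return FakeDefaults() if self._has_pulse else None

def get_inst_map(backend):
    defaults = backend.defaults()
    if defaults is None:
        raise RuntimeError("The backend defaults are unavailable. "
                           "The backend may not support pulse.")
    return defaults.instruction_schedule_map

print(get_inst_map(FakeBackend(has_pulse=True)))   # inst_map
try:
    get_inst_map(FakeBackend(has_pulse=False))
except RuntimeError as exc:
    print(exc)  # explicit, actionable error instead of an AttributeError
```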
{"golden_diff": "diff --git a/qiskit/compiler/schedule.py b/qiskit/compiler/schedule.py\n--- a/qiskit/compiler/schedule.py\n+++ b/qiskit/compiler/schedule.py\n@@ -65,7 +65,11 @@\n if backend is None:\n raise QiskitError(\"Must supply either a backend or InstructionScheduleMap for \"\n \"scheduling passes.\")\n- inst_map = backend.defaults().instruction_schedule_map\n+ defaults = backend.defaults()\n+ if defaults is None:\n+ raise QiskitError(\"The backend defaults are unavailable. The backend may not \"\n+ \"support pulse.\")\n+ inst_map = defaults.instruction_schedule_map\n if meas_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or a meas_map for scheduling passes.\")\n", "issue": "circuit -> schedule raises exception\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: master\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\n```python\r\nghz = QuantumCircuit(5, 5)\r\nghz.h(0)\r\nghz.cx(range(4), range(1,5))\r\nghz.barrier()\r\nghz.measure(range(5), range(5))\r\n\r\nsch = schedule(ghz, backend)\r\n```\r\n\r\ngives:\r\n\r\nAttributeError: 'NoneType' object has no attribute 'instruction_schedule_map'\r\n\r\nThis works on older versions.\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nConvenience entry point into pulse scheduling, requiring only a circuit and a backend. For more\ncontrol over pulse scheduling, look at `qiskit.scheduler.schedule_circuit`.\n\"\"\"\nimport logging\n\nfrom time import time\nfrom typing import List, Optional, Union\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.pulse import InstructionScheduleMap, Schedule\nfrom qiskit.providers import BaseBackend\nfrom qiskit.scheduler import ScheduleConfig\nfrom qiskit.scheduler.schedule_circuit import schedule_circuit\n\nLOG = logging.getLogger(__name__)\n\n\ndef _log_schedule_time(start_time, end_time):\n log_msg = \"Total Scheduling Time - %.5f (ms)\" % ((end_time - start_time) * 1000)\n LOG.info(log_msg)\n\n\ndef schedule(circuits: Union[QuantumCircuit, List[QuantumCircuit]],\n backend: Optional[BaseBackend] = None,\n inst_map: Optional[InstructionScheduleMap] = None,\n meas_map: Optional[List[List[int]]] = None,\n method: Optional[Union[str, List[str]]] = None) -> Union[Schedule, List[Schedule]]:\n \"\"\"\n Schedule a circuit to a pulse ``Schedule``, using the backend, according to any specified\n methods. 
Supported methods are documented in :py:mod:`qiskit.scheduler.schedule_circuit`.\n\n Args:\n circuits: The quantum circuit or circuits to translate\n backend: A backend instance, which contains hardware-specific data required for scheduling\n inst_map: Mapping of circuit operations to pulse schedules. If ``None``, defaults to the\n ``backend``\\'s ``instruction_schedule_map``\n meas_map: List of sets of qubits that must be measured together. If ``None``, defaults to\n the ``backend``\\'s ``meas_map``\n method: Optionally specify a particular scheduling method\n\n Returns:\n A pulse ``Schedule`` that implements the input circuit\n\n Raises:\n QiskitError: If ``inst_map`` and ``meas_map`` are not passed and ``backend`` is not passed\n \"\"\"\n start_time = time()\n if inst_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or InstructionScheduleMap for \"\n \"scheduling passes.\")\n inst_map = backend.defaults().instruction_schedule_map\n if meas_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or a meas_map for scheduling passes.\")\n meas_map = backend.configuration().meas_map\n\n schedule_config = ScheduleConfig(inst_map=inst_map, meas_map=meas_map)\n circuits = circuits if isinstance(circuits, list) else [circuits]\n schedules = [schedule_circuit(circuit, schedule_config, method) for circuit in circuits]\n end_time = time()\n _log_schedule_time(start_time, end_time)\n return schedules[0] if len(schedules) == 1 else schedules\n", "path": "qiskit/compiler/schedule.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nConvenience entry point into pulse scheduling, requiring only a circuit and a backend. For more\ncontrol over pulse scheduling, look at `qiskit.scheduler.schedule_circuit`.\n\"\"\"\nimport logging\n\nfrom time import time\nfrom typing import List, Optional, Union\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.pulse import InstructionScheduleMap, Schedule\nfrom qiskit.providers import BaseBackend\nfrom qiskit.scheduler import ScheduleConfig\nfrom qiskit.scheduler.schedule_circuit import schedule_circuit\n\nLOG = logging.getLogger(__name__)\n\n\ndef _log_schedule_time(start_time, end_time):\n log_msg = \"Total Scheduling Time - %.5f (ms)\" % ((end_time - start_time) * 1000)\n LOG.info(log_msg)\n\n\ndef schedule(circuits: Union[QuantumCircuit, List[QuantumCircuit]],\n backend: Optional[BaseBackend] = None,\n inst_map: Optional[InstructionScheduleMap] = None,\n meas_map: Optional[List[List[int]]] = None,\n method: Optional[Union[str, List[str]]] = None) -> Union[Schedule, List[Schedule]]:\n \"\"\"\n Schedule a circuit to a pulse ``Schedule``, using the backend, according to any specified\n methods. 
Supported methods are documented in :py:mod:`qiskit.scheduler.schedule_circuit`.\n\n Args:\n circuits: The quantum circuit or circuits to translate\n backend: A backend instance, which contains hardware-specific data required for scheduling\n inst_map: Mapping of circuit operations to pulse schedules. If ``None``, defaults to the\n ``backend``\\'s ``instruction_schedule_map``\n meas_map: List of sets of qubits that must be measured together. If ``None``, defaults to\n the ``backend``\\'s ``meas_map``\n method: Optionally specify a particular scheduling method\n\n Returns:\n A pulse ``Schedule`` that implements the input circuit\n\n Raises:\n QiskitError: If ``inst_map`` and ``meas_map`` are not passed and ``backend`` is not passed\n \"\"\"\n start_time = time()\n if inst_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or InstructionScheduleMap for \"\n \"scheduling passes.\")\n defaults = backend.defaults()\n if defaults is None:\n raise QiskitError(\"The backend defaults are unavailable. The backend may not \"\n \"support pulse.\")\n inst_map = defaults.instruction_schedule_map\n if meas_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or a meas_map for scheduling passes.\")\n meas_map = backend.configuration().meas_map\n\n schedule_config = ScheduleConfig(inst_map=inst_map, meas_map=meas_map)\n circuits = circuits if isinstance(circuits, list) else [circuits]\n schedules = [schedule_circuit(circuit, schedule_config, method) for circuit in circuits]\n end_time = time()\n _log_schedule_time(start_time, end_time)\n return schedules[0] if len(schedules) == 1 else schedules\n", "path": "qiskit/compiler/schedule.py"}]}
1,375
178
gh_patches_debug_6706
rasdani/github-patches
git_diff
ckan__ckan-2600
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fileupload removes underscores from filenames There doesn't seem to be any reason to forbid underscores in filenames, especially since munge allows them elsewhere. Looks like someone just forgot to add an underscore to the replace function. Have a PR all ready to go with fix and updated test, just need to know what branch to submit it to ;) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckan/lib/munge.py` Content: ``` 1 # Note these functions are similar to, but separate from name/title mungers 2 # found in the ckanext importer. That one needs to be stable to prevent 3 # packages changing name on reimport, but these ones can be changed and 4 # improved. 5 6 import re 7 import os.path 8 9 from ckan import model 10 11 12 def munge_name(name): 13 '''Munges the package name field in case it is not to spec.''' 14 # substitute non-ascii characters 15 if isinstance(name, unicode): 16 name = substitute_ascii_equivalents(name) 17 # separators become dashes 18 name = re.sub('[ .:/]', '-', name) 19 # take out not-allowed characters 20 name = re.sub('[^a-zA-Z0-9-_]', '', name).lower() 21 # keep it within the length spec 22 name = _munge_to_length(name, model.PACKAGE_NAME_MIN_LENGTH, 23 model.PACKAGE_NAME_MAX_LENGTH) 24 return name 25 26 27 def munge_title_to_name(name): 28 '''Munge a package title into a package name.''' 29 # substitute non-ascii characters 30 if isinstance(name, unicode): 31 name = substitute_ascii_equivalents(name) 32 # convert spaces and separators 33 name = re.sub('[ .:/]', '-', name) 34 # take out not-allowed characters 35 name = re.sub('[^a-zA-Z0-9-_]', '', name).lower() 36 # remove doubles 37 name = re.sub('--', '-', name) 38 # remove leading or trailing hyphens 39 name = name.strip('-') 40 # if longer than max_length, keep last word if a year 41 max_length = model.PACKAGE_NAME_MAX_LENGTH - 5 42 # (make length less than max, in case we need a few for '_' chars 43 # to de-clash names.) 44 if len(name) > max_length: 45 year_match = re.match('.*?[_-]((?:\d{2,4}[-/])?\d{2,4})$', name) 46 if year_match: 47 year = year_match.groups()[0] 48 name = '%s-%s' % (name[:(max_length-len(year)-1)], year) 49 else: 50 name = name[:max_length] 51 name = _munge_to_length(name, model.PACKAGE_NAME_MIN_LENGTH, 52 model.PACKAGE_NAME_MAX_LENGTH) 53 return name 54 55 56 def substitute_ascii_equivalents(text_unicode): 57 # Method taken from: http://code.activestate.com/recipes/251871/ 58 """ 59 This takes a UNICODE string and replaces Latin-1 characters with something 60 equivalent in 7-bit ASCII. It returns a plain ASCII string. This function 61 makes a best effort to convert Latin-1 characters into ASCII equivalents. 62 It does not just strip out the Latin-1 characters. All characters in the 63 standard 7-bit ASCII range are preserved. In the 8th bit range all the 64 Latin-1 accented letters are converted to unaccented equivalents. Most 65 symbol characters are converted to something meaningful. Anything not 66 converted is deleted. 
67 """ 68 char_mapping = { 69 0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A', 70 0xc6: 'Ae', 0xc7: 'C', 71 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 72 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I', 73 0xd0: 'Th', 0xd1: 'N', 74 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O', 75 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U', 76 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss', 77 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a', 78 0xe6: 'ae', 0xe7: 'c', 79 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 80 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i', 81 0xf0: 'th', 0xf1: 'n', 82 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o', 83 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u', 84 0xfd: 'y', 0xfe: 'th', 0xff: 'y', 85 #0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}', 86 #0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}', 87 #0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}', 88 #0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}', 89 #0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4:"'", 90 #0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}', 91 #0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>', 92 #0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?', 93 #0xd7: '*', 0xf7: '/' 94 } 95 96 r = '' 97 for char in text_unicode: 98 if ord(char) in char_mapping: 99 r += char_mapping[ord(char)] 100 elif ord(char) >= 0x80: 101 pass 102 else: 103 r += str(char) 104 return r 105 106 107 def munge_tag(tag): 108 tag = substitute_ascii_equivalents(tag) 109 tag = tag.lower().strip() 110 tag = re.sub(r'[^a-zA-Z0-9\- ]', '', tag).replace(' ', '-') 111 tag = _munge_to_length(tag, model.MIN_TAG_LENGTH, model.MAX_TAG_LENGTH) 112 return tag 113 114 115 def munge_filename_legacy(filename): 116 ''' Tidies a filename. NB: deprecated 117 118 Unfortunately it mangles any path or filename extension, so is deprecated. 119 It needs to remain unchanged for use by group_dictize() and 120 Upload.update_data_dict() because if this routine changes then group images 121 uploaded previous to the change may not be viewable. 122 ''' 123 filename = substitute_ascii_equivalents(filename) 124 filename = filename.strip() 125 filename = re.sub(r'[^a-zA-Z0-9.\- ]', '', filename).replace(' ', '-') 126 filename = _munge_to_length(filename, 3, 100) 127 return filename 128 129 130 def munge_filename(filename): 131 ''' Tidies a filename 132 133 Keeps the filename extension (e.g. .csv). 134 Strips off any path on the front. 135 ''' 136 137 # just get the filename ignore the path 138 path, filename = os.path.split(filename) 139 # clean up 140 filename = substitute_ascii_equivalents(filename) 141 filename = filename.lower().strip() 142 filename = re.sub(r'[^a-zA-Z0-9. -]', '', filename).replace(' ', '-') 143 # resize if needed but keep extension 144 name, ext = os.path.splitext(filename) 145 # limit overly long extensions 146 if len(ext) > 21: 147 ext = ext[:21] 148 # max/min size 149 ext_length = len(ext) 150 name = _munge_to_length(name, max(3 - ext_length, 1), 100 - ext_length) 151 filename = name + ext 152 153 return filename 154 155 156 def _munge_to_length(string, min_length, max_length): 157 '''Pad/truncates a string''' 158 if len(string) < min_length: 159 string += '_' * (min_length - len(string)) 160 if len(string) > max_length: 161 string = string[:max_length] 162 return string 163 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/lib/munge.py b/ckan/lib/munge.py --- a/ckan/lib/munge.py +++ b/ckan/lib/munge.py @@ -139,7 +139,7 @@ # clean up filename = substitute_ascii_equivalents(filename) filename = filename.lower().strip() - filename = re.sub(r'[^a-zA-Z0-9. -]', '', filename).replace(' ', '-') + filename = re.sub(r'[^a-zA-Z0-9_. -]', '', filename).replace(' ', '-') # resize if needed but keep extension name, ext = os.path.splitext(filename) # limit overly long extensions
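The one-character regex change is easy to verify in isolation; the snippet below reimplements just the substitution step with no CKAN imports, so the function names are local stand-ins for `munge_filename` rather than the real API.

```python
import re

def sub_old(filename):
    # Character class without '_': underscores are silently dropped.
    return re.sub(r'[^a-zA-Z0-9. -]', '', filename.lower().strip()).replace(' ', '-')

def sub_new(filename):
    # Patched character class keeps '_', consistent with munge_name() and
    # munge_title_to_name(), which already allow underscores.
    return re.sub(r'[^a-zA-Z0-9_. -]', '', filename.lower().strip()).replace(' ', '-')

print(sub_old("Annual Report_2015.csv"))  # annual-report2015.csv
print(sub_new("Annual Report_2015.csv"))  # annual-report_2015.csv
```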
{"golden_diff": "diff --git a/ckan/lib/munge.py b/ckan/lib/munge.py\n--- a/ckan/lib/munge.py\n+++ b/ckan/lib/munge.py\n@@ -139,7 +139,7 @@\n # clean up\n filename = substitute_ascii_equivalents(filename)\n filename = filename.lower().strip()\n- filename = re.sub(r'[^a-zA-Z0-9. -]', '', filename).replace(' ', '-')\n+ filename = re.sub(r'[^a-zA-Z0-9_. -]', '', filename).replace(' ', '-')\n # resize if needed but keep extension\n name, ext = os.path.splitext(filename)\n # limit overly long extensions\n", "issue": "Fileupload removes underscores from filenames\nThere doesn't seem to be any reason to forbid underscores in filenames, especially since munge allows them elsewhere. Looks like someone just forgot to add an underscore to the replace function. Have a PR all ready to go with fix and updated test, just need to know what branch to submit it to ;)\n\n", "before_files": [{"content": "# Note these functions are similar to, but separate from name/title mungers\n# found in the ckanext importer. That one needs to be stable to prevent\n# packages changing name on reimport, but these ones can be changed and\n# improved.\n\nimport re\nimport os.path\n\nfrom ckan import model\n\n\ndef munge_name(name):\n '''Munges the package name field in case it is not to spec.'''\n # substitute non-ascii characters\n if isinstance(name, unicode):\n name = substitute_ascii_equivalents(name)\n # separators become dashes\n name = re.sub('[ .:/]', '-', name)\n # take out not-allowed characters\n name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()\n # keep it within the length spec\n name = _munge_to_length(name, model.PACKAGE_NAME_MIN_LENGTH,\n model.PACKAGE_NAME_MAX_LENGTH)\n return name\n\n\ndef munge_title_to_name(name):\n '''Munge a package title into a package name.'''\n # substitute non-ascii characters\n if isinstance(name, unicode):\n name = substitute_ascii_equivalents(name)\n # convert spaces and separators\n name = re.sub('[ .:/]', '-', name)\n # take out not-allowed characters\n name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()\n # remove doubles\n name = re.sub('--', '-', name)\n # remove leading or trailing hyphens\n name = name.strip('-')\n # if longer than max_length, keep last word if a year\n max_length = model.PACKAGE_NAME_MAX_LENGTH - 5\n # (make length less than max, in case we need a few for '_' chars\n # to de-clash names.)\n if len(name) > max_length:\n year_match = re.match('.*?[_-]((?:\\d{2,4}[-/])?\\d{2,4})$', name)\n if year_match:\n year = year_match.groups()[0]\n name = '%s-%s' % (name[:(max_length-len(year)-1)], year)\n else:\n name = name[:max_length]\n name = _munge_to_length(name, model.PACKAGE_NAME_MIN_LENGTH,\n model.PACKAGE_NAME_MAX_LENGTH)\n return name\n\n\ndef substitute_ascii_equivalents(text_unicode):\n # Method taken from: http://code.activestate.com/recipes/251871/\n \"\"\"\n This takes a UNICODE string and replaces Latin-1 characters with something\n equivalent in 7-bit ASCII. It returns a plain ASCII string. This function\n makes a best effort to convert Latin-1 characters into ASCII equivalents.\n It does not just strip out the Latin-1 characters. All characters in the\n standard 7-bit ASCII range are preserved. In the 8th bit range all the\n Latin-1 accented letters are converted to unaccented equivalents. Most\n symbol characters are converted to something meaningful. 
Anything not\n converted is deleted.\n \"\"\"\n char_mapping = {\n 0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n 0xc6: 'Ae', 0xc7: 'C',\n 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E',\n 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n 0xd0: 'Th', 0xd1: 'N',\n 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n 0xe6: 'ae', 0xe7: 'c',\n 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e',\n 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n 0xf0: 'th', 0xf1: 'n',\n 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n #0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n #0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n #0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',\n #0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n #0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4:\"'\",\n #0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n #0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',\n #0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n #0xd7: '*', 0xf7: '/'\n }\n\n r = ''\n for char in text_unicode:\n if ord(char) in char_mapping:\n r += char_mapping[ord(char)]\n elif ord(char) >= 0x80:\n pass\n else:\n r += str(char)\n return r\n\n\ndef munge_tag(tag):\n tag = substitute_ascii_equivalents(tag)\n tag = tag.lower().strip()\n tag = re.sub(r'[^a-zA-Z0-9\\- ]', '', tag).replace(' ', '-')\n tag = _munge_to_length(tag, model.MIN_TAG_LENGTH, model.MAX_TAG_LENGTH)\n return tag\n\n\ndef munge_filename_legacy(filename):\n ''' Tidies a filename. NB: deprecated\n\n Unfortunately it mangles any path or filename extension, so is deprecated.\n It needs to remain unchanged for use by group_dictize() and\n Upload.update_data_dict() because if this routine changes then group images\n uploaded previous to the change may not be viewable.\n '''\n filename = substitute_ascii_equivalents(filename)\n filename = filename.strip()\n filename = re.sub(r'[^a-zA-Z0-9.\\- ]', '', filename).replace(' ', '-')\n filename = _munge_to_length(filename, 3, 100)\n return filename\n\n\ndef munge_filename(filename):\n ''' Tidies a filename\n\n Keeps the filename extension (e.g. .csv).\n Strips off any path on the front.\n '''\n\n # just get the filename ignore the path\n path, filename = os.path.split(filename)\n # clean up\n filename = substitute_ascii_equivalents(filename)\n filename = filename.lower().strip()\n filename = re.sub(r'[^a-zA-Z0-9. -]', '', filename).replace(' ', '-')\n # resize if needed but keep extension\n name, ext = os.path.splitext(filename)\n # limit overly long extensions\n if len(ext) > 21:\n ext = ext[:21]\n # max/min size\n ext_length = len(ext)\n name = _munge_to_length(name, max(3 - ext_length, 1), 100 - ext_length)\n filename = name + ext\n\n return filename\n\n\ndef _munge_to_length(string, min_length, max_length):\n '''Pad/truncates a string'''\n if len(string) < min_length:\n string += '_' * (min_length - len(string))\n if len(string) > max_length:\n string = string[:max_length]\n return string\n", "path": "ckan/lib/munge.py"}], "after_files": [{"content": "# Note these functions are similar to, but separate from name/title mungers\n# found in the ckanext importer. 
That one needs to be stable to prevent\n# packages changing name on reimport, but these ones can be changed and\n# improved.\n\nimport re\nimport os.path\n\nfrom ckan import model\n\n\ndef munge_name(name):\n '''Munges the package name field in case it is not to spec.'''\n # substitute non-ascii characters\n if isinstance(name, unicode):\n name = substitute_ascii_equivalents(name)\n # separators become dashes\n name = re.sub('[ .:/]', '-', name)\n # take out not-allowed characters\n name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()\n # keep it within the length spec\n name = _munge_to_length(name, model.PACKAGE_NAME_MIN_LENGTH,\n model.PACKAGE_NAME_MAX_LENGTH)\n return name\n\n\ndef munge_title_to_name(name):\n '''Munge a package title into a package name.'''\n # substitute non-ascii characters\n if isinstance(name, unicode):\n name = substitute_ascii_equivalents(name)\n # convert spaces and separators\n name = re.sub('[ .:/]', '-', name)\n # take out not-allowed characters\n name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()\n # remove doubles\n name = re.sub('--', '-', name)\n # remove leading or trailing hyphens\n name = name.strip('-')\n # if longer than max_length, keep last word if a year\n max_length = model.PACKAGE_NAME_MAX_LENGTH - 5\n # (make length less than max, in case we need a few for '_' chars\n # to de-clash names.)\n if len(name) > max_length:\n year_match = re.match('.*?[_-]((?:\\d{2,4}[-/])?\\d{2,4})$', name)\n if year_match:\n year = year_match.groups()[0]\n name = '%s-%s' % (name[:(max_length-len(year)-1)], year)\n else:\n name = name[:max_length]\n name = _munge_to_length(name, model.PACKAGE_NAME_MIN_LENGTH,\n model.PACKAGE_NAME_MAX_LENGTH)\n return name\n\n\ndef substitute_ascii_equivalents(text_unicode):\n # Method taken from: http://code.activestate.com/recipes/251871/\n \"\"\"\n This takes a UNICODE string and replaces Latin-1 characters with something\n equivalent in 7-bit ASCII. It returns a plain ASCII string. This function\n makes a best effort to convert Latin-1 characters into ASCII equivalents.\n It does not just strip out the Latin-1 characters. All characters in the\n standard 7-bit ASCII range are preserved. In the 8th bit range all the\n Latin-1 accented letters are converted to unaccented equivalents. Most\n symbol characters are converted to something meaningful. 
Anything not\n converted is deleted.\n \"\"\"\n char_mapping = {\n 0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n 0xc6: 'Ae', 0xc7: 'C',\n 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E',\n 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n 0xd0: 'Th', 0xd1: 'N',\n 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n 0xe6: 'ae', 0xe7: 'c',\n 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e',\n 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n 0xf0: 'th', 0xf1: 'n',\n 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n #0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n #0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n #0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',\n #0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n #0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4:\"'\",\n #0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n #0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',\n #0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n #0xd7: '*', 0xf7: '/'\n }\n\n r = ''\n for char in text_unicode:\n if ord(char) in char_mapping:\n r += char_mapping[ord(char)]\n elif ord(char) >= 0x80:\n pass\n else:\n r += str(char)\n return r\n\n\ndef munge_tag(tag):\n tag = substitute_ascii_equivalents(tag)\n tag = tag.lower().strip()\n tag = re.sub(r'[^a-zA-Z0-9\\- ]', '', tag).replace(' ', '-')\n tag = _munge_to_length(tag, model.MIN_TAG_LENGTH, model.MAX_TAG_LENGTH)\n return tag\n\n\ndef munge_filename_legacy(filename):\n ''' Tidies a filename. NB: deprecated\n\n Unfortunately it mangles any path or filename extension, so is deprecated.\n It needs to remain unchanged for use by group_dictize() and\n Upload.update_data_dict() because if this routine changes then group images\n uploaded previous to the change may not be viewable.\n '''\n filename = substitute_ascii_equivalents(filename)\n filename = filename.strip()\n filename = re.sub(r'[^a-zA-Z0-9.\\- ]', '', filename).replace(' ', '-')\n filename = _munge_to_length(filename, 3, 100)\n return filename\n\n\ndef munge_filename(filename):\n ''' Tidies a filename\n\n Keeps the filename extension (e.g. .csv).\n Strips off any path on the front.\n '''\n\n # just get the filename ignore the path\n path, filename = os.path.split(filename)\n # clean up\n filename = substitute_ascii_equivalents(filename)\n filename = filename.lower().strip()\n filename = re.sub(r'[^a-zA-Z0-9_. -]', '', filename).replace(' ', '-')\n # resize if needed but keep extension\n name, ext = os.path.splitext(filename)\n # limit overly long extensions\n if len(ext) > 21:\n ext = ext[:21]\n # max/min size\n ext_length = len(ext)\n name = _munge_to_length(name, max(3 - ext_length, 1), 100 - ext_length)\n filename = name + ext\n\n return filename\n\n\ndef _munge_to_length(string, min_length, max_length):\n '''Pad/truncates a string'''\n if len(string) < min_length:\n string += '_' * (min_length - len(string))\n if len(string) > max_length:\n string = string[:max_length]\n return string\n", "path": "ckan/lib/munge.py"}]}
2,683
153
gh_patches_debug_6245
rasdani/github-patches
git_diff
ansible__ansible-lint-303
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- False Positive ANSIBLE0014 does not allow command:args:stdin # Issue Type - Bug report # Ansible and Ansible Lint details ``` ansible --version ansible 2.4.0.0 config file = /etc/ansible/ansible.cfg configured module search path = [u'$HOME/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Aug 2 2017, 11:05:32) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)] ansible-lint --version ansible-lint 3.4.17 ``` - ansible installation method: OS package - ansible-lint installation method: pip # Desired Behaviour The `stdin` argument to the `command` module should not trigger the "Environment variables don't work as part of command" error. # Actual Behaviour (Bug report only) The EnvVarsInCommandRule (ANSIBLE0014) linter rule rejects the following playbook: ``` - hosts: localhost tasks: - command: /bin/cat args: stdin: "Hello, world!" ``` due to the presence of the `stdin` attribute which was added in Ansible 2.4. This appears to be because `stdin` is missing from `expected_args`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansiblelint/rules/EnvVarsInCommandRule.py` Content: ``` 1 # Copyright (c) 2016 Will Thames <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 # THE SOFTWARE. 
20 21 from ansiblelint import AnsibleLintRule 22 from ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY 23 24 25 class EnvVarsInCommandRule(AnsibleLintRule): 26 id = 'ANSIBLE0014' 27 shortdesc = "Environment variables don't work as part of command" 28 description = 'Environment variables should be passed to shell or ' \ 29 'command through environment argument' 30 tags = ['bug'] 31 32 expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn', 33 'cmd', '__ansible_module__', '__ansible_arguments__', 34 LINE_NUMBER_KEY, FILENAME_KEY] 35 36 def matchtask(self, file, task): 37 if task["action"]["__ansible_module__"] in ['shell', 'command']: 38 if 'cmd' in task['action']: 39 first_cmd_arg = task['action']['cmd'].split()[0] 40 else: 41 first_cmd_arg = task['action']['__ansible_arguments__'][0] 42 return any([arg not in self.expected_args for arg in task['action']] + 43 ["=" in first_cmd_arg]) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py --- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py +++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py @@ -29,7 +29,7 @@ 'command through environment argument' tags = ['bug'] - expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn', + expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn', 'cmd', '__ansible_module__', '__ansible_arguments__', LINE_NUMBER_KEY, FILENAME_KEY]
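A simplified, self-contained re-creation of the rule's check against the playbook from the report; the `task_action` dict is an assumed approximation of what ansible-lint passes to `matchtask`, and the bookkeeping keys (`LINE_NUMBER_KEY`, `FILENAME_KEY`) are left out for brevity.

```python
expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',
                 'cmd', '__ansible_module__', '__ansible_arguments__']

# Roughly what the `command: /bin/cat` task with `stdin: "Hello, world!"`
# looks like to the rule (hypothetical structure, for illustration only).
task_action = {
    '__ansible_module__': 'command',
    '__ansible_arguments__': ['/bin/cat'],
    'stdin': 'Hello, world!',
}

first_cmd_arg = task_action['__ansible_arguments__'][0]
flagged = (any(arg not in expected_args for arg in task_action)
           or '=' in first_cmd_arg)
print(flagged)  # False with 'stdin' whitelisted; remove it from
                # expected_args to reproduce the pre-patch false positive
```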
{"golden_diff": "diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n--- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n+++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n@@ -29,7 +29,7 @@\n 'command through environment argument'\n tags = ['bug']\n \n- expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn',\n+ expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n", "issue": "False Positive ANSIBLE0014 does not allow command:args:stdin\n# Issue Type\r\n- Bug report\r\n\r\n# Ansible and Ansible Lint details\r\n\r\n```\r\nansible --version\r\nansible 2.4.0.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'$HOME/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.5 (default, Aug 2 2017, 11:05:32) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]\r\n\r\nansible-lint --version\r\nansible-lint 3.4.17\r\n```\r\n\r\n- ansible installation method: OS package\r\n- ansible-lint installation method: pip\r\n\r\n# Desired Behaviour\r\n\r\nThe `stdin` argument to the `command` module should not trigger the \"Environment variables don't work as part of command\" error.\r\n\r\n# Actual Behaviour (Bug report only)\r\n\r\nThe EnvVarsInCommandRule (ANSIBLE0014) linter rule rejects the following playbook:\r\n\r\n```\r\n- hosts: localhost\r\n tasks:\r\n - command: /bin/cat\r\n args:\r\n stdin: \"Hello, world!\"\r\n```\r\n\r\ndue to the presence of the `stdin` attribute which was added in Ansible 2.4. This appears to be because `stdin` is missing from `expected_args`.\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nfrom ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY\n\n\nclass EnvVarsInCommandRule(AnsibleLintRule):\n id = 'ANSIBLE0014'\n shortdesc = \"Environment variables don't work as part of command\"\n description = 'Environment variables should be passed to shell or ' \\\n 'command through environment argument'\n tags = ['bug']\n\n expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in ['shell', 'command']:\n if 'cmd' in task['action']:\n first_cmd_arg = task['action']['cmd'].split()[0]\n else:\n first_cmd_arg = task['action']['__ansible_arguments__'][0]\n return any([arg not in self.expected_args for arg in task['action']] +\n [\"=\" in first_cmd_arg])\n", "path": "lib/ansiblelint/rules/EnvVarsInCommandRule.py"}], "after_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nfrom ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY\n\n\nclass EnvVarsInCommandRule(AnsibleLintRule):\n id = 'ANSIBLE0014'\n shortdesc = \"Environment variables don't work as part of command\"\n description = 'Environment variables should be passed to shell or ' \\\n 'command through environment argument'\n tags = ['bug']\n\n expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in ['shell', 'command']:\n if 'cmd' in task['action']:\n first_cmd_arg = task['action']['cmd'].split()[0]\n else:\n first_cmd_arg = task['action']['__ansible_arguments__'][0]\n return any([arg not in self.expected_args for arg in task['action']] +\n [\"=\" in first_cmd_arg])\n", "path": "lib/ansiblelint/rules/EnvVarsInCommandRule.py"}]}
1164
155
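The record above hinges on a whitelist check: any key of the parsed task's action dict that is not in `expected_args` makes ANSIBLE0014 fire, which is why `stdin` had to be added. The sketch below is a standalone reconstruction of that check, assuming a plain dict in place of the task object ansible-lint passes to `matchtask()` (the real one also carries line-number and filename bookkeeping keys that are left out here); the function and variable names are illustrative, not the plugin's.

```python
# Standalone sketch of the ANSIBLE0014 whitelist check. Any action key that is
# not in EXPECTED_ARGS counts as a hit, as does a leading "VAR=value" token.
EXPECTED_ARGS = {
    'chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',
    'cmd', '__ansible_module__', '__ansible_arguments__',
}

def rule_matches(action: dict) -> bool:
    """Return True if the rule would report this shell/command task."""
    if action['__ansible_module__'] not in ('shell', 'command'):
        return False
    if 'cmd' in action:
        first_cmd_arg = action['cmd'].split()[0]
    else:
        first_cmd_arg = action['__ansible_arguments__'][0]
    has_unexpected_arg = any(arg not in EXPECTED_ARGS for arg in action)
    return has_unexpected_arg or '=' in first_cmd_arg

# The task from the issue: `command: /bin/cat` with `stdin: "Hello, world!"`.
issue_task = {
    '__ansible_module__': 'command',
    '__ansible_arguments__': ['/bin/cat'],
    'stdin': 'Hello, world!',
}
print(rule_matches(issue_task))  # False with 'stdin' whitelisted, True without it
```

With `'stdin'` removed from the whitelist the same call returns True, which is exactly the false positive the issue reports.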
gh_patches_debug_7106
rasdani/github-patches
git_diff
CTFd__CTFd-1485
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Theme settings There needs to be some way to change settings in themes themselves. People complain about a lot of nonsensical things that they should fix in their forks and not need to be a concern in master. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/constants/config.py` Content: ``` 1 from CTFd.utils import get_config 2 from CTFd.utils.helpers import markup 3 4 5 class _ConfigsWrapper: 6 def __getattr__(self, attr): 7 return get_config(attr) 8 9 @property 10 def ctf_name(self): 11 return get_config("theme_header", default="CTFd") 12 13 @property 14 def theme_header(self): 15 return markup(get_config("theme_header", default="")) 16 17 @property 18 def theme_footer(self): 19 return markup(get_config("theme_footer", default="")) 20 21 22 Configs = _ConfigsWrapper() 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/constants/config.py b/CTFd/constants/config.py --- a/CTFd/constants/config.py +++ b/CTFd/constants/config.py @@ -1,3 +1,5 @@ +import json + from CTFd.utils import get_config from CTFd.utils.helpers import markup @@ -18,5 +20,9 @@ def theme_footer(self): return markup(get_config("theme_footer", default="")) + @property + def theme_settings(self): + return json.loads(get_config("theme_settings", default="null")) + Configs = _ConfigsWrapper()
{"golden_diff": "diff --git a/CTFd/constants/config.py b/CTFd/constants/config.py\n--- a/CTFd/constants/config.py\n+++ b/CTFd/constants/config.py\n@@ -1,3 +1,5 @@\n+import json\n+\n from CTFd.utils import get_config\n from CTFd.utils.helpers import markup\n \n@@ -18,5 +20,9 @@\n def theme_footer(self):\n return markup(get_config(\"theme_footer\", default=\"\"))\n \n+ @property\n+ def theme_settings(self):\n+ return json.loads(get_config(\"theme_settings\", default=\"null\"))\n+\n \n Configs = _ConfigsWrapper()\n", "issue": "Theme settings\nThere needs to be some way to change settings in themes themselves. People complain about a lot of nonsensical things that they should fix in their forks and not need to be a concern in master. \n", "before_files": [{"content": "from CTFd.utils import get_config\nfrom CTFd.utils.helpers import markup\n\n\nclass _ConfigsWrapper:\n def __getattr__(self, attr):\n return get_config(attr)\n\n @property\n def ctf_name(self):\n return get_config(\"theme_header\", default=\"CTFd\")\n\n @property\n def theme_header(self):\n return markup(get_config(\"theme_header\", default=\"\"))\n\n @property\n def theme_footer(self):\n return markup(get_config(\"theme_footer\", default=\"\"))\n\n\nConfigs = _ConfigsWrapper()\n", "path": "CTFd/constants/config.py"}], "after_files": [{"content": "import json\n\nfrom CTFd.utils import get_config\nfrom CTFd.utils.helpers import markup\n\n\nclass _ConfigsWrapper:\n def __getattr__(self, attr):\n return get_config(attr)\n\n @property\n def ctf_name(self):\n return get_config(\"theme_header\", default=\"CTFd\")\n\n @property\n def theme_header(self):\n return markup(get_config(\"theme_header\", default=\"\"))\n\n @property\n def theme_footer(self):\n return markup(get_config(\"theme_footer\", default=\"\"))\n\n @property\n def theme_settings(self):\n return json.loads(get_config(\"theme_settings\", default=\"null\"))\n\n\nConfigs = _ConfigsWrapper()\n", "path": "CTFd/constants/config.py"}]}
460
136
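The patch in this record stores per-theme settings as a JSON string under the `theme_settings` config key and decodes it lazily on attribute access, with the `"null"` default decoding to Python `None` when nothing has been configured. Below is a minimal sketch of that pattern with a plain dict standing in for CTFd's database-backed config store; the store, the helper, and the setting names are invented for illustration.

```python
import json

# Stand-in for CTFd's get_config(); the real one reads the database-backed
# config table. The keys and values below are made-up example settings.
_FAKE_CONFIG_STORE = {
    "theme_settings": '{"navbar_color": "#2c3e50", "show_social_links": false}',
}

def get_config(key, default=None):
    return _FAKE_CONFIG_STORE.get(key, default)

class ConfigsWrapper:
    @property
    def theme_settings(self):
        # Same shape as the patched property: decode the stored JSON string,
        # falling back to "null" so an unset key yields Python None.
        return json.loads(get_config("theme_settings", default="null"))

Configs = ConfigsWrapper()
print(Configs.theme_settings)  # {'navbar_color': '#2c3e50', 'show_social_links': False}
```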
gh_patches_debug_7342
rasdani/github-patches
git_diff
Zeroto521__my-data-toolkit-765
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MAINT: Add `sparse_output` for `OneHotEncoder` to compat with sklearn1.2 <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [x] closes https://github.com/Zeroto521/my-data-toolkit/actions/runs/3156312323/jobs/5135884971#step:5:960 - [x] whatsnew entry In the latest (>= 1.1.2) sklearn version, `sparse` is replaced by `sparse_out`. And `sparse` will be removed in 1.4 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dtoolkit/transformer/sklearn/OneHotEncoder.py` Content: ``` 1 from __future__ import annotations 2 3 from textwrap import dedent 4 from typing import Literal 5 from typing import TYPE_CHECKING 6 7 import numpy as np 8 import pandas as pd 9 from pandas.util._decorators import doc 10 from sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder 11 12 from dtoolkit._typing import TwoDimArray 13 from dtoolkit.accessor.dataframe import cols # noqa: F401 14 from dtoolkit.accessor.series import cols # noqa: F401, F811 15 from dtoolkit.transformer._compat import SKLEARN_GE_12 16 17 18 if TYPE_CHECKING: 19 from scipy.sparse import csr_matrix 20 21 22 class OneHotEncoder(SKOneHotEncoder): 23 """ 24 Encode categorical features as a one-hot numeric array. 25 26 Parameters 27 ---------- 28 categories_with_parent : bool, default False 29 Returned column would hook parent labels if ``True`` else 30 would be ``categories``. 31 32 sparse : bool, default False 33 Will return sparse matrix if ``True`` else will return an array. 34 35 Other parameters 36 See :obj:`sklearn.preprocessing.OneHotEncoder`. 37 38 Notes 39 ----- 40 Different to :obj:`sklearn.preprocessing.OneHotEncoder`. 41 The result would return a :obj:`~pandas.DataFrame` which uses categories 42 as columns. 43 44 Examples 45 -------- 46 Given a dataset with two features, we let the encoder find the unique 47 values per feature and transform the data to a binary one-hot encoding. 48 49 :obj:`~pandas.DataFrame` in, :obj:`~pandas.DataFrame` out with categories 50 as columns. 51 52 >>> from dtoolkit.transformer import OneHotEncoder 53 >>> import pandas as pd 54 >>> X = [['Male', 1], ['Female', 3], ['Female', 2]] 55 >>> df = pd.DataFrame(X, columns=['gender', 'number']) 56 >>> df 57 gender number 58 0 Male 1 59 1 Female 3 60 2 Female 2 61 >>> enc = OneHotEncoder() 62 >>> enc.fit_transform(df) 63 Female Male 1 2 3 64 0 0.0 1.0 1.0 0.0 0.0 65 1 1.0 0.0 0.0 0.0 1.0 66 2 1.0 0.0 0.0 1.0 0.0 67 68 The encoded data also could hook parent labels. 
69 70 >>> enc = OneHotEncoder(categories_with_parent=True) 71 >>> enc.fit_transform(df) 72 gender_Female gender_Male number_1 number_2 number_3 73 0 0.0 1.0 1.0 0.0 0.0 74 1 1.0 0.0 0.0 0.0 1.0 75 2 1.0 0.0 0.0 1.0 0.0 76 """ 77 78 @doc(SKOneHotEncoder.__init__) 79 def __init__( 80 self, 81 *, 82 sparse: bool = False, 83 sparse_output: bool = False, 84 categories_with_parent: bool = False, 85 categories="auto", 86 drop=None, 87 dtype=np.float64, 88 handle_unknown: Literal["error", "ignore", "infrequent_if_exist"] = "error", 89 min_frequency: int | float = None, 90 max_categories: int = None, 91 ): 92 # TODO: Remove `sparse` in sklearn 1.4. 93 # In the latest (>= 1.1.2) sklearn version, `sparse` is deprecated. 94 super().__init__( 95 categories=categories, 96 drop=drop, 97 dtype=dtype, 98 handle_unknown=handle_unknown, 99 min_frequency=min_frequency, 100 max_categories=max_categories, 101 **( 102 dict(sparse_output=sparse_output) 103 if SKLEARN_GE_12 104 else dict(sparse=sparse) 105 ), 106 ) 107 self.categories_with_parent = categories_with_parent 108 109 # compat with sklearn lower version 110 # `_parameter_constraints` comes out at sklearn 1.2 111 # TODO: delete this condition when required sklearn version is >= 1.2 112 if hasattr(self, "_parameter_constraints"): 113 self._parameter_constraints["categories_with_parent"] = ["boolean"] 114 115 @doc( 116 SKOneHotEncoder.transform, 117 dedent( 118 """ 119 Notes 120 ----- 121 This would let :obj:`~pandas.DataFrame` out. 122 """, 123 ), 124 ) 125 def transform(self, X: TwoDimArray) -> TwoDimArray | csr_matrix: 126 from itertools import chain 127 128 Xt = super().transform(X) 129 130 if self.sparse is False and isinstance(X, (pd.Series, pd.DataFrame)): 131 # NOTE: `get_feature_names_out` requires sklearn >= 1.0 132 categories = ( 133 self.get_feature_names_out(X.cols(to_list=True)) 134 if self.categories_with_parent 135 else chain.from_iterable(self.categories_) 136 ) 137 return pd.DataFrame(Xt, columns=categories, index=X.index) 138 139 return Xt 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dtoolkit/transformer/sklearn/OneHotEncoder.py b/dtoolkit/transformer/sklearn/OneHotEncoder.py --- a/dtoolkit/transformer/sklearn/OneHotEncoder.py +++ b/dtoolkit/transformer/sklearn/OneHotEncoder.py @@ -106,6 +106,9 @@ ) self.categories_with_parent = categories_with_parent + # TODO: Remove the following line in sklearn 1.2. + self.sparse_output = sparse_output + # compat with sklearn lower version # `_parameter_constraints` comes out at sklearn 1.2 # TODO: delete this condition when required sklearn version is >= 1.2
{"golden_diff": "diff --git a/dtoolkit/transformer/sklearn/OneHotEncoder.py b/dtoolkit/transformer/sklearn/OneHotEncoder.py\n--- a/dtoolkit/transformer/sklearn/OneHotEncoder.py\n+++ b/dtoolkit/transformer/sklearn/OneHotEncoder.py\n@@ -106,6 +106,9 @@\n )\n self.categories_with_parent = categories_with_parent\n \n+ # TODO: Remove the following line in sklearn 1.2.\n+ self.sparse_output = sparse_output\n+\n # compat with sklearn lower version\n # `_parameter_constraints` comes out at sklearn 1.2\n # TODO: delete this condition when required sklearn version is >= 1.2\n", "issue": "MAINT: Add `sparse_output` for `OneHotEncoder` to compat with sklearn1.2\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes https://github.com/Zeroto521/my-data-toolkit/actions/runs/3156312323/jobs/5135884971#step:5:960\r\n- [x] whatsnew entry\r\n\r\nIn the latest (>= 1.1.2) sklearn version, `sparse` is replaced by `sparse_out`.\r\nAnd `sparse` will be removed in 1.4\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import Literal\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util._decorators import doc\nfrom sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder\n\nfrom dtoolkit._typing import TwoDimArray\nfrom dtoolkit.accessor.dataframe import cols # noqa: F401\nfrom dtoolkit.accessor.series import cols # noqa: F401, F811\nfrom dtoolkit.transformer._compat import SKLEARN_GE_12\n\n\nif TYPE_CHECKING:\n from scipy.sparse import csr_matrix\n\n\nclass OneHotEncoder(SKOneHotEncoder):\n \"\"\"\n Encode categorical features as a one-hot numeric array.\n\n Parameters\n ----------\n categories_with_parent : bool, default False\n Returned column would hook parent labels if ``True`` else\n would be ``categories``.\n\n sparse : bool, default False\n Will return sparse matrix if ``True`` else will return an array.\n\n Other parameters\n See :obj:`sklearn.preprocessing.OneHotEncoder`.\n\n Notes\n -----\n Different to :obj:`sklearn.preprocessing.OneHotEncoder`.\n The result would return a :obj:`~pandas.DataFrame` which uses categories\n as columns.\n\n Examples\n --------\n Given a dataset with two features, we let the encoder find the unique\n values per feature and transform the data to a binary one-hot encoding.\n\n :obj:`~pandas.DataFrame` in, :obj:`~pandas.DataFrame` out with categories\n as columns.\n\n >>> from dtoolkit.transformer import OneHotEncoder\n >>> import pandas as pd\n >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]\n >>> df = pd.DataFrame(X, columns=['gender', 'number'])\n >>> df\n gender number\n 0 Male 1\n 1 Female 3\n 2 Female 2\n >>> enc = OneHotEncoder()\n >>> enc.fit_transform(df)\n Female Male 1 2 3\n 0 0.0 1.0 1.0 0.0 0.0\n 1 1.0 0.0 0.0 0.0 1.0\n 2 1.0 0.0 0.0 1.0 0.0\n\n The encoded data also could hook parent labels.\n\n >>> enc = 
OneHotEncoder(categories_with_parent=True)\n >>> enc.fit_transform(df)\n gender_Female gender_Male number_1 number_2 number_3\n 0 0.0 1.0 1.0 0.0 0.0\n 1 1.0 0.0 0.0 0.0 1.0\n 2 1.0 0.0 0.0 1.0 0.0\n \"\"\"\n\n @doc(SKOneHotEncoder.__init__)\n def __init__(\n self,\n *,\n sparse: bool = False,\n sparse_output: bool = False,\n categories_with_parent: bool = False,\n categories=\"auto\",\n drop=None,\n dtype=np.float64,\n handle_unknown: Literal[\"error\", \"ignore\", \"infrequent_if_exist\"] = \"error\",\n min_frequency: int | float = None,\n max_categories: int = None,\n ):\n # TODO: Remove `sparse` in sklearn 1.4.\n # In the latest (>= 1.1.2) sklearn version, `sparse` is deprecated.\n super().__init__(\n categories=categories,\n drop=drop,\n dtype=dtype,\n handle_unknown=handle_unknown,\n min_frequency=min_frequency,\n max_categories=max_categories,\n **(\n dict(sparse_output=sparse_output)\n if SKLEARN_GE_12\n else dict(sparse=sparse)\n ),\n )\n self.categories_with_parent = categories_with_parent\n\n # compat with sklearn lower version\n # `_parameter_constraints` comes out at sklearn 1.2\n # TODO: delete this condition when required sklearn version is >= 1.2\n if hasattr(self, \"_parameter_constraints\"):\n self._parameter_constraints[\"categories_with_parent\"] = [\"boolean\"]\n\n @doc(\n SKOneHotEncoder.transform,\n dedent(\n \"\"\"\n Notes\n -----\n This would let :obj:`~pandas.DataFrame` out.\n \"\"\",\n ),\n )\n def transform(self, X: TwoDimArray) -> TwoDimArray | csr_matrix:\n from itertools import chain\n\n Xt = super().transform(X)\n\n if self.sparse is False and isinstance(X, (pd.Series, pd.DataFrame)):\n # NOTE: `get_feature_names_out` requires sklearn >= 1.0\n categories = (\n self.get_feature_names_out(X.cols(to_list=True))\n if self.categories_with_parent\n else chain.from_iterable(self.categories_)\n )\n return pd.DataFrame(Xt, columns=categories, index=X.index)\n\n return Xt\n", "path": "dtoolkit/transformer/sklearn/OneHotEncoder.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import Literal\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util._decorators import doc\nfrom sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder\n\nfrom dtoolkit._typing import TwoDimArray\nfrom dtoolkit.accessor.dataframe import cols # noqa: F401\nfrom dtoolkit.accessor.series import cols # noqa: F401, F811\nfrom dtoolkit.transformer._compat import SKLEARN_GE_12\n\n\nif TYPE_CHECKING:\n from scipy.sparse import csr_matrix\n\n\nclass OneHotEncoder(SKOneHotEncoder):\n \"\"\"\n Encode categorical features as a one-hot numeric array.\n\n Parameters\n ----------\n categories_with_parent : bool, default False\n Returned column would hook parent labels if ``True`` else\n would be ``categories``.\n\n sparse : bool, default False\n Will return sparse matrix if ``True`` else will return an array.\n\n Other parameters\n See :obj:`sklearn.preprocessing.OneHotEncoder`.\n\n Notes\n -----\n Different to :obj:`sklearn.preprocessing.OneHotEncoder`.\n The result would return a :obj:`~pandas.DataFrame` which uses categories\n as columns.\n\n Examples\n --------\n Given a dataset with two features, we let the encoder find the unique\n values per feature and transform the data to a binary one-hot encoding.\n\n :obj:`~pandas.DataFrame` in, :obj:`~pandas.DataFrame` out with categories\n as columns.\n\n >>> from dtoolkit.transformer import OneHotEncoder\n >>> import pandas as pd\n >>> X = [['Male', 
1], ['Female', 3], ['Female', 2]]\n >>> df = pd.DataFrame(X, columns=['gender', 'number'])\n >>> df\n gender number\n 0 Male 1\n 1 Female 3\n 2 Female 2\n >>> enc = OneHotEncoder()\n >>> enc.fit_transform(df)\n Female Male 1 2 3\n 0 0.0 1.0 1.0 0.0 0.0\n 1 1.0 0.0 0.0 0.0 1.0\n 2 1.0 0.0 0.0 1.0 0.0\n\n The encoded data also could hook parent labels.\n\n >>> enc = OneHotEncoder(categories_with_parent=True)\n >>> enc.fit_transform(df)\n gender_Female gender_Male number_1 number_2 number_3\n 0 0.0 1.0 1.0 0.0 0.0\n 1 1.0 0.0 0.0 0.0 1.0\n 2 1.0 0.0 0.0 1.0 0.0\n \"\"\"\n\n @doc(SKOneHotEncoder.__init__)\n def __init__(\n self,\n *,\n sparse: bool = False,\n sparse_output: bool = False,\n categories_with_parent: bool = False,\n categories=\"auto\",\n drop=None,\n dtype=np.float64,\n handle_unknown: Literal[\"error\", \"ignore\", \"infrequent_if_exist\"] = \"error\",\n min_frequency: int | float = None,\n max_categories: int = None,\n ):\n # TODO: Remove `sparse` in sklearn 1.4.\n # In the latest (>= 1.1.2) sklearn version, `sparse` is deprecated.\n super().__init__(\n categories=categories,\n drop=drop,\n dtype=dtype,\n handle_unknown=handle_unknown,\n min_frequency=min_frequency,\n max_categories=max_categories,\n **(\n dict(sparse_output=sparse_output)\n if SKLEARN_GE_12\n else dict(sparse=sparse)\n ),\n )\n self.categories_with_parent = categories_with_parent\n\n # TODO: Remove the following line in sklearn 1.2.\n self.sparse_output = sparse_output\n\n # compat with sklearn lower version\n # `_parameter_constraints` comes out at sklearn 1.2\n # TODO: delete this condition when required sklearn version is >= 1.2\n if hasattr(self, \"_parameter_constraints\"):\n self._parameter_constraints[\"categories_with_parent\"] = [\"boolean\"]\n\n @doc(\n SKOneHotEncoder.transform,\n dedent(\n \"\"\"\n Notes\n -----\n This would let :obj:`~pandas.DataFrame` out.\n \"\"\",\n ),\n )\n def transform(self, X: TwoDimArray) -> TwoDimArray | csr_matrix:\n from itertools import chain\n\n Xt = super().transform(X)\n\n if self.sparse is False and isinstance(X, (pd.Series, pd.DataFrame)):\n # NOTE: `get_feature_names_out` requires sklearn >= 1.0\n categories = (\n self.get_feature_names_out(X.cols(to_list=True))\n if self.categories_with_parent\n else chain.from_iterable(self.categories_)\n )\n return pd.DataFrame(Xt, columns=categories, index=X.index)\n\n return Xt\n", "path": "dtoolkit/transformer/sklearn/OneHotEncoder.py"}]}
2023
161
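The core of this record's fix is a version-gated keyword: scikit-learn 1.2 introduces `sparse_output`, deprecates `sparse`, and is slated to drop it in 1.4, so the wrapper forwards whichever spelling the installed release understands (the patch also assigns `self.sparse_output` on the instance, with a TODO to drop that line once sklearn 1.2 is the minimum). A minimal sketch of that kwarg selection follows; `onehot_sparse_kwargs` is a made-up helper name and the version string is passed in explicitly instead of reading `sklearn.__version__`.

```python
def onehot_sparse_kwargs(sklearn_version: str, sparse_output: bool = False) -> dict:
    """Pick the keyword spelling the installed scikit-learn understands.

    `sklearn_version` is a plain "major.minor.patch" string such as "1.1.3";
    pre-release suffixes are not handled in this sketch.
    """
    major, minor = (int(part) for part in sklearn_version.split(".")[:2])
    if (major, minor) >= (1, 2):
        # New spelling; `sparse` is deprecated here and removed in 1.4.
        return {"sparse_output": sparse_output}
    # Legacy spelling for scikit-learn < 1.2.
    return {"sparse": sparse_output}

print(onehot_sparse_kwargs("1.1.3"))  # {'sparse': False}
print(onehot_sparse_kwargs("1.2.0"))  # {'sparse_output': False}
```

The returned dict can then be splatted into the `OneHotEncoder` constructor, which is the same `**(...)` trick the wrapper's `__init__` uses.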
gh_patches_debug_37639
rasdani/github-patches
git_diff
ansible-collections__community.vmware-218
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- vmware_tag_manager: allow tags to datastore clusters ##### SUMMARY vmware_tag_manager: allow tags to be applied to datastore clusters much like they can to switches, vms, etc. ##### ISSUE TYPE - Feature Idea ##### COMPONENT NAME vmware_tag_manager ##### ADDITIONAL INFORMATION As a VI Admin, it is common to need to apply tags to all items in VMware in order to track information about those items. Additionally, in the instance of datastore clusters, these are often used to create storage policies within the environment. ```yaml - name: "apply tags to datastore cluster" vmware_tag_manager: hostname: "{{ vcenter_server }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: false tag_names: ["Storage:gold"] object_name: "High-Performance-Datastore-Cluster" object_type: "DatastoreCluster" state: "present" ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/modules/vmware_tag_manager.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 # Copyright: (c) 2018, Ansible Project 4 # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 __metaclass__ = type 9 10 11 DOCUMENTATION = r''' 12 --- 13 module: vmware_tag_manager 14 short_description: Manage association of VMware tags with VMware objects 15 description: 16 - This module can be used to assign / remove VMware tags from the given VMware objects. 17 - Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere. 18 - All variables and VMware object names are case sensitive. 19 author: 20 - Abhijeet Kasurde (@Akasurde) 21 - Frederic Van Reet (@GBrawl) 22 notes: 23 - Tested on vSphere 6.5 24 requirements: 25 - python >= 2.6 26 - PyVmomi 27 - vSphere Automation SDK 28 options: 29 tag_names: 30 description: 31 - List of tag(s) to be managed. 32 - User can also specify category name by specifying colon separated value. For example, "category_name:tag_name". 33 - User can also specify tag and category as dict, when tag or category contains colon. 34 See example for more information. Added in version 2.10. 35 - User can skip category name if you have unique tag names. 36 required: True 37 type: list 38 state: 39 description: 40 - If C(state) is set to C(add) or C(present) will add the tags to the existing tag list of the given object. 41 - If C(state) is set to C(remove) or C(absent) will remove the tags from the existing tag list of the given object. 42 - If C(state) is set to C(set) will replace the tags of the given objects with the user defined list of tags. 43 default: add 44 choices: [ present, absent, add, remove, set ] 45 type: str 46 object_type: 47 description: 48 - Type of object to work with. 49 required: True 50 choices: [ VirtualMachine, Datacenter, ClusterComputeResource, HostSystem, DistributedVirtualSwitch, DistributedVirtualPortgroup ] 51 type: str 52 object_name: 53 description: 54 - Name of the object to work with. 
55 - For DistributedVirtualPortgroups the format should be "switch_name:portgroup_name" 56 required: True 57 type: str 58 extends_documentation_fragment: 59 - community.vmware.vmware_rest_client.documentation 60 61 ''' 62 63 EXAMPLES = r''' 64 - name: Add tags to a virtual machine 65 community.vmware.vmware_tag_manager: 66 hostname: '{{ vcenter_hostname }}' 67 username: '{{ vcenter_username }}' 68 password: '{{ vcenter_password }}' 69 validate_certs: no 70 tag_names: 71 - Sample_Tag_0002 72 - Category_0001:Sample_Tag_0003 73 object_name: Fedora_VM 74 object_type: VirtualMachine 75 state: add 76 delegate_to: localhost 77 78 - name: Specify tag and category as dict 79 community.vmware.vmware_tag_manager: 80 hostname: '{{ vcenter_hostname }}' 81 username: '{{ vcenter_username }}' 82 password: '{{ vcenter_password }}' 83 validate_certs: no 84 tag_names: 85 - tag: tag_0001 86 category: cat_0001 87 - tag: tag_0002 88 category: cat_0002 89 object_name: Fedora_VM 90 object_type: VirtualMachine 91 state: add 92 delegate_to: localhost 93 94 - name: Remove a tag from a virtual machine 95 community.vmware.vmware_tag_manager: 96 hostname: '{{ vcenter_hostname }}' 97 username: '{{ vcenter_username }}' 98 password: '{{ vcenter_password }}' 99 validate_certs: no 100 tag_names: 101 - Sample_Tag_0002 102 object_name: Fedora_VM 103 object_type: VirtualMachine 104 state: remove 105 delegate_to: localhost 106 107 - name: Add tags to a distributed virtual switch 108 community.vmware.vmware_tag_manager: 109 hostname: '{{ vcenter_hostname }}' 110 username: '{{ vcenter_username }}' 111 password: '{{ vcenter_password }}' 112 validate_certs: no 113 tag_names: 114 - Sample_Tag_0003 115 object_name: Switch_0001 116 object_type: DistributedVirtualSwitch 117 state: add 118 delegate_to: localhost 119 120 - name: Add tags to a distributed virtual portgroup 121 community.vmware.vmware_tag_manager: 122 hostname: '{{ vcenter_hostname }}' 123 username: '{{ vcenter_username }}' 124 password: '{{ vcenter_password }}' 125 validate_certs: no 126 tag_names: 127 - Sample_Tag_0004 128 object_name: Switch_0001:Portgroup_0001 129 object_type: DistributedVirtualPortgroup 130 state: add 131 delegate_to: localhost 132 ''' 133 134 RETURN = r''' 135 tag_status: 136 description: metadata about tags related to object configuration 137 returned: on success 138 type: list 139 sample: { 140 "current_tags": [ 141 "backup", 142 "security" 143 ], 144 "desired_tags": [ 145 "security" 146 ], 147 "previous_tags": [ 148 "backup", 149 "security" 150 ] 151 } 152 ''' 153 from ansible.module_utils.basic import AnsibleModule 154 from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient 155 from ansible_collections.community.vmware.plugins.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name) 156 try: 157 from com.vmware.vapi.std_client import DynamicID 158 from com.vmware.vapi.std.errors_client import Error 159 except ImportError: 160 pass 161 162 163 class VmwareTagManager(VmwareRestClient): 164 def __init__(self, module): 165 """ 166 Constructor 167 """ 168 super(VmwareTagManager, self).__init__(module) 169 self.pyv = PyVmomi(module=module) 170 171 self.object_type = self.params.get('object_type') 172 self.object_name = self.params.get('object_name') 173 self.managed_object = None 174 175 if self.object_type == 'VirtualMachine': 176 self.managed_object = self.pyv.get_vm_or_template(self.object_name) 177 178 if self.object_type == 'Datacenter': 179 self.managed_object = 
self.pyv.find_datacenter_by_name(self.object_name) 180 181 if self.object_type == 'ClusterComputeResource': 182 self.managed_object = self.pyv.find_cluster_by_name(self.object_name) 183 184 if self.object_type == 'HostSystem': 185 self.managed_object = self.pyv.find_hostsystem_by_name(self.object_name) 186 187 if self.object_type == 'DistributedVirtualSwitch': 188 self.managed_object = find_dvs_by_name(self.pyv.content, self.object_name) 189 self.object_type = 'VmwareDistributedVirtualSwitch' 190 191 if self.object_type == 'DistributedVirtualPortgroup': 192 dvs_name, pg_name = self.object_name.split(":", 1) 193 dv_switch = find_dvs_by_name(self.pyv.content, dvs_name) 194 if dv_switch is None: 195 self.module.fail_json(msg="A distributed virtual switch with name %s does not exist" % dvs_name) 196 self.managed_object = find_dvspg_by_name(dv_switch, pg_name) 197 198 if self.managed_object is None: 199 self.module.fail_json(msg="Failed to find the managed object for %s with type %s" % (self.object_name, self.object_type)) 200 201 if not hasattr(self.managed_object, '_moId'): 202 self.module.fail_json(msg="Unable to find managed object id for %s managed object" % self.object_name) 203 204 self.dynamic_managed_object = DynamicID(type=self.object_type, id=self.managed_object._moId) 205 206 self.tag_service = self.api_client.tagging.Tag 207 self.category_service = self.api_client.tagging.Category 208 self.tag_association_svc = self.api_client.tagging.TagAssociation 209 210 self.tag_names = self.params.get('tag_names') 211 212 def ensure_state(self): 213 """ 214 Manage the internal state of tags 215 216 """ 217 results = dict( 218 changed=False, 219 tag_status=dict(), 220 ) 221 changed = False 222 action = self.params.get('state') 223 available_tag_obj = self.get_tags_for_object(tag_service=self.tag_service, 224 tag_assoc_svc=self.tag_association_svc, 225 dobj=self.dynamic_managed_object) 226 227 _temp_prev_tags = ["%s:%s" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)] 228 results['tag_status']['previous_tags'] = _temp_prev_tags 229 results['tag_status']['desired_tags'] = self.tag_names 230 231 # Check if category and tag combination exists as per user request 232 removed_tags_for_set = False 233 for tag in self.tag_names: 234 category_obj, category_name, tag_name = None, None, None 235 if isinstance(tag, dict): 236 tag_name = tag.get('tag') 237 category_name = tag.get('category') 238 if category_name: 239 # User specified category 240 category_obj = self.search_svc_object_by_name(self.category_service, category_name) 241 if not category_obj: 242 self.module.fail_json(msg="Unable to find the category %s" % category_name) 243 elif isinstance(tag, str): 244 if ":" in tag: 245 # User specified category 246 category_name, tag_name = tag.split(":", 1) 247 category_obj = self.search_svc_object_by_name(self.category_service, category_name) 248 if not category_obj: 249 self.module.fail_json(msg="Unable to find the category %s" % category_name) 250 else: 251 # User specified only tag 252 tag_name = tag 253 254 if category_name: 255 tag_obj = self.get_tag_by_category(tag_name=tag_name, category_name=category_name) 256 else: 257 tag_obj = self.get_tag_by_name(tag_name=tag_name) 258 259 if not tag_obj: 260 self.module.fail_json(msg="Unable to find the tag %s" % tag_name) 261 262 if action in ('add', 'present'): 263 if tag_obj not in available_tag_obj: 264 # Tag is not already applied 265 try: 266 self.tag_association_svc.attach(tag_id=tag_obj.id, 
object_id=self.dynamic_managed_object) 267 changed = True 268 except Error as error: 269 self.module.fail_json(msg="%s" % self.get_error_message(error)) 270 271 elif action == 'set': 272 # Remove all tags first 273 try: 274 if not removed_tags_for_set: 275 for av_tag in available_tag_obj: 276 self.tag_association_svc.detach(tag_id=av_tag.id, object_id=self.dynamic_managed_object) 277 removed_tags_for_set = True 278 self.tag_association_svc.attach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object) 279 changed = True 280 except Error as error: 281 self.module.fail_json(msg="%s" % self.get_error_message(error)) 282 283 elif action in ('remove', 'absent'): 284 if tag_obj in available_tag_obj: 285 try: 286 self.tag_association_svc.detach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object) 287 changed = True 288 except Error as error: 289 self.module.fail_json(msg="%s" % self.get_error_message(error)) 290 291 _temp_curr_tags = ["%s:%s" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)] 292 results['tag_status']['current_tags'] = _temp_curr_tags 293 results['changed'] = changed 294 self.module.exit_json(**results) 295 296 297 def main(): 298 argument_spec = VmwareRestClient.vmware_client_argument_spec() 299 argument_spec.update( 300 tag_names=dict(type='list', required=True), 301 state=dict(type='str', choices=['absent', 'add', 'present', 'remove', 'set'], default='add'), 302 object_name=dict(type='str', required=True), 303 object_type=dict(type='str', required=True, choices=['VirtualMachine', 'Datacenter', 'ClusterComputeResource', 304 'HostSystem', 'DistributedVirtualSwitch', 305 'DistributedVirtualPortgroup']), 306 ) 307 module = AnsibleModule(argument_spec=argument_spec) 308 309 vmware_tag_manager = VmwareTagManager(module) 310 vmware_tag_manager.ensure_state() 311 312 313 if __name__ == '__main__': 314 main() 315 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/modules/vmware_tag_manager.py b/plugins/modules/vmware_tag_manager.py --- a/plugins/modules/vmware_tag_manager.py +++ b/plugins/modules/vmware_tag_manager.py @@ -47,7 +47,7 @@ description: - Type of object to work with. required: True - choices: [ VirtualMachine, Datacenter, ClusterComputeResource, HostSystem, DistributedVirtualSwitch, DistributedVirtualPortgroup ] + choices: [ VirtualMachine, Datacenter, ClusterComputeResource, HostSystem, DistributedVirtualSwitch, DistributedVirtualPortgroup, Datastore, DatastoreCluster, ResourcePool, Folder ] type: str object_name: description: @@ -175,12 +175,25 @@ if self.object_type == 'VirtualMachine': self.managed_object = self.pyv.get_vm_or_template(self.object_name) + if self.object_type == 'Folder': + self.managed_object = self.pyv.find_folder_by_name(self.object_name) + if self.object_type == 'Datacenter': self.managed_object = self.pyv.find_datacenter_by_name(self.object_name) + if self.object_type == 'Datastore': + self.managed_object = self.pyv.find_datastore_by_name(self.object_name) + + if self.object_type == 'DatastoreCluster': + self.managed_object = self.pyv.find_datastore_cluster_by_name(self.object_name) + self.object_type = 'StoragePod' + if self.object_type == 'ClusterComputeResource': self.managed_object = self.pyv.find_cluster_by_name(self.object_name) + if self.object_type == 'ResourcePool': + self.managed_object = self.pyv.find_resource_pool_by_name(self.object_name) + if self.object_type == 'HostSystem': self.managed_object = self.pyv.find_hostsystem_by_name(self.object_name) @@ -302,7 +315,8 @@ object_name=dict(type='str', required=True), object_type=dict(type='str', required=True, choices=['VirtualMachine', 'Datacenter', 'ClusterComputeResource', 'HostSystem', 'DistributedVirtualSwitch', - 'DistributedVirtualPortgroup']), + 'DistributedVirtualPortgroup', 'Datastore', 'ResourcePool', + 'Folder', 'DatastoreCluster']), ) module = AnsibleModule(argument_spec=argument_spec)
{"golden_diff": "diff --git a/plugins/modules/vmware_tag_manager.py b/plugins/modules/vmware_tag_manager.py\n--- a/plugins/modules/vmware_tag_manager.py\n+++ b/plugins/modules/vmware_tag_manager.py\n@@ -47,7 +47,7 @@\n description:\n - Type of object to work with.\n required: True\n- choices: [ VirtualMachine, Datacenter, ClusterComputeResource, HostSystem, DistributedVirtualSwitch, DistributedVirtualPortgroup ]\n+ choices: [ VirtualMachine, Datacenter, ClusterComputeResource, HostSystem, DistributedVirtualSwitch, DistributedVirtualPortgroup, Datastore, DatastoreCluster, ResourcePool, Folder ]\n type: str\n object_name:\n description:\n@@ -175,12 +175,25 @@\n if self.object_type == 'VirtualMachine':\n self.managed_object = self.pyv.get_vm_or_template(self.object_name)\n \n+ if self.object_type == 'Folder':\n+ self.managed_object = self.pyv.find_folder_by_name(self.object_name)\n+\n if self.object_type == 'Datacenter':\n self.managed_object = self.pyv.find_datacenter_by_name(self.object_name)\n \n+ if self.object_type == 'Datastore':\n+ self.managed_object = self.pyv.find_datastore_by_name(self.object_name)\n+\n+ if self.object_type == 'DatastoreCluster':\n+ self.managed_object = self.pyv.find_datastore_cluster_by_name(self.object_name)\n+ self.object_type = 'StoragePod'\n+\n if self.object_type == 'ClusterComputeResource':\n self.managed_object = self.pyv.find_cluster_by_name(self.object_name)\n \n+ if self.object_type == 'ResourcePool':\n+ self.managed_object = self.pyv.find_resource_pool_by_name(self.object_name)\n+\n if self.object_type == 'HostSystem':\n self.managed_object = self.pyv.find_hostsystem_by_name(self.object_name)\n \n@@ -302,7 +315,8 @@\n object_name=dict(type='str', required=True),\n object_type=dict(type='str', required=True, choices=['VirtualMachine', 'Datacenter', 'ClusterComputeResource',\n 'HostSystem', 'DistributedVirtualSwitch',\n- 'DistributedVirtualPortgroup']),\n+ 'DistributedVirtualPortgroup', 'Datastore', 'ResourcePool',\n+ 'Folder', 'DatastoreCluster']),\n )\n module = AnsibleModule(argument_spec=argument_spec)\n", "issue": "vmware_tag_manager: allow tags to datastore clusters\n##### SUMMARY\r\nvmware_tag_manager: allow tags to be applied to datastore clusters much like they can to switches, vms, etc.\r\n\r\n##### ISSUE TYPE\r\n- Feature Idea\r\n\r\n##### COMPONENT NAME\r\nvmware_tag_manager\r\n\r\n##### ADDITIONAL INFORMATION\r\nAs a VI Admin, it is common to need to apply tags to all items in VMware in order to track information about those items. 
Additionally, in the instance of datastore clusters, these are often used to create storage policies within the environment.\r\n\r\n```yaml\r\n- name: \"apply tags to datastore cluster\"\r\n vmware_tag_manager:\r\n hostname: \"{{ vcenter_server }}\"\r\n username: \"{{ vcenter_username }}\"\r\n password: \"{{ vcenter_password }}\"\r\n validate_certs: false\r\n tag_names: [\"Storage:gold\"]\r\n object_name: \"High-Performance-Datastore-Cluster\"\r\n object_type: \"DatastoreCluster\"\r\n state: \"present\"\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright: (c) 2018, Ansible Project\n# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_tag_manager\nshort_description: Manage association of VMware tags with VMware objects\ndescription:\n- This module can be used to assign / remove VMware tags from the given VMware objects.\n- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.\n- All variables and VMware object names are case sensitive.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\n- Frederic Van Reet (@GBrawl)\nnotes:\n- Tested on vSphere 6.5\nrequirements:\n- python >= 2.6\n- PyVmomi\n- vSphere Automation SDK\noptions:\n tag_names:\n description:\n - List of tag(s) to be managed.\n - User can also specify category name by specifying colon separated value. For example, \"category_name:tag_name\".\n - User can also specify tag and category as dict, when tag or category contains colon.\n See example for more information. 
Added in version 2.10.\n - User can skip category name if you have unique tag names.\n required: True\n type: list\n state:\n description:\n - If C(state) is set to C(add) or C(present) will add the tags to the existing tag list of the given object.\n - If C(state) is set to C(remove) or C(absent) will remove the tags from the existing tag list of the given object.\n - If C(state) is set to C(set) will replace the tags of the given objects with the user defined list of tags.\n default: add\n choices: [ present, absent, add, remove, set ]\n type: str\n object_type:\n description:\n - Type of object to work with.\n required: True\n choices: [ VirtualMachine, Datacenter, ClusterComputeResource, HostSystem, DistributedVirtualSwitch, DistributedVirtualPortgroup ]\n type: str\n object_name:\n description:\n - Name of the object to work with.\n - For DistributedVirtualPortgroups the format should be \"switch_name:portgroup_name\"\n required: True\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware_rest_client.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Add tags to a virtual machine\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0002\n - Category_0001:Sample_Tag_0003\n object_name: Fedora_VM\n object_type: VirtualMachine\n state: add\n delegate_to: localhost\n\n- name: Specify tag and category as dict\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - tag: tag_0001\n category: cat_0001\n - tag: tag_0002\n category: cat_0002\n object_name: Fedora_VM\n object_type: VirtualMachine\n state: add\n delegate_to: localhost\n\n- name: Remove a tag from a virtual machine\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0002\n object_name: Fedora_VM\n object_type: VirtualMachine\n state: remove\n delegate_to: localhost\n\n- name: Add tags to a distributed virtual switch\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0003\n object_name: Switch_0001\n object_type: DistributedVirtualSwitch\n state: add\n delegate_to: localhost\n\n- name: Add tags to a distributed virtual portgroup\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0004\n object_name: Switch_0001:Portgroup_0001\n object_type: DistributedVirtualPortgroup\n state: add\n delegate_to: localhost\n'''\n\nRETURN = r'''\ntag_status:\n description: metadata about tags related to object configuration\n returned: on success\n type: list\n sample: {\n \"current_tags\": [\n \"backup\",\n \"security\"\n ],\n \"desired_tags\": [\n \"security\"\n ],\n \"previous_tags\": [\n \"backup\",\n \"security\"\n ]\n }\n'''\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name)\ntry:\n from 
com.vmware.vapi.std_client import DynamicID\n from com.vmware.vapi.std.errors_client import Error\nexcept ImportError:\n pass\n\n\nclass VmwareTagManager(VmwareRestClient):\n def __init__(self, module):\n \"\"\"\n Constructor\n \"\"\"\n super(VmwareTagManager, self).__init__(module)\n self.pyv = PyVmomi(module=module)\n\n self.object_type = self.params.get('object_type')\n self.object_name = self.params.get('object_name')\n self.managed_object = None\n\n if self.object_type == 'VirtualMachine':\n self.managed_object = self.pyv.get_vm_or_template(self.object_name)\n\n if self.object_type == 'Datacenter':\n self.managed_object = self.pyv.find_datacenter_by_name(self.object_name)\n\n if self.object_type == 'ClusterComputeResource':\n self.managed_object = self.pyv.find_cluster_by_name(self.object_name)\n\n if self.object_type == 'HostSystem':\n self.managed_object = self.pyv.find_hostsystem_by_name(self.object_name)\n\n if self.object_type == 'DistributedVirtualSwitch':\n self.managed_object = find_dvs_by_name(self.pyv.content, self.object_name)\n self.object_type = 'VmwareDistributedVirtualSwitch'\n\n if self.object_type == 'DistributedVirtualPortgroup':\n dvs_name, pg_name = self.object_name.split(\":\", 1)\n dv_switch = find_dvs_by_name(self.pyv.content, dvs_name)\n if dv_switch is None:\n self.module.fail_json(msg=\"A distributed virtual switch with name %s does not exist\" % dvs_name)\n self.managed_object = find_dvspg_by_name(dv_switch, pg_name)\n\n if self.managed_object is None:\n self.module.fail_json(msg=\"Failed to find the managed object for %s with type %s\" % (self.object_name, self.object_type))\n\n if not hasattr(self.managed_object, '_moId'):\n self.module.fail_json(msg=\"Unable to find managed object id for %s managed object\" % self.object_name)\n\n self.dynamic_managed_object = DynamicID(type=self.object_type, id=self.managed_object._moId)\n\n self.tag_service = self.api_client.tagging.Tag\n self.category_service = self.api_client.tagging.Category\n self.tag_association_svc = self.api_client.tagging.TagAssociation\n\n self.tag_names = self.params.get('tag_names')\n\n def ensure_state(self):\n \"\"\"\n Manage the internal state of tags\n\n \"\"\"\n results = dict(\n changed=False,\n tag_status=dict(),\n )\n changed = False\n action = self.params.get('state')\n available_tag_obj = self.get_tags_for_object(tag_service=self.tag_service,\n tag_assoc_svc=self.tag_association_svc,\n dobj=self.dynamic_managed_object)\n\n _temp_prev_tags = [\"%s:%s\" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)]\n results['tag_status']['previous_tags'] = _temp_prev_tags\n results['tag_status']['desired_tags'] = self.tag_names\n\n # Check if category and tag combination exists as per user request\n removed_tags_for_set = False\n for tag in self.tag_names:\n category_obj, category_name, tag_name = None, None, None\n if isinstance(tag, dict):\n tag_name = tag.get('tag')\n category_name = tag.get('category')\n if category_name:\n # User specified category\n category_obj = self.search_svc_object_by_name(self.category_service, category_name)\n if not category_obj:\n self.module.fail_json(msg=\"Unable to find the category %s\" % category_name)\n elif isinstance(tag, str):\n if \":\" in tag:\n # User specified category\n category_name, tag_name = tag.split(\":\", 1)\n category_obj = self.search_svc_object_by_name(self.category_service, category_name)\n if not category_obj:\n self.module.fail_json(msg=\"Unable to find the category %s\" % 
category_name)\n else:\n # User specified only tag\n tag_name = tag\n\n if category_name:\n tag_obj = self.get_tag_by_category(tag_name=tag_name, category_name=category_name)\n else:\n tag_obj = self.get_tag_by_name(tag_name=tag_name)\n\n if not tag_obj:\n self.module.fail_json(msg=\"Unable to find the tag %s\" % tag_name)\n\n if action in ('add', 'present'):\n if tag_obj not in available_tag_obj:\n # Tag is not already applied\n try:\n self.tag_association_svc.attach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object)\n changed = True\n except Error as error:\n self.module.fail_json(msg=\"%s\" % self.get_error_message(error))\n\n elif action == 'set':\n # Remove all tags first\n try:\n if not removed_tags_for_set:\n for av_tag in available_tag_obj:\n self.tag_association_svc.detach(tag_id=av_tag.id, object_id=self.dynamic_managed_object)\n removed_tags_for_set = True\n self.tag_association_svc.attach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object)\n changed = True\n except Error as error:\n self.module.fail_json(msg=\"%s\" % self.get_error_message(error))\n\n elif action in ('remove', 'absent'):\n if tag_obj in available_tag_obj:\n try:\n self.tag_association_svc.detach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object)\n changed = True\n except Error as error:\n self.module.fail_json(msg=\"%s\" % self.get_error_message(error))\n\n _temp_curr_tags = [\"%s:%s\" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)]\n results['tag_status']['current_tags'] = _temp_curr_tags\n results['changed'] = changed\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n argument_spec.update(\n tag_names=dict(type='list', required=True),\n state=dict(type='str', choices=['absent', 'add', 'present', 'remove', 'set'], default='add'),\n object_name=dict(type='str', required=True),\n object_type=dict(type='str', required=True, choices=['VirtualMachine', 'Datacenter', 'ClusterComputeResource',\n 'HostSystem', 'DistributedVirtualSwitch',\n 'DistributedVirtualPortgroup']),\n )\n module = AnsibleModule(argument_spec=argument_spec)\n\n vmware_tag_manager = VmwareTagManager(module)\n vmware_tag_manager.ensure_state()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_tag_manager.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright: (c) 2018, Ansible Project\n# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_tag_manager\nshort_description: Manage association of VMware tags with VMware objects\ndescription:\n- This module can be used to assign / remove VMware tags from the given VMware objects.\n- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.\n- All variables and VMware object names are case sensitive.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\n- Frederic Van Reet (@GBrawl)\nnotes:\n- Tested on vSphere 6.5\nrequirements:\n- python >= 2.6\n- PyVmomi\n- vSphere Automation SDK\noptions:\n tag_names:\n description:\n - List of tag(s) to be managed.\n - User can also specify category name by specifying colon separated value. 
For example, \"category_name:tag_name\".\n - User can also specify tag and category as dict, when tag or category contains colon.\n See example for more information. Added in version 2.10.\n - User can skip category name if you have unique tag names.\n required: True\n type: list\n state:\n description:\n - If C(state) is set to C(add) or C(present) will add the tags to the existing tag list of the given object.\n - If C(state) is set to C(remove) or C(absent) will remove the tags from the existing tag list of the given object.\n - If C(state) is set to C(set) will replace the tags of the given objects with the user defined list of tags.\n default: add\n choices: [ present, absent, add, remove, set ]\n type: str\n object_type:\n description:\n - Type of object to work with.\n required: True\n choices: [ VirtualMachine, Datacenter, ClusterComputeResource, HostSystem, DistributedVirtualSwitch, DistributedVirtualPortgroup, Datastore, DatastoreCluster, ResourcePool, Folder ]\n type: str\n object_name:\n description:\n - Name of the object to work with.\n - For DistributedVirtualPortgroups the format should be \"switch_name:portgroup_name\"\n required: True\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware_rest_client.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Add tags to a virtual machine\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0002\n - Category_0001:Sample_Tag_0003\n object_name: Fedora_VM\n object_type: VirtualMachine\n state: add\n delegate_to: localhost\n\n- name: Specify tag and category as dict\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - tag: tag_0001\n category: cat_0001\n - tag: tag_0002\n category: cat_0002\n object_name: Fedora_VM\n object_type: VirtualMachine\n state: add\n delegate_to: localhost\n\n- name: Remove a tag from a virtual machine\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0002\n object_name: Fedora_VM\n object_type: VirtualMachine\n state: remove\n delegate_to: localhost\n\n- name: Add tags to a distributed virtual switch\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0003\n object_name: Switch_0001\n object_type: DistributedVirtualSwitch\n state: add\n delegate_to: localhost\n\n- name: Add tags to a distributed virtual portgroup\n community.vmware.vmware_tag_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n validate_certs: no\n tag_names:\n - Sample_Tag_0004\n object_name: Switch_0001:Portgroup_0001\n object_type: DistributedVirtualPortgroup\n state: add\n delegate_to: localhost\n'''\n\nRETURN = r'''\ntag_status:\n description: metadata about tags related to object configuration\n returned: on success\n type: list\n sample: {\n \"current_tags\": [\n \"backup\",\n \"security\"\n ],\n \"desired_tags\": [\n \"security\"\n ],\n \"previous_tags\": [\n \"backup\",\n \"security\"\n ]\n }\n'''\nfrom ansible.module_utils.basic import AnsibleModule\nfrom 
ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name)\ntry:\n from com.vmware.vapi.std_client import DynamicID\n from com.vmware.vapi.std.errors_client import Error\nexcept ImportError:\n pass\n\n\nclass VmwareTagManager(VmwareRestClient):\n def __init__(self, module):\n \"\"\"\n Constructor\n \"\"\"\n super(VmwareTagManager, self).__init__(module)\n self.pyv = PyVmomi(module=module)\n\n self.object_type = self.params.get('object_type')\n self.object_name = self.params.get('object_name')\n self.managed_object = None\n\n if self.object_type == 'VirtualMachine':\n self.managed_object = self.pyv.get_vm_or_template(self.object_name)\n\n if self.object_type == 'Folder':\n self.managed_object = self.pyv.find_folder_by_name(self.object_name)\n\n if self.object_type == 'Datacenter':\n self.managed_object = self.pyv.find_datacenter_by_name(self.object_name)\n\n if self.object_type == 'Datastore':\n self.managed_object = self.pyv.find_datastore_by_name(self.object_name)\n\n if self.object_type == 'DatastoreCluster':\n self.managed_object = self.pyv.find_datastore_cluster_by_name(self.object_name)\n self.object_type = 'StoragePod'\n\n if self.object_type == 'ClusterComputeResource':\n self.managed_object = self.pyv.find_cluster_by_name(self.object_name)\n\n if self.object_type == 'ResourcePool':\n self.managed_object = self.pyv.find_resource_pool_by_name(self.object_name)\n\n if self.object_type == 'HostSystem':\n self.managed_object = self.pyv.find_hostsystem_by_name(self.object_name)\n\n if self.object_type == 'DistributedVirtualSwitch':\n self.managed_object = find_dvs_by_name(self.pyv.content, self.object_name)\n self.object_type = 'VmwareDistributedVirtualSwitch'\n\n if self.object_type == 'DistributedVirtualPortgroup':\n dvs_name, pg_name = self.object_name.split(\":\", 1)\n dv_switch = find_dvs_by_name(self.pyv.content, dvs_name)\n if dv_switch is None:\n self.module.fail_json(msg=\"A distributed virtual switch with name %s does not exist\" % dvs_name)\n self.managed_object = find_dvspg_by_name(dv_switch, pg_name)\n\n if self.managed_object is None:\n self.module.fail_json(msg=\"Failed to find the managed object for %s with type %s\" % (self.object_name, self.object_type))\n\n if not hasattr(self.managed_object, '_moId'):\n self.module.fail_json(msg=\"Unable to find managed object id for %s managed object\" % self.object_name)\n\n self.dynamic_managed_object = DynamicID(type=self.object_type, id=self.managed_object._moId)\n\n self.tag_service = self.api_client.tagging.Tag\n self.category_service = self.api_client.tagging.Category\n self.tag_association_svc = self.api_client.tagging.TagAssociation\n\n self.tag_names = self.params.get('tag_names')\n\n def ensure_state(self):\n \"\"\"\n Manage the internal state of tags\n\n \"\"\"\n results = dict(\n changed=False,\n tag_status=dict(),\n )\n changed = False\n action = self.params.get('state')\n available_tag_obj = self.get_tags_for_object(tag_service=self.tag_service,\n tag_assoc_svc=self.tag_association_svc,\n dobj=self.dynamic_managed_object)\n\n _temp_prev_tags = [\"%s:%s\" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)]\n results['tag_status']['previous_tags'] = _temp_prev_tags\n results['tag_status']['desired_tags'] = self.tag_names\n\n # Check if category and tag combination exists as per user request\n 
removed_tags_for_set = False\n for tag in self.tag_names:\n category_obj, category_name, tag_name = None, None, None\n if isinstance(tag, dict):\n tag_name = tag.get('tag')\n category_name = tag.get('category')\n if category_name:\n # User specified category\n category_obj = self.search_svc_object_by_name(self.category_service, category_name)\n if not category_obj:\n self.module.fail_json(msg=\"Unable to find the category %s\" % category_name)\n elif isinstance(tag, str):\n if \":\" in tag:\n # User specified category\n category_name, tag_name = tag.split(\":\", 1)\n category_obj = self.search_svc_object_by_name(self.category_service, category_name)\n if not category_obj:\n self.module.fail_json(msg=\"Unable to find the category %s\" % category_name)\n else:\n # User specified only tag\n tag_name = tag\n\n if category_name:\n tag_obj = self.get_tag_by_category(tag_name=tag_name, category_name=category_name)\n else:\n tag_obj = self.get_tag_by_name(tag_name=tag_name)\n\n if not tag_obj:\n self.module.fail_json(msg=\"Unable to find the tag %s\" % tag_name)\n\n if action in ('add', 'present'):\n if tag_obj not in available_tag_obj:\n # Tag is not already applied\n try:\n self.tag_association_svc.attach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object)\n changed = True\n except Error as error:\n self.module.fail_json(msg=\"%s\" % self.get_error_message(error))\n\n elif action == 'set':\n # Remove all tags first\n try:\n if not removed_tags_for_set:\n for av_tag in available_tag_obj:\n self.tag_association_svc.detach(tag_id=av_tag.id, object_id=self.dynamic_managed_object)\n removed_tags_for_set = True\n self.tag_association_svc.attach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object)\n changed = True\n except Error as error:\n self.module.fail_json(msg=\"%s\" % self.get_error_message(error))\n\n elif action in ('remove', 'absent'):\n if tag_obj in available_tag_obj:\n try:\n self.tag_association_svc.detach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object)\n changed = True\n except Error as error:\n self.module.fail_json(msg=\"%s\" % self.get_error_message(error))\n\n _temp_curr_tags = [\"%s:%s\" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)]\n results['tag_status']['current_tags'] = _temp_curr_tags\n results['changed'] = changed\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n argument_spec.update(\n tag_names=dict(type='list', required=True),\n state=dict(type='str', choices=['absent', 'add', 'present', 'remove', 'set'], default='add'),\n object_name=dict(type='str', required=True),\n object_type=dict(type='str', required=True, choices=['VirtualMachine', 'Datacenter', 'ClusterComputeResource',\n 'HostSystem', 'DistributedVirtualSwitch',\n 'DistributedVirtualPortgroup', 'Datastore', 'ResourcePool',\n 'Folder', 'DatastoreCluster']),\n )\n module = AnsibleModule(argument_spec=argument_spec)\n\n vmware_tag_manager = VmwareTagManager(module)\n vmware_tag_manager.ensure_state()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_tag_manager.py"}]}
4,051
519
gh_patches_debug_20895
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-707
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- impossible to call fetch_data manual Because i try to add my own ics http url i need debug it. Calling fetch_data doesn't work: ``` takes 0 positional arguments but 1 was given ``` just an extra question: Is it possible to use {%y} (small) to get 23 not 2023? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `custom_components/waste_collection_schedule/__init__.py` Content: ``` 1 """Waste Collection Schedule Component.""" 2 import logging 3 import site 4 from pathlib import Path 5 from random import randrange 6 7 import homeassistant.helpers.config_validation as cv 8 import homeassistant.util.dt as dt_util 9 import voluptuous as vol 10 from homeassistant.core import HomeAssistant, callback 11 from homeassistant.helpers.dispatcher import dispatcher_send 12 13 from .const import DOMAIN, UPDATE_SENSORS_SIGNAL 14 15 from homeassistant.helpers.event import async_call_later # isort:skip 16 from homeassistant.helpers.event import async_track_time_change # isort:skip 17 18 # add module directory to path 19 package_dir = Path(__file__).resolve().parents[0] 20 site.addsitedir(str(package_dir)) 21 from waste_collection_schedule import Customize, SourceShell # type: ignore # isort:skip # noqa: E402 22 23 _LOGGER = logging.getLogger(__name__) 24 25 CONF_SOURCES = "sources" 26 CONF_SOURCE_NAME = "name" 27 CONF_SOURCE_ARGS = "args" # source arguments 28 CONF_SOURCE_CALENDAR_TITLE = "calendar_title" 29 CONF_SEPARATOR = "separator" 30 CONF_FETCH_TIME = "fetch_time" 31 CONF_RANDOM_FETCH_TIME_OFFSET = "random_fetch_time_offset" 32 CONF_DAY_SWITCH_TIME = "day_switch_time" 33 34 CONF_CUSTOMIZE = "customize" 35 CONF_TYPE = "type" 36 CONF_ALIAS = "alias" 37 CONF_SHOW = "show" 38 CONF_ICON = "icon" 39 CONF_PICTURE = "picture" 40 CONF_USE_DEDICATED_CALENDAR = "use_dedicated_calendar" 41 CONF_DEDICATED_CALENDAR_TITLE = "dedicated_calendar_title" 42 43 CUSTOMIZE_CONFIG = vol.Schema( 44 { 45 vol.Optional(CONF_TYPE): cv.string, 46 vol.Optional(CONF_ALIAS): cv.string, 47 vol.Optional(CONF_SHOW): cv.boolean, 48 vol.Optional(CONF_ICON): cv.icon, 49 vol.Optional(CONF_PICTURE): cv.string, 50 vol.Optional(CONF_USE_DEDICATED_CALENDAR): cv.boolean, 51 vol.Optional(CONF_DEDICATED_CALENDAR_TITLE): cv.string, 52 } 53 ) 54 55 SOURCE_CONFIG = vol.Schema( 56 { 57 vol.Required(CONF_SOURCE_NAME): cv.string, 58 vol.Required(CONF_SOURCE_ARGS): dict, 59 vol.Optional(CONF_CUSTOMIZE, default=[]): vol.All( 60 cv.ensure_list, [CUSTOMIZE_CONFIG] 61 ), 62 vol.Optional(CONF_SOURCE_CALENDAR_TITLE): cv.string, 63 } 64 ) 65 66 CONFIG_SCHEMA = vol.Schema( 67 { 68 DOMAIN: vol.Schema( 69 { 70 vol.Required(CONF_SOURCES): vol.All(cv.ensure_list, [SOURCE_CONFIG]), 71 vol.Optional(CONF_SEPARATOR, default=", "): cv.string, 72 vol.Optional(CONF_FETCH_TIME, default="01:00"): cv.time, 73 vol.Optional( 74 CONF_RANDOM_FETCH_TIME_OFFSET, default=60 75 ): cv.positive_int, 76 vol.Optional(CONF_DAY_SWITCH_TIME, default="10:00"): cv.time, 77 } 78 ) 79 }, 80 extra=vol.ALLOW_EXTRA, 81 ) 82 83 84 async def async_setup(hass: HomeAssistant, config: dict): 85 """Set up the component. 
config contains data from configuration.yaml.""" 86 # create empty api object as singleton 87 api = WasteCollectionApi( 88 hass, 89 separator=config[DOMAIN][CONF_SEPARATOR], 90 fetch_time=config[DOMAIN][CONF_FETCH_TIME], 91 random_fetch_time_offset=config[DOMAIN][CONF_RANDOM_FETCH_TIME_OFFSET], 92 day_switch_time=config[DOMAIN][CONF_DAY_SWITCH_TIME], 93 ) 94 95 # create shells for source(s) 96 for source in config[DOMAIN][CONF_SOURCES]: 97 # create customize object 98 customize = {} 99 for c in source.get(CONF_CUSTOMIZE, {}): 100 customize[c[CONF_TYPE]] = Customize( 101 waste_type=c[CONF_TYPE], 102 alias=c.get(CONF_ALIAS), 103 show=c.get(CONF_SHOW, True), 104 icon=c.get(CONF_ICON), 105 picture=c.get(CONF_PICTURE), 106 use_dedicated_calendar=c.get(CONF_USE_DEDICATED_CALENDAR, False), 107 dedicated_calendar_title=c.get(CONF_DEDICATED_CALENDAR_TITLE, False), 108 ) 109 api.add_source_shell( 110 source_name=source[CONF_SOURCE_NAME], 111 customize=customize, 112 calendar_title=source.get(CONF_SOURCE_CALENDAR_TITLE), 113 source_args=source.get(CONF_SOURCE_ARGS, {}), 114 ) 115 116 # store api object 117 hass.data.setdefault(DOMAIN, api) 118 119 # load calendar platform 120 await hass.helpers.discovery.async_load_platform( 121 "calendar", DOMAIN, {"api": api}, config 122 ) 123 124 # initial fetch of all data 125 hass.add_job(api._fetch) 126 127 def fetch_data(): 128 hass.add_job(api._fetch) 129 130 # Register new Service fetch_data 131 hass.services.async_register(DOMAIN, 'fetch_data', fetch_data) 132 133 return True 134 135 136 class WasteCollectionApi: 137 def __init__( 138 self, hass, separator, fetch_time, random_fetch_time_offset, day_switch_time 139 ): 140 self._hass = hass 141 self._source_shells = [] 142 self._separator = separator 143 self._fetch_time = fetch_time 144 self._random_fetch_time_offset = random_fetch_time_offset 145 self._day_switch_time = day_switch_time 146 147 # start timer to fetch date once per day 148 async_track_time_change( 149 hass, 150 self._fetch_callback, 151 self._fetch_time.hour, 152 self._fetch_time.minute, 153 self._fetch_time.second, 154 ) 155 156 # start timer for day-switch time 157 if self._day_switch_time != self._fetch_time: 158 async_track_time_change( 159 hass, 160 self._update_sensors_callback, 161 self._day_switch_time.hour, 162 self._day_switch_time.minute, 163 self._day_switch_time.second, 164 ) 165 166 # add a timer at midnight (if not already there) to update days-to 167 midnight = dt_util.parse_time("00:00") 168 if midnight != self._fetch_time and midnight != self._day_switch_time: 169 async_track_time_change( 170 hass, 171 self._update_sensors_callback, 172 midnight.hour, 173 midnight.minute, 174 midnight.second, 175 ) 176 177 @property 178 def separator(self): 179 """Separator string, used to separator waste types.""" 180 return self._separator 181 182 @property 183 def fetch_time(self): 184 """When to fetch to data.""" 185 return self._fetch_time 186 187 @property 188 def day_switch_time(self): 189 """When to hide entries for today.""" 190 return self._day_switch_time 191 192 def add_source_shell( 193 self, 194 source_name, 195 customize, 196 source_args, 197 calendar_title, 198 ): 199 self._source_shells.append( 200 SourceShell.create( 201 source_name=source_name, 202 customize=customize, 203 source_args=source_args, 204 calendar_title=calendar_title, 205 ) 206 ) 207 208 def _fetch(self, *_): 209 for shell in self._source_shells: 210 shell.fetch() 211 212 self._update_sensors_callback() 213 214 @property 215 def shells(self): 216 return 
self._source_shells 217 218 def get_shell(self, index): 219 return self._source_shells[index] if index < len(self._source_shells) else None 220 221 @callback 222 def _fetch_callback(self, *_): 223 async_call_later( 224 self._hass, 225 randrange(0, 60 * self._random_fetch_time_offset), 226 self._fetch_now_callback, 227 ) 228 229 @callback 230 def _fetch_now_callback(self, *_): 231 self._hass.add_job(self._fetch) 232 233 @callback 234 def _update_sensors_callback(self, *_): 235 dispatcher_send(self._hass, UPDATE_SENSORS_SIGNAL) 236 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/custom_components/waste_collection_schedule/__init__.py b/custom_components/waste_collection_schedule/__init__.py --- a/custom_components/waste_collection_schedule/__init__.py +++ b/custom_components/waste_collection_schedule/__init__.py @@ -7,7 +7,7 @@ import homeassistant.helpers.config_validation as cv import homeassistant.util.dt as dt_util import voluptuous as vol -from homeassistant.core import HomeAssistant, callback +from homeassistant.core import HomeAssistant, ServiceCall, callback from homeassistant.helpers.dispatcher import dispatcher_send from .const import DOMAIN, UPDATE_SENSORS_SIGNAL @@ -123,12 +123,14 @@ # initial fetch of all data hass.add_job(api._fetch) - - def fetch_data(): + + async def async_fetch_data(service: ServiceCall) -> None: hass.add_job(api._fetch) # Register new Service fetch_data - hass.services.async_register(DOMAIN, 'fetch_data', fetch_data) + hass.services.async_register( + DOMAIN, "fetch_data", async_fetch_data, schema=vol.Schema({}) + ) return True
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/__init__.py b/custom_components/waste_collection_schedule/__init__.py\n--- a/custom_components/waste_collection_schedule/__init__.py\n+++ b/custom_components/waste_collection_schedule/__init__.py\n@@ -7,7 +7,7 @@\n import homeassistant.helpers.config_validation as cv\n import homeassistant.util.dt as dt_util\n import voluptuous as vol\n-from homeassistant.core import HomeAssistant, callback\n+from homeassistant.core import HomeAssistant, ServiceCall, callback\n from homeassistant.helpers.dispatcher import dispatcher_send\n \n from .const import DOMAIN, UPDATE_SENSORS_SIGNAL\n@@ -123,12 +123,14 @@\n \n # initial fetch of all data\n hass.add_job(api._fetch)\n- \n- def fetch_data():\n+\n+ async def async_fetch_data(service: ServiceCall) -> None:\n hass.add_job(api._fetch)\n \n # Register new Service fetch_data\n- hass.services.async_register(DOMAIN, 'fetch_data', fetch_data)\n+ hass.services.async_register(\n+ DOMAIN, \"fetch_data\", async_fetch_data, schema=vol.Schema({})\n+ )\n \n return True\n", "issue": "impossible to call fetch_data manual\nBecause i try to add my own ics http url i need debug it.\r\nCalling fetch_data doesn't work:\r\n\r\n```\r\ntakes 0 positional arguments but 1 was given\r\n```\r\n\r\njust an extra question: Is it possible to use {%y} (small) to get 23 not 2023?\n", "before_files": [{"content": "\"\"\"Waste Collection Schedule Component.\"\"\"\nimport logging\nimport site\nfrom pathlib import Path\nfrom random import randrange\n\nimport homeassistant.helpers.config_validation as cv\nimport homeassistant.util.dt as dt_util\nimport voluptuous as vol\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers.dispatcher import dispatcher_send\n\nfrom .const import DOMAIN, UPDATE_SENSORS_SIGNAL\n\nfrom homeassistant.helpers.event import async_call_later # isort:skip\nfrom homeassistant.helpers.event import async_track_time_change # isort:skip\n\n# add module directory to path\npackage_dir = Path(__file__).resolve().parents[0]\nsite.addsitedir(str(package_dir))\nfrom waste_collection_schedule import Customize, SourceShell # type: ignore # isort:skip # noqa: E402\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_SOURCES = \"sources\"\nCONF_SOURCE_NAME = \"name\"\nCONF_SOURCE_ARGS = \"args\" # source arguments\nCONF_SOURCE_CALENDAR_TITLE = \"calendar_title\"\nCONF_SEPARATOR = \"separator\"\nCONF_FETCH_TIME = \"fetch_time\"\nCONF_RANDOM_FETCH_TIME_OFFSET = \"random_fetch_time_offset\"\nCONF_DAY_SWITCH_TIME = \"day_switch_time\"\n\nCONF_CUSTOMIZE = \"customize\"\nCONF_TYPE = \"type\"\nCONF_ALIAS = \"alias\"\nCONF_SHOW = \"show\"\nCONF_ICON = \"icon\"\nCONF_PICTURE = \"picture\"\nCONF_USE_DEDICATED_CALENDAR = \"use_dedicated_calendar\"\nCONF_DEDICATED_CALENDAR_TITLE = \"dedicated_calendar_title\"\n\nCUSTOMIZE_CONFIG = vol.Schema(\n {\n vol.Optional(CONF_TYPE): cv.string,\n vol.Optional(CONF_ALIAS): cv.string,\n vol.Optional(CONF_SHOW): cv.boolean,\n vol.Optional(CONF_ICON): cv.icon,\n vol.Optional(CONF_PICTURE): cv.string,\n vol.Optional(CONF_USE_DEDICATED_CALENDAR): cv.boolean,\n vol.Optional(CONF_DEDICATED_CALENDAR_TITLE): cv.string,\n }\n)\n\nSOURCE_CONFIG = vol.Schema(\n {\n vol.Required(CONF_SOURCE_NAME): cv.string,\n vol.Required(CONF_SOURCE_ARGS): dict,\n vol.Optional(CONF_CUSTOMIZE, default=[]): vol.All(\n cv.ensure_list, [CUSTOMIZE_CONFIG]\n ),\n vol.Optional(CONF_SOURCE_CALENDAR_TITLE): cv.string,\n }\n)\n\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.Schema(\n {\n 
vol.Required(CONF_SOURCES): vol.All(cv.ensure_list, [SOURCE_CONFIG]),\n vol.Optional(CONF_SEPARATOR, default=\", \"): cv.string,\n vol.Optional(CONF_FETCH_TIME, default=\"01:00\"): cv.time,\n vol.Optional(\n CONF_RANDOM_FETCH_TIME_OFFSET, default=60\n ): cv.positive_int,\n vol.Optional(CONF_DAY_SWITCH_TIME, default=\"10:00\"): cv.time,\n }\n )\n },\n extra=vol.ALLOW_EXTRA,\n)\n\n\nasync def async_setup(hass: HomeAssistant, config: dict):\n \"\"\"Set up the component. config contains data from configuration.yaml.\"\"\"\n # create empty api object as singleton\n api = WasteCollectionApi(\n hass,\n separator=config[DOMAIN][CONF_SEPARATOR],\n fetch_time=config[DOMAIN][CONF_FETCH_TIME],\n random_fetch_time_offset=config[DOMAIN][CONF_RANDOM_FETCH_TIME_OFFSET],\n day_switch_time=config[DOMAIN][CONF_DAY_SWITCH_TIME],\n )\n\n # create shells for source(s)\n for source in config[DOMAIN][CONF_SOURCES]:\n # create customize object\n customize = {}\n for c in source.get(CONF_CUSTOMIZE, {}):\n customize[c[CONF_TYPE]] = Customize(\n waste_type=c[CONF_TYPE],\n alias=c.get(CONF_ALIAS),\n show=c.get(CONF_SHOW, True),\n icon=c.get(CONF_ICON),\n picture=c.get(CONF_PICTURE),\n use_dedicated_calendar=c.get(CONF_USE_DEDICATED_CALENDAR, False),\n dedicated_calendar_title=c.get(CONF_DEDICATED_CALENDAR_TITLE, False),\n )\n api.add_source_shell(\n source_name=source[CONF_SOURCE_NAME],\n customize=customize,\n calendar_title=source.get(CONF_SOURCE_CALENDAR_TITLE),\n source_args=source.get(CONF_SOURCE_ARGS, {}),\n )\n\n # store api object\n hass.data.setdefault(DOMAIN, api)\n\n # load calendar platform\n await hass.helpers.discovery.async_load_platform(\n \"calendar\", DOMAIN, {\"api\": api}, config\n )\n\n # initial fetch of all data\n hass.add_job(api._fetch)\n \n def fetch_data():\n hass.add_job(api._fetch)\n\n # Register new Service fetch_data\n hass.services.async_register(DOMAIN, 'fetch_data', fetch_data)\n\n return True\n\n\nclass WasteCollectionApi:\n def __init__(\n self, hass, separator, fetch_time, random_fetch_time_offset, day_switch_time\n ):\n self._hass = hass\n self._source_shells = []\n self._separator = separator\n self._fetch_time = fetch_time\n self._random_fetch_time_offset = random_fetch_time_offset\n self._day_switch_time = day_switch_time\n\n # start timer to fetch date once per day\n async_track_time_change(\n hass,\n self._fetch_callback,\n self._fetch_time.hour,\n self._fetch_time.minute,\n self._fetch_time.second,\n )\n\n # start timer for day-switch time\n if self._day_switch_time != self._fetch_time:\n async_track_time_change(\n hass,\n self._update_sensors_callback,\n self._day_switch_time.hour,\n self._day_switch_time.minute,\n self._day_switch_time.second,\n )\n\n # add a timer at midnight (if not already there) to update days-to\n midnight = dt_util.parse_time(\"00:00\")\n if midnight != self._fetch_time and midnight != self._day_switch_time:\n async_track_time_change(\n hass,\n self._update_sensors_callback,\n midnight.hour,\n midnight.minute,\n midnight.second,\n )\n\n @property\n def separator(self):\n \"\"\"Separator string, used to separator waste types.\"\"\"\n return self._separator\n\n @property\n def fetch_time(self):\n \"\"\"When to fetch to data.\"\"\"\n return self._fetch_time\n\n @property\n def day_switch_time(self):\n \"\"\"When to hide entries for today.\"\"\"\n return self._day_switch_time\n\n def add_source_shell(\n self,\n source_name,\n customize,\n source_args,\n calendar_title,\n ):\n self._source_shells.append(\n SourceShell.create(\n source_name=source_name,\n 
customize=customize,\n source_args=source_args,\n calendar_title=calendar_title,\n )\n )\n\n def _fetch(self, *_):\n for shell in self._source_shells:\n shell.fetch()\n\n self._update_sensors_callback()\n\n @property\n def shells(self):\n return self._source_shells\n\n def get_shell(self, index):\n return self._source_shells[index] if index < len(self._source_shells) else None\n\n @callback\n def _fetch_callback(self, *_):\n async_call_later(\n self._hass,\n randrange(0, 60 * self._random_fetch_time_offset),\n self._fetch_now_callback,\n )\n\n @callback\n def _fetch_now_callback(self, *_):\n self._hass.add_job(self._fetch)\n\n @callback\n def _update_sensors_callback(self, *_):\n dispatcher_send(self._hass, UPDATE_SENSORS_SIGNAL)\n", "path": "custom_components/waste_collection_schedule/__init__.py"}], "after_files": [{"content": "\"\"\"Waste Collection Schedule Component.\"\"\"\nimport logging\nimport site\nfrom pathlib import Path\nfrom random import randrange\n\nimport homeassistant.helpers.config_validation as cv\nimport homeassistant.util.dt as dt_util\nimport voluptuous as vol\nfrom homeassistant.core import HomeAssistant, ServiceCall, callback\nfrom homeassistant.helpers.dispatcher import dispatcher_send\n\nfrom .const import DOMAIN, UPDATE_SENSORS_SIGNAL\n\nfrom homeassistant.helpers.event import async_call_later # isort:skip\nfrom homeassistant.helpers.event import async_track_time_change # isort:skip\n\n# add module directory to path\npackage_dir = Path(__file__).resolve().parents[0]\nsite.addsitedir(str(package_dir))\nfrom waste_collection_schedule import Customize, SourceShell # type: ignore # isort:skip # noqa: E402\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_SOURCES = \"sources\"\nCONF_SOURCE_NAME = \"name\"\nCONF_SOURCE_ARGS = \"args\" # source arguments\nCONF_SOURCE_CALENDAR_TITLE = \"calendar_title\"\nCONF_SEPARATOR = \"separator\"\nCONF_FETCH_TIME = \"fetch_time\"\nCONF_RANDOM_FETCH_TIME_OFFSET = \"random_fetch_time_offset\"\nCONF_DAY_SWITCH_TIME = \"day_switch_time\"\n\nCONF_CUSTOMIZE = \"customize\"\nCONF_TYPE = \"type\"\nCONF_ALIAS = \"alias\"\nCONF_SHOW = \"show\"\nCONF_ICON = \"icon\"\nCONF_PICTURE = \"picture\"\nCONF_USE_DEDICATED_CALENDAR = \"use_dedicated_calendar\"\nCONF_DEDICATED_CALENDAR_TITLE = \"dedicated_calendar_title\"\n\nCUSTOMIZE_CONFIG = vol.Schema(\n {\n vol.Optional(CONF_TYPE): cv.string,\n vol.Optional(CONF_ALIAS): cv.string,\n vol.Optional(CONF_SHOW): cv.boolean,\n vol.Optional(CONF_ICON): cv.icon,\n vol.Optional(CONF_PICTURE): cv.string,\n vol.Optional(CONF_USE_DEDICATED_CALENDAR): cv.boolean,\n vol.Optional(CONF_DEDICATED_CALENDAR_TITLE): cv.string,\n }\n)\n\nSOURCE_CONFIG = vol.Schema(\n {\n vol.Required(CONF_SOURCE_NAME): cv.string,\n vol.Required(CONF_SOURCE_ARGS): dict,\n vol.Optional(CONF_CUSTOMIZE, default=[]): vol.All(\n cv.ensure_list, [CUSTOMIZE_CONFIG]\n ),\n vol.Optional(CONF_SOURCE_CALENDAR_TITLE): cv.string,\n }\n)\n\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.Schema(\n {\n vol.Required(CONF_SOURCES): vol.All(cv.ensure_list, [SOURCE_CONFIG]),\n vol.Optional(CONF_SEPARATOR, default=\", \"): cv.string,\n vol.Optional(CONF_FETCH_TIME, default=\"01:00\"): cv.time,\n vol.Optional(\n CONF_RANDOM_FETCH_TIME_OFFSET, default=60\n ): cv.positive_int,\n vol.Optional(CONF_DAY_SWITCH_TIME, default=\"10:00\"): cv.time,\n }\n )\n },\n extra=vol.ALLOW_EXTRA,\n)\n\n\nasync def async_setup(hass: HomeAssistant, config: dict):\n \"\"\"Set up the component. 
config contains data from configuration.yaml.\"\"\"\n # create empty api object as singleton\n api = WasteCollectionApi(\n hass,\n separator=config[DOMAIN][CONF_SEPARATOR],\n fetch_time=config[DOMAIN][CONF_FETCH_TIME],\n random_fetch_time_offset=config[DOMAIN][CONF_RANDOM_FETCH_TIME_OFFSET],\n day_switch_time=config[DOMAIN][CONF_DAY_SWITCH_TIME],\n )\n\n # create shells for source(s)\n for source in config[DOMAIN][CONF_SOURCES]:\n # create customize object\n customize = {}\n for c in source.get(CONF_CUSTOMIZE, {}):\n customize[c[CONF_TYPE]] = Customize(\n waste_type=c[CONF_TYPE],\n alias=c.get(CONF_ALIAS),\n show=c.get(CONF_SHOW, True),\n icon=c.get(CONF_ICON),\n picture=c.get(CONF_PICTURE),\n use_dedicated_calendar=c.get(CONF_USE_DEDICATED_CALENDAR, False),\n dedicated_calendar_title=c.get(CONF_DEDICATED_CALENDAR_TITLE, False),\n )\n api.add_source_shell(\n source_name=source[CONF_SOURCE_NAME],\n customize=customize,\n calendar_title=source.get(CONF_SOURCE_CALENDAR_TITLE),\n source_args=source.get(CONF_SOURCE_ARGS, {}),\n )\n\n # store api object\n hass.data.setdefault(DOMAIN, api)\n\n # load calendar platform\n await hass.helpers.discovery.async_load_platform(\n \"calendar\", DOMAIN, {\"api\": api}, config\n )\n\n # initial fetch of all data\n hass.add_job(api._fetch)\n\n async def async_fetch_data(service: ServiceCall) -> None:\n hass.add_job(api._fetch)\n\n # Register new Service fetch_data\n hass.services.async_register(\n DOMAIN, \"fetch_data\", async_fetch_data, schema=vol.Schema({})\n )\n\n return True\n\n\nclass WasteCollectionApi:\n def __init__(\n self, hass, separator, fetch_time, random_fetch_time_offset, day_switch_time\n ):\n self._hass = hass\n self._source_shells = []\n self._separator = separator\n self._fetch_time = fetch_time\n self._random_fetch_time_offset = random_fetch_time_offset\n self._day_switch_time = day_switch_time\n\n # start timer to fetch date once per day\n async_track_time_change(\n hass,\n self._fetch_callback,\n self._fetch_time.hour,\n self._fetch_time.minute,\n self._fetch_time.second,\n )\n\n # start timer for day-switch time\n if self._day_switch_time != self._fetch_time:\n async_track_time_change(\n hass,\n self._update_sensors_callback,\n self._day_switch_time.hour,\n self._day_switch_time.minute,\n self._day_switch_time.second,\n )\n\n # add a timer at midnight (if not already there) to update days-to\n midnight = dt_util.parse_time(\"00:00\")\n if midnight != self._fetch_time and midnight != self._day_switch_time:\n async_track_time_change(\n hass,\n self._update_sensors_callback,\n midnight.hour,\n midnight.minute,\n midnight.second,\n )\n\n @property\n def separator(self):\n \"\"\"Separator string, used to separator waste types.\"\"\"\n return self._separator\n\n @property\n def fetch_time(self):\n \"\"\"When to fetch to data.\"\"\"\n return self._fetch_time\n\n @property\n def day_switch_time(self):\n \"\"\"When to hide entries for today.\"\"\"\n return self._day_switch_time\n\n def add_source_shell(\n self,\n source_name,\n customize,\n source_args,\n calendar_title,\n ):\n self._source_shells.append(\n SourceShell.create(\n source_name=source_name,\n customize=customize,\n source_args=source_args,\n calendar_title=calendar_title,\n )\n )\n\n def _fetch(self, *_):\n for shell in self._source_shells:\n shell.fetch()\n\n self._update_sensors_callback()\n\n @property\n def shells(self):\n return self._source_shells\n\n def get_shell(self, index):\n return self._source_shells[index] if index < len(self._source_shells) else None\n\n @callback\n 
def _fetch_callback(self, *_):\n async_call_later(\n self._hass,\n randrange(0, 60 * self._random_fetch_time_offset),\n self._fetch_now_callback,\n )\n\n @callback\n def _fetch_now_callback(self, *_):\n self._hass.add_job(self._fetch)\n\n @callback\n def _update_sensors_callback(self, *_):\n dispatcher_send(self._hass, UPDATE_SENSORS_SIGNAL)\n", "path": "custom_components/waste_collection_schedule/__init__.py"}]}
2,576
253
gh_patches_debug_3
rasdani/github-patches
git_diff
plotly__dash-2553
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Flask 2.2.3 dependency has HIGH security vulnerability (fixed in 2.2.5) Issue #2538 pinned the upper bound of the Flask dependency to 2.2.3. However Flask 2.2.3 is affected by a HIGH security vulnerability that is fixed in Flask 2.2.5. See https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-30861 Debian 11, Python 3.11 (from Python official 3.11 Docker image) ``` # pip install dash Collecting dash Downloading dash-2.10.1-py3-none-any.whl (10.3 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 10.3/10.3 MB 14.1 MB/s eta 0:00:00 Collecting Flask<=2.2.3,>=1.0.4 (from dash) Downloading Flask-2.2.3-py3-none-any.whl (101 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 101.8/101.8 kB 17.0 MB/s eta 0:00:00 ``` ``` dash 2.10.1 dash-core-components 2.0.0 dash-html-components 2.0.0 dash-table 5.0.0 ``` **Describe the bug** Dash installs a vulnerable version of Flask and dependency scans flag the vulnerability. **Expected behavior** No known and fixed security vulnerabilities added. Perhaps Pin to 2.2.* instead of specific 2.2.3 version where future pins will find new security issues. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dash/version.py` Content: ``` 1 __version__ = "2.10.1" 2 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dash/version.py b/dash/version.py --- a/dash/version.py +++ b/dash/version.py @@ -1 +1 @@ -__version__ = "2.10.1" +__version__ = "2.10.2"
{"golden_diff": "diff --git a/dash/version.py b/dash/version.py\n--- a/dash/version.py\n+++ b/dash/version.py\n@@ -1 +1 @@\n-__version__ = \"2.10.1\"\n+__version__ = \"2.10.2\"\n", "issue": "[BUG] Flask 2.2.3 dependency has HIGH security vulnerability (fixed in 2.2.5)\nIssue #2538 pinned the upper bound of the Flask dependency to 2.2.3. However Flask 2.2.3 is affected by a HIGH security vulnerability that is fixed in Flask 2.2.5. See https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-30861\r\n\r\nDebian 11, Python 3.11 (from Python official 3.11 Docker image)\r\n```\r\n# pip install dash\r\nCollecting dash\r\n Downloading dash-2.10.1-py3-none-any.whl (10.3 MB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 10.3/10.3 MB 14.1 MB/s eta 0:00:00\r\nCollecting Flask<=2.2.3,>=1.0.4 (from dash)\r\n Downloading Flask-2.2.3-py3-none-any.whl (101 kB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 101.8/101.8 kB 17.0 MB/s eta 0:00:00\r\n```\r\n\r\n```\r\ndash 2.10.1\r\ndash-core-components 2.0.0\r\ndash-html-components 2.0.0\r\ndash-table 5.0.0\r\n```\r\n\r\n**Describe the bug**\r\n\r\nDash installs a vulnerable version of Flask and dependency scans flag the vulnerability.\r\n\r\n**Expected behavior**\r\n\r\nNo known and fixed security vulnerabilities added. Perhaps Pin to 2.2.* instead of specific 2.2.3 version where future pins will find new security issues.\r\n\r\n\n", "before_files": [{"content": "__version__ = \"2.10.1\"\n", "path": "dash/version.py"}], "after_files": [{"content": "__version__ = \"2.10.2\"\n", "path": "dash/version.py"}]}
692
60
gh_patches_debug_31812
rasdani/github-patches
git_diff
ToucanToco__toucan-connectors-596
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [bigquery] Delegate parameters handling to the library See https://github.com/ToucanToco/toucan-connectors/pull/594#discussion_r870425994 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `toucan_connectors/google_big_query/google_big_query_connector.py` Content: ``` 1 import logging 2 from enum import Enum 3 from timeit import default_timer as timer 4 from typing import Any, Dict, List, Optional, Union 5 6 import pandas 7 import pandas as pd 8 from google.cloud import bigquery 9 from google.oauth2.service_account import Credentials 10 from pydantic import Field 11 12 from toucan_connectors.google_credentials import GoogleCredentials, get_google_oauth2_credentials 13 from toucan_connectors.toucan_connector import ToucanConnector, ToucanDataSource 14 15 16 class Dialect(str, Enum): 17 legacy = 'legacy' 18 standard = 'standard' 19 20 21 class GoogleBigQueryDataSource(ToucanDataSource): 22 query: str = Field( 23 ..., 24 description='You can find details on the query syntax ' 25 '<a href="https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax">here</a>', 26 widget='sql', 27 ) 28 29 30 BigQueryParam = Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter] 31 32 33 # NOTE: This does not play nicely with dates. They're a bit tricky 34 # though, as we'd have to try and parse dates from strings to 35 # determine if something is a date or not. Until then, we can just 36 # use a cast. eg: SELECT * FROM table WHERE STRING(date_col) IN UNNEST({{my_dates}}) 37 def _define_scalar_type(value: Any) -> str: 38 if isinstance(value, bool): 39 return 'BOOL' 40 elif isinstance(value, int): 41 return 'NUMERIC' 42 elif isinstance(value, float): 43 return 'FLOAT64' 44 elif isinstance(value, str): 45 return 'STRING' 46 # TODO - check bad return type 47 return 'STRING' 48 49 50 def _define_array_type(name: str, values: List[Any]) -> BigQueryParam: 51 return bigquery.ArrayQueryParameter( 52 name, _define_scalar_type(values[0] if len(values) > 0 else ''), values 53 ) 54 55 56 def _define_query_param(name: str, value: Any) -> BigQueryParam: 57 if isinstance(value, list): 58 return _define_array_type(name, value) 59 else: 60 return bigquery.ScalarQueryParameter(name, _define_scalar_type(value), value) 61 62 63 class GoogleBigQueryConnector(ToucanConnector): 64 data_source_model: GoogleBigQueryDataSource 65 66 credentials: GoogleCredentials = Field( 67 ..., 68 title='Google Credentials', 69 description='For authentication, download an authentication file from your ' 70 '<a href="https://console.developers.google.com/apis/credentials" target="_blank">Google Console</a> and ' 71 'use the values here. This is an oauth2 credential file. For more information see this ' 72 '<a href="https://gspread.readthedocs.io/en/latest/oauth2.html" target="_blank" >documentation</a>. ' 73 'You should use "service_account" credentials, which is the preferred type of credentials ' 74 'to use when authenticating on behalf of a service or application', 75 ) 76 dialect: Dialect = Field( 77 Dialect.standard, 78 description='BigQuery allows you to choose between standard and legacy SQL as query syntax. ' 79 'The preferred query syntax is the default standard SQL. 
You can find more information on this ' 80 '<a href="https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax" target="_blank" >documentation</a>', 81 ) 82 scopes: List[str] = Field( 83 ['https://www.googleapis.com/auth/bigquery'], 84 title='OAuth scopes', 85 description='OAuth 2.0 scopes define the level of access you need to request ' 86 'the Google APIs. For more information, see this ' 87 '<a href="https://developers.google.com/identity/protocols/googlescopes" target="_blank" >documentation</a>', 88 ) 89 90 @staticmethod 91 def _get_google_credentials(credentials: GoogleCredentials, scopes: List[str]) -> Credentials: 92 credentials = get_google_oauth2_credentials(credentials).with_scopes(scopes) 93 return credentials 94 95 @staticmethod 96 def _connect(credentials: Credentials) -> bigquery.Client: 97 start = timer() 98 client = bigquery.Client(credentials=credentials) 99 end = timer() 100 logging.getLogger(__name__).info( 101 f'[benchmark][google_big_query] - connect {end - start} seconds', 102 extra={ 103 'benchmark': { 104 'operation': 'connect', 105 'execution_time': end - start, 106 'connector': 'google_big_query', 107 } 108 }, 109 ) 110 return client 111 112 @staticmethod 113 def _execute_query(client: bigquery.Client, query: str, parameters: List) -> pandas.DataFrame: 114 try: 115 start = timer() 116 result = ( 117 client.query(query, job_config=bigquery.QueryJobConfig(query_parameters=parameters)) 118 .result() 119 .to_dataframe( 120 create_bqstorage_client=True, 121 ) # Use to generate directly a dataframe pandas 122 ) 123 end = timer() 124 logging.getLogger(__name__).info( 125 f'[benchmark][google_big_query] - execute {end - start} seconds', 126 extra={ 127 'benchmark': { 128 'operation': 'execute', 129 'execution_time': end - start, 130 'connector': 'google_big_query', 131 } 132 }, 133 ) 134 return result 135 except TypeError as e: 136 logging.getLogger(__name__).error(f'Error to execute request {query} - {e}') 137 raise e 138 139 @staticmethod 140 def _prepare_parameters(query: str, parameters: Optional[Dict]) -> List: 141 """replace ToucanToco variable definitions by Google Big Query variable definition""" 142 query_parameters = [] 143 for param_name, param_value in (parameters or {}).items(): 144 if query.find('@' + param_name) > -1: 145 # set all parameters with a type defined and necessary for Big Query 146 query_parameters.append(_define_query_param(param_name, param_value)) 147 return query_parameters 148 149 @staticmethod 150 def _prepare_query(query: str) -> str: 151 """replace ToucanToco variable definition by Google Big Query variable definition""" 152 new_query = query.replace('{{', '@').replace('}}', '') 153 return new_query 154 155 def _retrieve_data(self, data_source: GoogleBigQueryDataSource) -> pd.DataFrame: 156 logging.getLogger(__name__).debug( 157 f'Play request {data_source.query} with parameters {data_source.parameters}' 158 ) 159 160 credentials = GoogleBigQueryConnector._get_google_credentials(self.credentials, self.scopes) 161 query = GoogleBigQueryConnector._prepare_query(data_source.query) 162 parameters = GoogleBigQueryConnector._prepare_parameters(query, data_source.parameters) 163 164 client = GoogleBigQueryConnector._connect(credentials) 165 result = GoogleBigQueryConnector._execute_query(client, query, parameters) 166 167 return result 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/toucan_connectors/google_big_query/google_big_query_connector.py b/toucan_connectors/google_big_query/google_big_query_connector.py --- a/toucan_connectors/google_big_query/google_big_query_connector.py +++ b/toucan_connectors/google_big_query/google_big_query_connector.py @@ -6,6 +6,7 @@ import pandas import pandas as pd from google.cloud import bigquery +from google.cloud.bigquery.dbapi import _helpers as bigquery_helpers from google.oauth2.service_account import Credentials from pydantic import Field @@ -30,34 +31,16 @@ BigQueryParam = Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter] -# NOTE: This does not play nicely with dates. They're a bit tricky -# though, as we'd have to try and parse dates from strings to -# determine if something is a date or not. Until then, we can just -# use a cast. eg: SELECT * FROM table WHERE STRING(date_col) IN UNNEST({{my_dates}}) -def _define_scalar_type(value: Any) -> str: - if isinstance(value, bool): - return 'BOOL' - elif isinstance(value, int): - return 'NUMERIC' - elif isinstance(value, float): - return 'FLOAT64' - elif isinstance(value, str): - return 'STRING' - # TODO - check bad return type - return 'STRING' - - -def _define_array_type(name: str, values: List[Any]) -> BigQueryParam: - return bigquery.ArrayQueryParameter( - name, _define_scalar_type(values[0] if len(values) > 0 else ''), values - ) - - def _define_query_param(name: str, value: Any) -> BigQueryParam: if isinstance(value, list): - return _define_array_type(name, value) + return ( + bigquery_helpers.array_to_query_parameter(value=value, name=name) + if len(value) > 0 + # array_to_query_parameter raises an exception in case of an empty list + else bigquery.ArrayQueryParameter(name=name, array_type='STRING', values=value) + ) else: - return bigquery.ScalarQueryParameter(name, _define_scalar_type(value), value) + return bigquery_helpers.scalar_to_query_parameter(value=value, name=name) class GoogleBigQueryConnector(ToucanConnector):
{"golden_diff": "diff --git a/toucan_connectors/google_big_query/google_big_query_connector.py b/toucan_connectors/google_big_query/google_big_query_connector.py\n--- a/toucan_connectors/google_big_query/google_big_query_connector.py\n+++ b/toucan_connectors/google_big_query/google_big_query_connector.py\n@@ -6,6 +6,7 @@\n import pandas\n import pandas as pd\n from google.cloud import bigquery\n+from google.cloud.bigquery.dbapi import _helpers as bigquery_helpers\n from google.oauth2.service_account import Credentials\n from pydantic import Field\n \n@@ -30,34 +31,16 @@\n BigQueryParam = Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]\n \n \n-# NOTE: This does not play nicely with dates. They're a bit tricky\n-# though, as we'd have to try and parse dates from strings to\n-# determine if something is a date or not. Until then, we can just\n-# use a cast. eg: SELECT * FROM table WHERE STRING(date_col) IN UNNEST({{my_dates}})\n-def _define_scalar_type(value: Any) -> str:\n- if isinstance(value, bool):\n- return 'BOOL'\n- elif isinstance(value, int):\n- return 'NUMERIC'\n- elif isinstance(value, float):\n- return 'FLOAT64'\n- elif isinstance(value, str):\n- return 'STRING'\n- # TODO - check bad return type\n- return 'STRING'\n-\n-\n-def _define_array_type(name: str, values: List[Any]) -> BigQueryParam:\n- return bigquery.ArrayQueryParameter(\n- name, _define_scalar_type(values[0] if len(values) > 0 else ''), values\n- )\n-\n-\n def _define_query_param(name: str, value: Any) -> BigQueryParam:\n if isinstance(value, list):\n- return _define_array_type(name, value)\n+ return (\n+ bigquery_helpers.array_to_query_parameter(value=value, name=name)\n+ if len(value) > 0\n+ # array_to_query_parameter raises an exception in case of an empty list\n+ else bigquery.ArrayQueryParameter(name=name, array_type='STRING', values=value)\n+ )\n else:\n- return bigquery.ScalarQueryParameter(name, _define_scalar_type(value), value)\n+ return bigquery_helpers.scalar_to_query_parameter(value=value, name=name)\n \n \n class GoogleBigQueryConnector(ToucanConnector):\n", "issue": "[bigquery] Delegate parameters handling to the library\nSee https://github.com/ToucanToco/toucan-connectors/pull/594#discussion_r870425994\n", "before_files": [{"content": "import logging\nfrom enum import Enum\nfrom timeit import default_timer as timer\nfrom typing import Any, Dict, List, Optional, Union\n\nimport pandas\nimport pandas as pd\nfrom google.cloud import bigquery\nfrom google.oauth2.service_account import Credentials\nfrom pydantic import Field\n\nfrom toucan_connectors.google_credentials import GoogleCredentials, get_google_oauth2_credentials\nfrom toucan_connectors.toucan_connector import ToucanConnector, ToucanDataSource\n\n\nclass Dialect(str, Enum):\n legacy = 'legacy'\n standard = 'standard'\n\n\nclass GoogleBigQueryDataSource(ToucanDataSource):\n query: str = Field(\n ...,\n description='You can find details on the query syntax '\n '<a href=\"https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax\">here</a>',\n widget='sql',\n )\n\n\nBigQueryParam = Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]\n\n\n# NOTE: This does not play nicely with dates. They're a bit tricky\n# though, as we'd have to try and parse dates from strings to\n# determine if something is a date or not. Until then, we can just\n# use a cast. 
eg: SELECT * FROM table WHERE STRING(date_col) IN UNNEST({{my_dates}})\ndef _define_scalar_type(value: Any) -> str:\n if isinstance(value, bool):\n return 'BOOL'\n elif isinstance(value, int):\n return 'NUMERIC'\n elif isinstance(value, float):\n return 'FLOAT64'\n elif isinstance(value, str):\n return 'STRING'\n # TODO - check bad return type\n return 'STRING'\n\n\ndef _define_array_type(name: str, values: List[Any]) -> BigQueryParam:\n return bigquery.ArrayQueryParameter(\n name, _define_scalar_type(values[0] if len(values) > 0 else ''), values\n )\n\n\ndef _define_query_param(name: str, value: Any) -> BigQueryParam:\n if isinstance(value, list):\n return _define_array_type(name, value)\n else:\n return bigquery.ScalarQueryParameter(name, _define_scalar_type(value), value)\n\n\nclass GoogleBigQueryConnector(ToucanConnector):\n data_source_model: GoogleBigQueryDataSource\n\n credentials: GoogleCredentials = Field(\n ...,\n title='Google Credentials',\n description='For authentication, download an authentication file from your '\n '<a href=\"https://console.developers.google.com/apis/credentials\" target=\"_blank\">Google Console</a> and '\n 'use the values here. This is an oauth2 credential file. For more information see this '\n '<a href=\"https://gspread.readthedocs.io/en/latest/oauth2.html\" target=\"_blank\" >documentation</a>. '\n 'You should use \"service_account\" credentials, which is the preferred type of credentials '\n 'to use when authenticating on behalf of a service or application',\n )\n dialect: Dialect = Field(\n Dialect.standard,\n description='BigQuery allows you to choose between standard and legacy SQL as query syntax. '\n 'The preferred query syntax is the default standard SQL. You can find more information on this '\n '<a href=\"https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax\" target=\"_blank\" >documentation</a>',\n )\n scopes: List[str] = Field(\n ['https://www.googleapis.com/auth/bigquery'],\n title='OAuth scopes',\n description='OAuth 2.0 scopes define the level of access you need to request '\n 'the Google APIs. 
For more information, see this '\n '<a href=\"https://developers.google.com/identity/protocols/googlescopes\" target=\"_blank\" >documentation</a>',\n )\n\n @staticmethod\n def _get_google_credentials(credentials: GoogleCredentials, scopes: List[str]) -> Credentials:\n credentials = get_google_oauth2_credentials(credentials).with_scopes(scopes)\n return credentials\n\n @staticmethod\n def _connect(credentials: Credentials) -> bigquery.Client:\n start = timer()\n client = bigquery.Client(credentials=credentials)\n end = timer()\n logging.getLogger(__name__).info(\n f'[benchmark][google_big_query] - connect {end - start} seconds',\n extra={\n 'benchmark': {\n 'operation': 'connect',\n 'execution_time': end - start,\n 'connector': 'google_big_query',\n }\n },\n )\n return client\n\n @staticmethod\n def _execute_query(client: bigquery.Client, query: str, parameters: List) -> pandas.DataFrame:\n try:\n start = timer()\n result = (\n client.query(query, job_config=bigquery.QueryJobConfig(query_parameters=parameters))\n .result()\n .to_dataframe(\n create_bqstorage_client=True,\n ) # Use to generate directly a dataframe pandas\n )\n end = timer()\n logging.getLogger(__name__).info(\n f'[benchmark][google_big_query] - execute {end - start} seconds',\n extra={\n 'benchmark': {\n 'operation': 'execute',\n 'execution_time': end - start,\n 'connector': 'google_big_query',\n }\n },\n )\n return result\n except TypeError as e:\n logging.getLogger(__name__).error(f'Error to execute request {query} - {e}')\n raise e\n\n @staticmethod\n def _prepare_parameters(query: str, parameters: Optional[Dict]) -> List:\n \"\"\"replace ToucanToco variable definitions by Google Big Query variable definition\"\"\"\n query_parameters = []\n for param_name, param_value in (parameters or {}).items():\n if query.find('@' + param_name) > -1:\n # set all parameters with a type defined and necessary for Big Query\n query_parameters.append(_define_query_param(param_name, param_value))\n return query_parameters\n\n @staticmethod\n def _prepare_query(query: str) -> str:\n \"\"\"replace ToucanToco variable definition by Google Big Query variable definition\"\"\"\n new_query = query.replace('{{', '@').replace('}}', '')\n return new_query\n\n def _retrieve_data(self, data_source: GoogleBigQueryDataSource) -> pd.DataFrame:\n logging.getLogger(__name__).debug(\n f'Play request {data_source.query} with parameters {data_source.parameters}'\n )\n\n credentials = GoogleBigQueryConnector._get_google_credentials(self.credentials, self.scopes)\n query = GoogleBigQueryConnector._prepare_query(data_source.query)\n parameters = GoogleBigQueryConnector._prepare_parameters(query, data_source.parameters)\n\n client = GoogleBigQueryConnector._connect(credentials)\n result = GoogleBigQueryConnector._execute_query(client, query, parameters)\n\n return result\n", "path": "toucan_connectors/google_big_query/google_big_query_connector.py"}], "after_files": [{"content": "import logging\nfrom enum import Enum\nfrom timeit import default_timer as timer\nfrom typing import Any, Dict, List, Optional, Union\n\nimport pandas\nimport pandas as pd\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery.dbapi import _helpers as bigquery_helpers\nfrom google.oauth2.service_account import Credentials\nfrom pydantic import Field\n\nfrom toucan_connectors.google_credentials import GoogleCredentials, get_google_oauth2_credentials\nfrom toucan_connectors.toucan_connector import ToucanConnector, ToucanDataSource\n\n\nclass Dialect(str, Enum):\n legacy = 
'legacy'\n standard = 'standard'\n\n\nclass GoogleBigQueryDataSource(ToucanDataSource):\n query: str = Field(\n ...,\n description='You can find details on the query syntax '\n '<a href=\"https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax\">here</a>',\n widget='sql',\n )\n\n\nBigQueryParam = Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]\n\n\ndef _define_query_param(name: str, value: Any) -> BigQueryParam:\n if isinstance(value, list):\n return (\n bigquery_helpers.array_to_query_parameter(value=value, name=name)\n if len(value) > 0\n # array_to_query_parameter raises an exception in case of an empty list\n else bigquery.ArrayQueryParameter(name=name, array_type='STRING', values=value)\n )\n else:\n return bigquery_helpers.scalar_to_query_parameter(value=value, name=name)\n\n\nclass GoogleBigQueryConnector(ToucanConnector):\n data_source_model: GoogleBigQueryDataSource\n\n credentials: GoogleCredentials = Field(\n ...,\n title='Google Credentials',\n description='For authentication, download an authentication file from your '\n '<a href=\"https://console.developers.google.com/apis/credentials\" target=\"_blank\">Google Console</a> and '\n 'use the values here. This is an oauth2 credential file. For more information see this '\n '<a href=\"https://gspread.readthedocs.io/en/latest/oauth2.html\" target=\"_blank\" >documentation</a>. '\n 'You should use \"service_account\" credentials, which is the preferred type of credentials '\n 'to use when authenticating on behalf of a service or application',\n )\n dialect: Dialect = Field(\n Dialect.standard,\n description='BigQuery allows you to choose between standard and legacy SQL as query syntax. '\n 'The preferred query syntax is the default standard SQL. You can find more information on this '\n '<a href=\"https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax\" target=\"_blank\" >documentation</a>',\n )\n scopes: List[str] = Field(\n ['https://www.googleapis.com/auth/bigquery'],\n title='OAuth scopes',\n description='OAuth 2.0 scopes define the level of access you need to request '\n 'the Google APIs. 
For more information, see this '\n '<a href=\"https://developers.google.com/identity/protocols/googlescopes\" target=\"_blank\" >documentation</a>',\n )\n\n @staticmethod\n def _get_google_credentials(credentials: GoogleCredentials, scopes: List[str]) -> Credentials:\n credentials = get_google_oauth2_credentials(credentials).with_scopes(scopes)\n return credentials\n\n @staticmethod\n def _connect(credentials: Credentials) -> bigquery.Client:\n start = timer()\n client = bigquery.Client(credentials=credentials)\n end = timer()\n logging.getLogger(__name__).info(\n f'[benchmark][google_big_query] - connect {end - start} seconds',\n extra={\n 'benchmark': {\n 'operation': 'connect',\n 'execution_time': end - start,\n 'connector': 'google_big_query',\n }\n },\n )\n return client\n\n @staticmethod\n def _execute_query(client: bigquery.Client, query: str, parameters: List) -> pandas.DataFrame:\n try:\n start = timer()\n result = (\n client.query(query, job_config=bigquery.QueryJobConfig(query_parameters=parameters))\n .result()\n .to_dataframe(\n create_bqstorage_client=True,\n ) # Use to generate directly a dataframe pandas\n )\n end = timer()\n logging.getLogger(__name__).info(\n f'[benchmark][google_big_query] - execute {end - start} seconds',\n extra={\n 'benchmark': {\n 'operation': 'execute',\n 'execution_time': end - start,\n 'connector': 'google_big_query',\n }\n },\n )\n return result\n except TypeError as e:\n logging.getLogger(__name__).error(f'Error to execute request {query} - {e}')\n raise e\n\n @staticmethod\n def _prepare_parameters(query: str, parameters: Optional[Dict]) -> List:\n \"\"\"replace ToucanToco variable definitions by Google Big Query variable definition\"\"\"\n query_parameters = []\n for param_name, param_value in (parameters or {}).items():\n if query.find('@' + param_name) > -1:\n # set all parameters with a type defined and necessary for Big Query\n query_parameters.append(_define_query_param(param_name, param_value))\n return query_parameters\n\n @staticmethod\n def _prepare_query(query: str) -> str:\n \"\"\"replace ToucanToco variable definition by Google Big Query variable definition\"\"\"\n new_query = query.replace('{{', '@').replace('}}', '')\n return new_query\n\n def _retrieve_data(self, data_source: GoogleBigQueryDataSource) -> pd.DataFrame:\n logging.getLogger(__name__).debug(\n f'Play request {data_source.query} with parameters {data_source.parameters}'\n )\n\n credentials = GoogleBigQueryConnector._get_google_credentials(self.credentials, self.scopes)\n query = GoogleBigQueryConnector._prepare_query(data_source.query)\n parameters = GoogleBigQueryConnector._prepare_parameters(query, data_source.parameters)\n\n client = GoogleBigQueryConnector._connect(credentials)\n result = GoogleBigQueryConnector._execute_query(client, query, parameters)\n\n return result\n", "path": "toucan_connectors/google_big_query/google_big_query_connector.py"}]}
2,147
529
gh_patches_debug_15928
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-3073
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "null" as Parameter Name Throws Incorrect Error ### CloudFormation Lint Version 0.85.2_1 ### What operating system are you using? Mac ### Describe the bug Setting a Parameter name to "null" produces incorrect or misleading errors: "E0002 Unknown exception while processing rule E2011: object of type 'NoneType' has no len()" "E0002 Unknown exception while processing rule E2003: expected string or buffer" (further, the document line reference is 1:1; which is not correct) The Cloudformation service itself returns the following more specific error message: "Template format error: [/Parameters] encountered malformed key" ### Expected behavior cfn-lint returns correct line number and a more accurate error message, such as: "E000X Parameters encountered malformed key" ### Reproduction template ```yaml AWSTemplateFormatVersion: 2010-09-09 Parameters: null: Description: anything Type: String #... ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/decode/cfn_yaml.py` Content: ``` 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 6 import fileinput 7 import logging 8 import sys 9 10 from yaml import MappingNode, ScalarNode, SequenceNode 11 from yaml.composer import Composer 12 from yaml.constructor import ConstructorError, SafeConstructor 13 from yaml.reader import Reader 14 from yaml.resolver import Resolver 15 from yaml.scanner import Scanner 16 17 import cfnlint 18 from cfnlint.decode.node import dict_node, list_node, str_node, sub_node 19 20 try: 21 from yaml._yaml import CParser as Parser # pylint: disable=ungrouped-imports, 22 23 cyaml = True 24 except ImportError: 25 from yaml.parser import Parser # type: ignore # pylint: disable=ungrouped-imports 26 27 cyaml = False 28 29 UNCONVERTED_SUFFIXES = ["Ref", "Condition"] 30 FN_PREFIX = "Fn::" 31 32 LOGGER = logging.getLogger(__name__) 33 34 35 class CfnParseError(ConstructorError): 36 """ 37 Error thrown when the template contains Cfn Error 38 """ 39 40 def __init__(self, filename, errors): 41 if isinstance(errors, cfnlint.rules.Match): 42 errors = [errors] 43 44 # Call the base class constructor with the parameters it needs 45 super().__init__(errors[0].message) 46 47 # Now for your custom code... 48 self.filename = filename 49 self.matches = errors 50 51 52 def build_match(filename, message, line_number, column_number, key): 53 return cfnlint.rules.Match( 54 line_number + 1, 55 column_number + 1, 56 line_number + 1, 57 column_number + 1 + len(key), 58 filename, 59 cfnlint.rules.ParseError(), 60 message=message, 61 ) 62 63 64 class NodeConstructor(SafeConstructor): 65 """ 66 Node Constructors for loading different types in Yaml 67 """ 68 69 def __init__(self, filename): 70 # Call the base class constructor 71 super().__init__() 72 73 self.filename = filename 74 75 # To support lazy loading, the original constructors first yield 76 # an empty object, then fill them in when iterated. Due to 77 # laziness we omit this behaviour (and will only do "deep 78 # construction") by first exhausting iterators, then yielding 79 # copies. 80 def construct_yaml_map(self, node): 81 # Check for duplicate keys on the current level, this is not desirable 82 # because a dict does not support this. 
It overwrites it with the last 83 # occurance, which can give unexpected results 84 mapping = {} 85 self.flatten_mapping(node) 86 matches = [] 87 for key_node, value_node in node.value: 88 key = self.construct_object(key_node, False) 89 value = self.construct_object(value_node, False) 90 91 for key_dup in mapping: 92 if key_dup == key: 93 if not matches: 94 matches.extend( 95 [ 96 build_match( 97 filename=self.filename, 98 message=f'Duplicate found "{key}" (line {key_dup.start_mark.line + 1})', 99 line_number=key_dup.start_mark.line, 100 column_number=key_dup.start_mark.column, 101 key=key, 102 ), 103 build_match( 104 filename=self.filename, 105 message=f'Duplicate found "{key}" (line {key_node.start_mark.line + 1})', 106 line_number=key_node.start_mark.line, 107 column_number=key_node.start_mark.column, 108 key=key, 109 ), 110 ], 111 ) 112 else: 113 matches.append( 114 build_match( 115 filename=self.filename, 116 message=f'Duplicate found "{key}" (line {key_node.start_mark.line + 1})', 117 line_number=key_node.start_mark.line, 118 column_number=key_node.start_mark.column, 119 key=key, 120 ), 121 ) 122 try: 123 mapping[key] = value 124 except Exception as exc: 125 raise CfnParseError( 126 self.filename, 127 [ 128 build_match( 129 filename=self.filename, 130 message=f'Unhashable type "{key}" (line {key.start_mark.line + 1})', 131 line_number=key.start_mark.line, 132 column_number=key.start_mark.column, 133 key=key, 134 ), 135 ], 136 ) from exc 137 138 if matches: 139 raise CfnParseError( 140 self.filename, 141 matches, 142 ) 143 144 (obj,) = SafeConstructor.construct_yaml_map(self, node) 145 146 if len(mapping) == 1: 147 if "Fn::Sub" in mapping: 148 return sub_node(obj, node.start_mark, node.end_mark) 149 150 return dict_node(obj, node.start_mark, node.end_mark) 151 152 def construct_yaml_str(self, node): 153 obj = SafeConstructor.construct_yaml_str(self, node) 154 assert isinstance(obj, (str)) 155 return str_node(obj, node.start_mark, node.end_mark) 156 157 def construct_yaml_seq(self, node): 158 (obj,) = SafeConstructor.construct_yaml_seq(self, node) 159 assert isinstance(obj, list) 160 return list_node(obj, node.start_mark, node.end_mark) 161 162 163 NodeConstructor.add_constructor( # type: ignore 164 "tag:yaml.org,2002:map", NodeConstructor.construct_yaml_map 165 ) 166 167 NodeConstructor.add_constructor( # type: ignore 168 "tag:yaml.org,2002:str", NodeConstructor.construct_yaml_str 169 ) 170 171 NodeConstructor.add_constructor( # type: ignore 172 "tag:yaml.org,2002:seq", NodeConstructor.construct_yaml_seq 173 ) 174 175 176 # pylint: disable=too-many-ancestors 177 class MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver): 178 """ 179 Class for marked loading YAML 180 """ 181 182 # pylint: disable=non-parent-init-called,super-init-not-called 183 184 def __init__(self, stream, filename): 185 Reader.__init__(self, stream) 186 Scanner.__init__(self) 187 if cyaml: 188 Parser.__init__(self, stream) 189 else: 190 Parser.__init__(self) 191 Composer.__init__(self) 192 SafeConstructor.__init__(self) 193 Resolver.__init__(self) 194 NodeConstructor.__init__(self, filename) 195 196 def construct_getatt(self, node): 197 """ 198 Reconstruct !GetAtt into a list 199 """ 200 201 if isinstance(node.value, (str)): 202 return list_node(node.value.split(".", 1), node.start_mark, node.end_mark) 203 if isinstance(node.value, list): 204 return [self.construct_object(child, deep=False) for child in node.value] 205 206 raise ValueError(f"Unexpected node type: {type(node.value)}") 207 208 
209 def multi_constructor(loader, tag_suffix, node): 210 """ 211 Deal with !Ref style function format 212 """ 213 214 if tag_suffix not in UNCONVERTED_SUFFIXES: 215 tag_suffix = f"{FN_PREFIX}{tag_suffix}" 216 217 constructor = None 218 if tag_suffix == "Fn::GetAtt": 219 constructor = loader.construct_getatt 220 elif isinstance(node, ScalarNode): 221 constructor = loader.construct_scalar 222 elif isinstance(node, SequenceNode): 223 constructor = loader.construct_sequence 224 elif isinstance(node, MappingNode): 225 constructor = loader.construct_mapping 226 else: 227 raise f"Bad tag: !{tag_suffix}" 228 229 if tag_suffix == "Fn::Sub": 230 return sub_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark) 231 232 return dict_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark) 233 234 235 def loads(yaml_string, fname=None): 236 """ 237 Load the given YAML string 238 """ 239 loader = MarkedLoader(yaml_string, fname) 240 loader.add_multi_constructor("!", multi_constructor) 241 template = loader.get_single_data() 242 # Convert an empty file to an empty dict 243 if template is None: 244 template = {} 245 246 return template 247 248 249 def load(filename): 250 """ 251 Load the given YAML file 252 """ 253 254 content = "" 255 256 if not sys.stdin.isatty(): 257 filename = "-" if filename is None else filename 258 if sys.version_info.major <= 3 and sys.version_info.minor <= 9: 259 for line in fileinput.input(files=filename): 260 content = content + line 261 else: 262 for line in fileinput.input( # pylint: disable=unexpected-keyword-arg 263 files=filename, encoding="utf-8" 264 ): 265 content = content + line 266 else: 267 with open(filename, encoding="utf-8") as fp: 268 content = fp.read() 269 270 return loads(content, filename) 271 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/decode/cfn_yaml.py b/src/cfnlint/decode/cfn_yaml.py --- a/src/cfnlint/decode/cfn_yaml.py +++ b/src/cfnlint/decode/cfn_yaml.py @@ -88,6 +88,19 @@ key = self.construct_object(key_node, False) value = self.construct_object(value_node, False) + if key is None: + raise CfnParseError( + self.filename, + [ + build_match( + filename=self.filename, + message=f"Null key {key_node.value!r} not supported (line {key_node.start_mark.line + 1})", + line_number=key_node.start_mark.line, + column_number=key_node.start_mark.column, + key=key_node.value, + ), + ], + ) for key_dup in mapping: if key_dup == key: if not matches:
{"golden_diff": "diff --git a/src/cfnlint/decode/cfn_yaml.py b/src/cfnlint/decode/cfn_yaml.py\n--- a/src/cfnlint/decode/cfn_yaml.py\n+++ b/src/cfnlint/decode/cfn_yaml.py\n@@ -88,6 +88,19 @@\n key = self.construct_object(key_node, False)\n value = self.construct_object(value_node, False)\n \n+ if key is None:\n+ raise CfnParseError(\n+ self.filename,\n+ [\n+ build_match(\n+ filename=self.filename,\n+ message=f\"Null key {key_node.value!r} not supported (line {key_node.start_mark.line + 1})\",\n+ line_number=key_node.start_mark.line,\n+ column_number=key_node.start_mark.column,\n+ key=key_node.value,\n+ ),\n+ ],\n+ )\n for key_dup in mapping:\n if key_dup == key:\n if not matches:\n", "issue": "\"null\" as Parameter Name Throws Incorrect Error\n### CloudFormation Lint Version\n\n0.85.2_1\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\nSetting a Parameter name to \"null\" produces incorrect or misleading errors:\r\n\"E0002 Unknown exception while processing rule E2011: object of type 'NoneType' has no len()\"\r\n\"E0002 Unknown exception while processing rule E2003: expected string or buffer\"\r\n(further, the document line reference is 1:1; which is not correct)\r\n\r\nThe Cloudformation service itself returns the following more specific error message:\r\n\"Template format error: [/Parameters] encountered malformed key\"\n\n### Expected behavior\n\ncfn-lint returns correct line number and a more accurate error message, such as:\r\n\"E000X Parameters encountered malformed key\"\n\n### Reproduction template\n\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nParameters:\r\n null:\r\n Description: anything\r\n Type: String\r\n\r\n#...\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nimport fileinput\nimport logging\nimport sys\n\nfrom yaml import MappingNode, ScalarNode, SequenceNode\nfrom yaml.composer import Composer\nfrom yaml.constructor import ConstructorError, SafeConstructor\nfrom yaml.reader import Reader\nfrom yaml.resolver import Resolver\nfrom yaml.scanner import Scanner\n\nimport cfnlint\nfrom cfnlint.decode.node import dict_node, list_node, str_node, sub_node\n\ntry:\n from yaml._yaml import CParser as Parser # pylint: disable=ungrouped-imports,\n\n cyaml = True\nexcept ImportError:\n from yaml.parser import Parser # type: ignore # pylint: disable=ungrouped-imports\n\n cyaml = False\n\nUNCONVERTED_SUFFIXES = [\"Ref\", \"Condition\"]\nFN_PREFIX = \"Fn::\"\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass CfnParseError(ConstructorError):\n \"\"\"\n Error thrown when the template contains Cfn Error\n \"\"\"\n\n def __init__(self, filename, errors):\n if isinstance(errors, cfnlint.rules.Match):\n errors = [errors]\n\n # Call the base class constructor with the parameters it needs\n super().__init__(errors[0].message)\n\n # Now for your custom code...\n self.filename = filename\n self.matches = errors\n\n\ndef build_match(filename, message, line_number, column_number, key):\n return cfnlint.rules.Match(\n line_number + 1,\n column_number + 1,\n line_number + 1,\n column_number + 1 + len(key),\n filename,\n cfnlint.rules.ParseError(),\n message=message,\n )\n\n\nclass NodeConstructor(SafeConstructor):\n \"\"\"\n Node Constructors for loading different types in Yaml\n \"\"\"\n\n def __init__(self, filename):\n # Call the base class constructor\n super().__init__()\n\n self.filename = filename\n\n # To support lazy loading, the original constructors first yield\n # an empty object, then fill them in when iterated. Due to\n # laziness we omit this behaviour (and will only do \"deep\n # construction\") by first exhausting iterators, then yielding\n # copies.\n def construct_yaml_map(self, node):\n # Check for duplicate keys on the current level, this is not desirable\n # because a dict does not support this. 
It overwrites it with the last\n # occurance, which can give unexpected results\n mapping = {}\n self.flatten_mapping(node)\n matches = []\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, False)\n value = self.construct_object(value_node, False)\n\n for key_dup in mapping:\n if key_dup == key:\n if not matches:\n matches.extend(\n [\n build_match(\n filename=self.filename,\n message=f'Duplicate found \"{key}\" (line {key_dup.start_mark.line + 1})',\n line_number=key_dup.start_mark.line,\n column_number=key_dup.start_mark.column,\n key=key,\n ),\n build_match(\n filename=self.filename,\n message=f'Duplicate found \"{key}\" (line {key_node.start_mark.line + 1})',\n line_number=key_node.start_mark.line,\n column_number=key_node.start_mark.column,\n key=key,\n ),\n ],\n )\n else:\n matches.append(\n build_match(\n filename=self.filename,\n message=f'Duplicate found \"{key}\" (line {key_node.start_mark.line + 1})',\n line_number=key_node.start_mark.line,\n column_number=key_node.start_mark.column,\n key=key,\n ),\n )\n try:\n mapping[key] = value\n except Exception as exc:\n raise CfnParseError(\n self.filename,\n [\n build_match(\n filename=self.filename,\n message=f'Unhashable type \"{key}\" (line {key.start_mark.line + 1})',\n line_number=key.start_mark.line,\n column_number=key.start_mark.column,\n key=key,\n ),\n ],\n ) from exc\n\n if matches:\n raise CfnParseError(\n self.filename,\n matches,\n )\n\n (obj,) = SafeConstructor.construct_yaml_map(self, node)\n\n if len(mapping) == 1:\n if \"Fn::Sub\" in mapping:\n return sub_node(obj, node.start_mark, node.end_mark)\n\n return dict_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_str(self, node):\n obj = SafeConstructor.construct_yaml_str(self, node)\n assert isinstance(obj, (str))\n return str_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_seq(self, node):\n (obj,) = SafeConstructor.construct_yaml_seq(self, node)\n assert isinstance(obj, list)\n return list_node(obj, node.start_mark, node.end_mark)\n\n\nNodeConstructor.add_constructor( # type: ignore\n \"tag:yaml.org,2002:map\", NodeConstructor.construct_yaml_map\n)\n\nNodeConstructor.add_constructor( # type: ignore\n \"tag:yaml.org,2002:str\", NodeConstructor.construct_yaml_str\n)\n\nNodeConstructor.add_constructor( # type: ignore\n \"tag:yaml.org,2002:seq\", NodeConstructor.construct_yaml_seq\n)\n\n\n# pylint: disable=too-many-ancestors\nclass MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):\n \"\"\"\n Class for marked loading YAML\n \"\"\"\n\n # pylint: disable=non-parent-init-called,super-init-not-called\n\n def __init__(self, stream, filename):\n Reader.__init__(self, stream)\n Scanner.__init__(self)\n if cyaml:\n Parser.__init__(self, stream)\n else:\n Parser.__init__(self)\n Composer.__init__(self)\n SafeConstructor.__init__(self)\n Resolver.__init__(self)\n NodeConstructor.__init__(self, filename)\n\n def construct_getatt(self, node):\n \"\"\"\n Reconstruct !GetAtt into a list\n \"\"\"\n\n if isinstance(node.value, (str)):\n return list_node(node.value.split(\".\", 1), node.start_mark, node.end_mark)\n if isinstance(node.value, list):\n return [self.construct_object(child, deep=False) for child in node.value]\n\n raise ValueError(f\"Unexpected node type: {type(node.value)}\")\n\n\ndef multi_constructor(loader, tag_suffix, node):\n \"\"\"\n Deal with !Ref style function format\n \"\"\"\n\n if tag_suffix not in UNCONVERTED_SUFFIXES:\n tag_suffix = f\"{FN_PREFIX}{tag_suffix}\"\n\n constructor 
= None\n if tag_suffix == \"Fn::GetAtt\":\n constructor = loader.construct_getatt\n elif isinstance(node, ScalarNode):\n constructor = loader.construct_scalar\n elif isinstance(node, SequenceNode):\n constructor = loader.construct_sequence\n elif isinstance(node, MappingNode):\n constructor = loader.construct_mapping\n else:\n raise f\"Bad tag: !{tag_suffix}\"\n\n if tag_suffix == \"Fn::Sub\":\n return sub_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark)\n\n return dict_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark)\n\n\ndef loads(yaml_string, fname=None):\n \"\"\"\n Load the given YAML string\n \"\"\"\n loader = MarkedLoader(yaml_string, fname)\n loader.add_multi_constructor(\"!\", multi_constructor)\n template = loader.get_single_data()\n # Convert an empty file to an empty dict\n if template is None:\n template = {}\n\n return template\n\n\ndef load(filename):\n \"\"\"\n Load the given YAML file\n \"\"\"\n\n content = \"\"\n\n if not sys.stdin.isatty():\n filename = \"-\" if filename is None else filename\n if sys.version_info.major <= 3 and sys.version_info.minor <= 9:\n for line in fileinput.input(files=filename):\n content = content + line\n else:\n for line in fileinput.input( # pylint: disable=unexpected-keyword-arg\n files=filename, encoding=\"utf-8\"\n ):\n content = content + line\n else:\n with open(filename, encoding=\"utf-8\") as fp:\n content = fp.read()\n\n return loads(content, filename)\n", "path": "src/cfnlint/decode/cfn_yaml.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nimport fileinput\nimport logging\nimport sys\n\nfrom yaml import MappingNode, ScalarNode, SequenceNode\nfrom yaml.composer import Composer\nfrom yaml.constructor import ConstructorError, SafeConstructor\nfrom yaml.reader import Reader\nfrom yaml.resolver import Resolver\nfrom yaml.scanner import Scanner\n\nimport cfnlint\nfrom cfnlint.decode.node import dict_node, list_node, str_node, sub_node\n\ntry:\n from yaml._yaml import CParser as Parser # pylint: disable=ungrouped-imports,\n\n cyaml = True\nexcept ImportError:\n from yaml.parser import Parser # type: ignore # pylint: disable=ungrouped-imports\n\n cyaml = False\n\nUNCONVERTED_SUFFIXES = [\"Ref\", \"Condition\"]\nFN_PREFIX = \"Fn::\"\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass CfnParseError(ConstructorError):\n \"\"\"\n Error thrown when the template contains Cfn Error\n \"\"\"\n\n def __init__(self, filename, errors):\n if isinstance(errors, cfnlint.rules.Match):\n errors = [errors]\n\n # Call the base class constructor with the parameters it needs\n super().__init__(errors[0].message)\n\n # Now for your custom code...\n self.filename = filename\n self.matches = errors\n\n\ndef build_match(filename, message, line_number, column_number, key):\n return cfnlint.rules.Match(\n line_number + 1,\n column_number + 1,\n line_number + 1,\n column_number + 1 + len(key),\n filename,\n cfnlint.rules.ParseError(),\n message=message,\n )\n\n\nclass NodeConstructor(SafeConstructor):\n \"\"\"\n Node Constructors for loading different types in Yaml\n \"\"\"\n\n def __init__(self, filename):\n # Call the base class constructor\n super().__init__()\n\n self.filename = filename\n\n # To support lazy loading, the original constructors first yield\n # an empty object, then fill them in when iterated. 
Due to\n # laziness we omit this behaviour (and will only do \"deep\n # construction\") by first exhausting iterators, then yielding\n # copies.\n def construct_yaml_map(self, node):\n # Check for duplicate keys on the current level, this is not desirable\n # because a dict does not support this. It overwrites it with the last\n # occurance, which can give unexpected results\n mapping = {}\n self.flatten_mapping(node)\n matches = []\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, False)\n value = self.construct_object(value_node, False)\n\n if key is None:\n raise CfnParseError(\n self.filename,\n [\n build_match(\n filename=self.filename,\n message=f\"Null key {key_node.value!r} not supported (line {key_node.start_mark.line + 1})\",\n line_number=key_node.start_mark.line,\n column_number=key_node.start_mark.column,\n key=key_node.value,\n ),\n ],\n )\n for key_dup in mapping:\n if key_dup == key:\n if not matches:\n matches.extend(\n [\n build_match(\n filename=self.filename,\n message=f'Duplicate found \"{key}\" (line {key_dup.start_mark.line + 1})',\n line_number=key_dup.start_mark.line,\n column_number=key_dup.start_mark.column,\n key=key,\n ),\n build_match(\n filename=self.filename,\n message=f'Duplicate found \"{key}\" (line {key_node.start_mark.line + 1})',\n line_number=key_node.start_mark.line,\n column_number=key_node.start_mark.column,\n key=key,\n ),\n ],\n )\n else:\n matches.append(\n build_match(\n filename=self.filename,\n message=f'Duplicate found \"{key}\" (line {key_node.start_mark.line + 1})',\n line_number=key_node.start_mark.line,\n column_number=key_node.start_mark.column,\n key=key,\n ),\n )\n try:\n mapping[key] = value\n except Exception as exc:\n raise CfnParseError(\n self.filename,\n [\n build_match(\n filename=self.filename,\n message=f'Unhashable type \"{key}\" (line {key.start_mark.line + 1})',\n line_number=key.start_mark.line,\n column_number=key.start_mark.column,\n key=key,\n ),\n ],\n ) from exc\n\n if matches:\n raise CfnParseError(\n self.filename,\n matches,\n )\n\n (obj,) = SafeConstructor.construct_yaml_map(self, node)\n\n if len(mapping) == 1:\n if \"Fn::Sub\" in mapping:\n return sub_node(obj, node.start_mark, node.end_mark)\n\n return dict_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_str(self, node):\n obj = SafeConstructor.construct_yaml_str(self, node)\n assert isinstance(obj, (str))\n return str_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_seq(self, node):\n (obj,) = SafeConstructor.construct_yaml_seq(self, node)\n assert isinstance(obj, list)\n return list_node(obj, node.start_mark, node.end_mark)\n\n\nNodeConstructor.add_constructor( # type: ignore\n \"tag:yaml.org,2002:map\", NodeConstructor.construct_yaml_map\n)\n\nNodeConstructor.add_constructor( # type: ignore\n \"tag:yaml.org,2002:str\", NodeConstructor.construct_yaml_str\n)\n\nNodeConstructor.add_constructor( # type: ignore\n \"tag:yaml.org,2002:seq\", NodeConstructor.construct_yaml_seq\n)\n\n\n# pylint: disable=too-many-ancestors\nclass MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):\n \"\"\"\n Class for marked loading YAML\n \"\"\"\n\n # pylint: disable=non-parent-init-called,super-init-not-called\n\n def __init__(self, stream, filename):\n Reader.__init__(self, stream)\n Scanner.__init__(self)\n if cyaml:\n Parser.__init__(self, stream)\n else:\n Parser.__init__(self)\n Composer.__init__(self)\n SafeConstructor.__init__(self)\n Resolver.__init__(self)\n 
NodeConstructor.__init__(self, filename)\n\n def construct_getatt(self, node):\n \"\"\"\n Reconstruct !GetAtt into a list\n \"\"\"\n\n if isinstance(node.value, (str)):\n return list_node(node.value.split(\".\", 1), node.start_mark, node.end_mark)\n if isinstance(node.value, list):\n return [self.construct_object(child, deep=False) for child in node.value]\n\n raise ValueError(f\"Unexpected node type: {type(node.value)}\")\n\n\ndef multi_constructor(loader, tag_suffix, node):\n \"\"\"\n Deal with !Ref style function format\n \"\"\"\n\n if tag_suffix not in UNCONVERTED_SUFFIXES:\n tag_suffix = f\"{FN_PREFIX}{tag_suffix}\"\n\n constructor = None\n if tag_suffix == \"Fn::GetAtt\":\n constructor = loader.construct_getatt\n elif isinstance(node, ScalarNode):\n constructor = loader.construct_scalar\n elif isinstance(node, SequenceNode):\n constructor = loader.construct_sequence\n elif isinstance(node, MappingNode):\n constructor = loader.construct_mapping\n else:\n raise f\"Bad tag: !{tag_suffix}\"\n\n if tag_suffix == \"Fn::Sub\":\n return sub_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark)\n\n return dict_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark)\n\n\ndef loads(yaml_string, fname=None):\n \"\"\"\n Load the given YAML string\n \"\"\"\n loader = MarkedLoader(yaml_string, fname)\n loader.add_multi_constructor(\"!\", multi_constructor)\n template = loader.get_single_data()\n # Convert an empty file to an empty dict\n if template is None:\n template = {}\n\n return template\n\n\ndef load(filename):\n \"\"\"\n Load the given YAML file\n \"\"\"\n\n content = \"\"\n\n if not sys.stdin.isatty():\n filename = \"-\" if filename is None else filename\n if sys.version_info.major <= 3 and sys.version_info.minor <= 9:\n for line in fileinput.input(files=filename):\n content = content + line\n else:\n for line in fileinput.input( # pylint: disable=unexpected-keyword-arg\n files=filename, encoding=\"utf-8\"\n ):\n content = content + line\n else:\n with open(filename, encoding=\"utf-8\") as fp:\n content = fp.read()\n\n return loads(content, filename)\n", "path": "src/cfnlint/decode/cfn_yaml.py"}]}
3,009
206
gh_patches_debug_41606
rasdani/github-patches
git_diff
canonical__snapcraft-4622
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support project hooks for core24 snaps ### What needs to get done The `PackageService` for core24 snaps should support project hooks. The behavior should be the same as core22. The failures can be found by running `spread google:ubuntu-24.04-64:tests/spread/general/hooks/`. See failing logs [here](https://paste.ubuntu.com/p/CjBwVKcwyR/). ### Why it needs to get done To support building core24 snaps with craft-application --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `snapcraft/services/package.py` Content: ``` 1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- 2 # 3 # Copyright 2023 Canonical Ltd. 4 # 5 # This program is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License version 3 as 7 # published by the Free Software Foundation. 8 # 9 # This program is distributed in the hope that it will be useful, 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 # GNU General Public License for more details. 13 # 14 # You should have received a copy of the GNU General Public License 15 # along with this program. If not, see <http://www.gnu.org/licenses/>. 16 17 """Snapcraft Package service.""" 18 19 from __future__ import annotations 20 21 import os 22 import pathlib 23 import shutil 24 from typing import TYPE_CHECKING, cast 25 26 from craft_application import AppMetadata, PackageService 27 from overrides import override 28 29 from snapcraft import errors, linters, models, pack, utils 30 from snapcraft.linters import LinterStatus 31 from snapcraft.meta import snap_yaml 32 from snapcraft.services import Lifecycle 33 from snapcraft.utils import process_version 34 35 if TYPE_CHECKING: 36 from snapcraft.services import SnapcraftServiceFactory 37 38 39 class Package(PackageService): 40 """Package service subclass for Snapcraft.""" 41 42 _project: models.Project 43 44 def __init__( # noqa: PLR0913 (Too many arguments) 45 self, 46 app: AppMetadata, 47 services: SnapcraftServiceFactory, 48 *, 49 project: models.Project, 50 snapcraft_yaml_path: pathlib.Path, 51 platform: str | None, 52 build_for: str, 53 ) -> None: 54 super().__init__(app, services, project=project) 55 self._platform = platform 56 self._build_for = build_for 57 self._snapcraft_yaml_path = snapcraft_yaml_path 58 59 @override 60 def pack(self, prime_dir: pathlib.Path, dest: pathlib.Path) -> list[pathlib.Path]: 61 """Create one or more packages as appropriate. 62 63 :param prime_dir: Path to the directory to pack. 64 :param dest: Directory into which to write the package(s). 65 :returns: A list of paths to created packages. 66 """ 67 issues = linters.run_linters(prime_dir, lint=self._project.lint) 68 status = linters.report(issues, intermediate=True) 69 70 # In case of linter errors, stop execution and return the error code. 
71 if status in (LinterStatus.ERRORS, LinterStatus.FATAL): 72 raise errors.LinterError("Linter errors found", exit_code=status) 73 74 return [ 75 pathlib.Path( 76 pack.pack_snap( 77 prime_dir, 78 output=str(dest), 79 compression=self._project.compression, 80 name=self._project.name, 81 version=process_version(self._project.version), 82 target_arch=self._build_for, 83 ) 84 ) 85 ] 86 87 @override 88 def write_metadata(self, path: pathlib.Path) -> None: 89 """Write the project metadata to metadata.yaml in the given directory. 90 91 :param path: The path to the prime directory. 92 """ 93 meta_dir = path / "meta" 94 meta_dir.mkdir(parents=True, exist_ok=True) 95 self.metadata.to_yaml_file(meta_dir / "snap.yaml") 96 97 enable_manifest = utils.strtobool(os.getenv("SNAPCRAFT_BUILD_INFO", "n")) 98 99 if enable_manifest: 100 snap_dir = path / "snap" 101 snap_dir.mkdir(parents=True, exist_ok=True) 102 lifecycle = cast(Lifecycle, self._services.lifecycle) 103 manifest = lifecycle.generate_manifest() 104 manifest.to_yaml_file(snap_dir / "manifest.yaml") 105 106 shutil.copy(self._snapcraft_yaml_path, snap_dir) 107 108 @property 109 def metadata(self) -> snap_yaml.SnapMetadata: 110 """Get the metadata model for this project.""" 111 return snap_yaml.get_metadata_from_project( 112 self._project, self._services.lifecycle.prime_dir, arch=self._build_for 113 ) 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/snapcraft/services/package.py b/snapcraft/services/package.py --- a/snapcraft/services/package.py +++ b/snapcraft/services/package.py @@ -29,6 +29,7 @@ from snapcraft import errors, linters, models, pack, utils from snapcraft.linters import LinterStatus from snapcraft.meta import snap_yaml +from snapcraft.parts.setup_assets import setup_assets from snapcraft.services import Lifecycle from snapcraft.utils import process_version @@ -84,6 +85,23 @@ ) ] + def _get_assets_dir(self) -> pathlib.Path: + """Return a snapcraft assets directory. + + Asset directories can exist in: + + - <PROJECT_ROOT>/snap + - <PROJECT_ROOT>/build-aux/snap + """ + project_dir = self._services.lifecycle.project_info.project_dir + for asset_reldir in ("snap", "build-aux/snap"): + asset_dir = project_dir / asset_reldir + if asset_dir.exists(): + return asset_dir + + # This is for backwards compatibility with setup_assets(...) + return project_dir / "snap" + @override def write_metadata(self, path: pathlib.Path) -> None: """Write the project metadata to metadata.yaml in the given directory. @@ -105,9 +123,79 @@ shutil.copy(self._snapcraft_yaml_path, snap_dir) + assets_dir = self._get_assets_dir() + setup_assets( + self._project, + assets_dir=assets_dir, + project_dir=self._services.lifecycle.project_info.project_dir, + prime_dir=path, + meta_directory_handler=meta_directory_handler, + ) + @property def metadata(self) -> snap_yaml.SnapMetadata: """Get the metadata model for this project.""" return snap_yaml.get_metadata_from_project( self._project, self._services.lifecycle.prime_dir, arch=self._build_for ) + + +def _hardlink_or_copy(source: pathlib.Path, destination: pathlib.Path) -> bool: + """Try to hardlink and fallback to copy if it fails. + + :param source: the source path. + :param destination: the destination path. + :returns: True if a hardlink was done or False for copy. + """ + # Unlink the destination to avoid link failures + destination.unlink(missing_ok=True) + + try: + destination.hardlink_to(source) + except OSError as os_error: + # Cross device link + if os_error.errno != 18: + raise + shutil.copy(source, destination) + return False + + return True + + +def meta_directory_handler(assets_dir: pathlib.Path, path: pathlib.Path): + """Handle hooks and gui assets from Snapcraft. + + :param assets_dir: directory with project assets. + :param path: directory to write assets to. + """ + meta_dir = path / "meta" + built_snap_hooks = path / "snap" / "hooks" + hooks_project_dir = assets_dir / "hooks" + + hooks_meta_dir = meta_dir / "hooks" + + if built_snap_hooks.is_dir(): + hooks_meta_dir.mkdir(parents=True, exist_ok=True) + for hook in built_snap_hooks.iterdir(): + meta_dir_hook = hooks_meta_dir / hook.name + # Remove to always refresh to the latest + meta_dir_hook.unlink(missing_ok=True) + meta_dir_hook.hardlink_to(hook) + + # Overwrite any built hooks with project level ones + if hooks_project_dir.is_dir(): + hooks_meta_dir.mkdir(parents=True, exist_ok=True) + for hook in hooks_project_dir.iterdir(): + meta_dir_hook = hooks_meta_dir / hook.name + + _hardlink_or_copy(hook, meta_dir_hook) + + # Write any gui assets + gui_project_dir = assets_dir / "gui" + gui_meta_dir = meta_dir / "gui" + if gui_project_dir.is_dir(): + gui_meta_dir.mkdir(parents=True, exist_ok=True) + for gui in gui_project_dir.iterdir(): + meta_dir_gui = gui_meta_dir / gui.name + + _hardlink_or_copy(gui, meta_dir_gui)
{"golden_diff": "diff --git a/snapcraft/services/package.py b/snapcraft/services/package.py\n--- a/snapcraft/services/package.py\n+++ b/snapcraft/services/package.py\n@@ -29,6 +29,7 @@\n from snapcraft import errors, linters, models, pack, utils\n from snapcraft.linters import LinterStatus\n from snapcraft.meta import snap_yaml\n+from snapcraft.parts.setup_assets import setup_assets\n from snapcraft.services import Lifecycle\n from snapcraft.utils import process_version\n \n@@ -84,6 +85,23 @@\n )\n ]\n \n+ def _get_assets_dir(self) -> pathlib.Path:\n+ \"\"\"Return a snapcraft assets directory.\n+\n+ Asset directories can exist in:\n+\n+ - <PROJECT_ROOT>/snap\n+ - <PROJECT_ROOT>/build-aux/snap\n+ \"\"\"\n+ project_dir = self._services.lifecycle.project_info.project_dir\n+ for asset_reldir in (\"snap\", \"build-aux/snap\"):\n+ asset_dir = project_dir / asset_reldir\n+ if asset_dir.exists():\n+ return asset_dir\n+\n+ # This is for backwards compatibility with setup_assets(...)\n+ return project_dir / \"snap\"\n+\n @override\n def write_metadata(self, path: pathlib.Path) -> None:\n \"\"\"Write the project metadata to metadata.yaml in the given directory.\n@@ -105,9 +123,79 @@\n \n shutil.copy(self._snapcraft_yaml_path, snap_dir)\n \n+ assets_dir = self._get_assets_dir()\n+ setup_assets(\n+ self._project,\n+ assets_dir=assets_dir,\n+ project_dir=self._services.lifecycle.project_info.project_dir,\n+ prime_dir=path,\n+ meta_directory_handler=meta_directory_handler,\n+ )\n+\n @property\n def metadata(self) -> snap_yaml.SnapMetadata:\n \"\"\"Get the metadata model for this project.\"\"\"\n return snap_yaml.get_metadata_from_project(\n self._project, self._services.lifecycle.prime_dir, arch=self._build_for\n )\n+\n+\n+def _hardlink_or_copy(source: pathlib.Path, destination: pathlib.Path) -> bool:\n+ \"\"\"Try to hardlink and fallback to copy if it fails.\n+\n+ :param source: the source path.\n+ :param destination: the destination path.\n+ :returns: True if a hardlink was done or False for copy.\n+ \"\"\"\n+ # Unlink the destination to avoid link failures\n+ destination.unlink(missing_ok=True)\n+\n+ try:\n+ destination.hardlink_to(source)\n+ except OSError as os_error:\n+ # Cross device link\n+ if os_error.errno != 18:\n+ raise\n+ shutil.copy(source, destination)\n+ return False\n+\n+ return True\n+\n+\n+def meta_directory_handler(assets_dir: pathlib.Path, path: pathlib.Path):\n+ \"\"\"Handle hooks and gui assets from Snapcraft.\n+\n+ :param assets_dir: directory with project assets.\n+ :param path: directory to write assets to.\n+ \"\"\"\n+ meta_dir = path / \"meta\"\n+ built_snap_hooks = path / \"snap\" / \"hooks\"\n+ hooks_project_dir = assets_dir / \"hooks\"\n+\n+ hooks_meta_dir = meta_dir / \"hooks\"\n+\n+ if built_snap_hooks.is_dir():\n+ hooks_meta_dir.mkdir(parents=True, exist_ok=True)\n+ for hook in built_snap_hooks.iterdir():\n+ meta_dir_hook = hooks_meta_dir / hook.name\n+ # Remove to always refresh to the latest\n+ meta_dir_hook.unlink(missing_ok=True)\n+ meta_dir_hook.hardlink_to(hook)\n+\n+ # Overwrite any built hooks with project level ones\n+ if hooks_project_dir.is_dir():\n+ hooks_meta_dir.mkdir(parents=True, exist_ok=True)\n+ for hook in hooks_project_dir.iterdir():\n+ meta_dir_hook = hooks_meta_dir / hook.name\n+\n+ _hardlink_or_copy(hook, meta_dir_hook)\n+\n+ # Write any gui assets\n+ gui_project_dir = assets_dir / \"gui\"\n+ gui_meta_dir = meta_dir / \"gui\"\n+ if gui_project_dir.is_dir():\n+ gui_meta_dir.mkdir(parents=True, exist_ok=True)\n+ for gui in 
gui_project_dir.iterdir():\n+ meta_dir_gui = gui_meta_dir / gui.name\n+\n+ _hardlink_or_copy(gui, meta_dir_gui)\n", "issue": "Support project hooks for core24 snaps\n### What needs to get done\n\nThe `PackageService` for core24 snaps should support project hooks. The behavior should be the same as core22.\r\n\r\nThe failures can be found by running `spread google:ubuntu-24.04-64:tests/spread/general/hooks/`. See failing logs [here](https://paste.ubuntu.com/p/CjBwVKcwyR/).\n\n### Why it needs to get done\n\nTo support building core24 snaps with craft-application\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Snapcraft Package service.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport pathlib\nimport shutil\nfrom typing import TYPE_CHECKING, cast\n\nfrom craft_application import AppMetadata, PackageService\nfrom overrides import override\n\nfrom snapcraft import errors, linters, models, pack, utils\nfrom snapcraft.linters import LinterStatus\nfrom snapcraft.meta import snap_yaml\nfrom snapcraft.services import Lifecycle\nfrom snapcraft.utils import process_version\n\nif TYPE_CHECKING:\n from snapcraft.services import SnapcraftServiceFactory\n\n\nclass Package(PackageService):\n \"\"\"Package service subclass for Snapcraft.\"\"\"\n\n _project: models.Project\n\n def __init__( # noqa: PLR0913 (Too many arguments)\n self,\n app: AppMetadata,\n services: SnapcraftServiceFactory,\n *,\n project: models.Project,\n snapcraft_yaml_path: pathlib.Path,\n platform: str | None,\n build_for: str,\n ) -> None:\n super().__init__(app, services, project=project)\n self._platform = platform\n self._build_for = build_for\n self._snapcraft_yaml_path = snapcraft_yaml_path\n\n @override\n def pack(self, prime_dir: pathlib.Path, dest: pathlib.Path) -> list[pathlib.Path]:\n \"\"\"Create one or more packages as appropriate.\n\n :param prime_dir: Path to the directory to pack.\n :param dest: Directory into which to write the package(s).\n :returns: A list of paths to created packages.\n \"\"\"\n issues = linters.run_linters(prime_dir, lint=self._project.lint)\n status = linters.report(issues, intermediate=True)\n\n # In case of linter errors, stop execution and return the error code.\n if status in (LinterStatus.ERRORS, LinterStatus.FATAL):\n raise errors.LinterError(\"Linter errors found\", exit_code=status)\n\n return [\n pathlib.Path(\n pack.pack_snap(\n prime_dir,\n output=str(dest),\n compression=self._project.compression,\n name=self._project.name,\n version=process_version(self._project.version),\n target_arch=self._build_for,\n )\n )\n ]\n\n @override\n def write_metadata(self, path: pathlib.Path) -> None:\n \"\"\"Write the project metadata to metadata.yaml in the given directory.\n\n :param path: The path to the prime directory.\n \"\"\"\n meta_dir = path / \"meta\"\n meta_dir.mkdir(parents=True, exist_ok=True)\n 
self.metadata.to_yaml_file(meta_dir / \"snap.yaml\")\n\n enable_manifest = utils.strtobool(os.getenv(\"SNAPCRAFT_BUILD_INFO\", \"n\"))\n\n if enable_manifest:\n snap_dir = path / \"snap\"\n snap_dir.mkdir(parents=True, exist_ok=True)\n lifecycle = cast(Lifecycle, self._services.lifecycle)\n manifest = lifecycle.generate_manifest()\n manifest.to_yaml_file(snap_dir / \"manifest.yaml\")\n\n shutil.copy(self._snapcraft_yaml_path, snap_dir)\n\n @property\n def metadata(self) -> snap_yaml.SnapMetadata:\n \"\"\"Get the metadata model for this project.\"\"\"\n return snap_yaml.get_metadata_from_project(\n self._project, self._services.lifecycle.prime_dir, arch=self._build_for\n )\n", "path": "snapcraft/services/package.py"}], "after_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Snapcraft Package service.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport pathlib\nimport shutil\nfrom typing import TYPE_CHECKING, cast\n\nfrom craft_application import AppMetadata, PackageService\nfrom overrides import override\n\nfrom snapcraft import errors, linters, models, pack, utils\nfrom snapcraft.linters import LinterStatus\nfrom snapcraft.meta import snap_yaml\nfrom snapcraft.parts.setup_assets import setup_assets\nfrom snapcraft.services import Lifecycle\nfrom snapcraft.utils import process_version\n\nif TYPE_CHECKING:\n from snapcraft.services import SnapcraftServiceFactory\n\n\nclass Package(PackageService):\n \"\"\"Package service subclass for Snapcraft.\"\"\"\n\n _project: models.Project\n\n def __init__( # noqa: PLR0913 (Too many arguments)\n self,\n app: AppMetadata,\n services: SnapcraftServiceFactory,\n *,\n project: models.Project,\n snapcraft_yaml_path: pathlib.Path,\n platform: str | None,\n build_for: str,\n ) -> None:\n super().__init__(app, services, project=project)\n self._platform = platform\n self._build_for = build_for\n self._snapcraft_yaml_path = snapcraft_yaml_path\n\n @override\n def pack(self, prime_dir: pathlib.Path, dest: pathlib.Path) -> list[pathlib.Path]:\n \"\"\"Create one or more packages as appropriate.\n\n :param prime_dir: Path to the directory to pack.\n :param dest: Directory into which to write the package(s).\n :returns: A list of paths to created packages.\n \"\"\"\n issues = linters.run_linters(prime_dir, lint=self._project.lint)\n status = linters.report(issues, intermediate=True)\n\n # In case of linter errors, stop execution and return the error code.\n if status in (LinterStatus.ERRORS, LinterStatus.FATAL):\n raise errors.LinterError(\"Linter errors found\", exit_code=status)\n\n return [\n pathlib.Path(\n pack.pack_snap(\n prime_dir,\n output=str(dest),\n compression=self._project.compression,\n name=self._project.name,\n version=process_version(self._project.version),\n target_arch=self._build_for,\n )\n )\n ]\n\n def _get_assets_dir(self) -> pathlib.Path:\n \"\"\"Return a snapcraft assets 
directory.\n\n Asset directories can exist in:\n\n - <PROJECT_ROOT>/snap\n - <PROJECT_ROOT>/build-aux/snap\n \"\"\"\n project_dir = self._services.lifecycle.project_info.project_dir\n for asset_reldir in (\"snap\", \"build-aux/snap\"):\n asset_dir = project_dir / asset_reldir\n if asset_dir.exists():\n return asset_dir\n\n # This is for backwards compatibility with setup_assets(...)\n return project_dir / \"snap\"\n\n @override\n def write_metadata(self, path: pathlib.Path) -> None:\n \"\"\"Write the project metadata to metadata.yaml in the given directory.\n\n :param path: The path to the prime directory.\n \"\"\"\n meta_dir = path / \"meta\"\n meta_dir.mkdir(parents=True, exist_ok=True)\n self.metadata.to_yaml_file(meta_dir / \"snap.yaml\")\n\n enable_manifest = utils.strtobool(os.getenv(\"SNAPCRAFT_BUILD_INFO\", \"n\"))\n\n if enable_manifest:\n snap_dir = path / \"snap\"\n snap_dir.mkdir(parents=True, exist_ok=True)\n lifecycle = cast(Lifecycle, self._services.lifecycle)\n manifest = lifecycle.generate_manifest()\n manifest.to_yaml_file(snap_dir / \"manifest.yaml\")\n\n shutil.copy(self._snapcraft_yaml_path, snap_dir)\n\n assets_dir = self._get_assets_dir()\n setup_assets(\n self._project,\n assets_dir=assets_dir,\n project_dir=self._services.lifecycle.project_info.project_dir,\n prime_dir=path,\n meta_directory_handler=meta_directory_handler,\n )\n\n @property\n def metadata(self) -> snap_yaml.SnapMetadata:\n \"\"\"Get the metadata model for this project.\"\"\"\n return snap_yaml.get_metadata_from_project(\n self._project, self._services.lifecycle.prime_dir, arch=self._build_for\n )\n\n\ndef _hardlink_or_copy(source: pathlib.Path, destination: pathlib.Path) -> bool:\n \"\"\"Try to hardlink and fallback to copy if it fails.\n\n :param source: the source path.\n :param destination: the destination path.\n :returns: True if a hardlink was done or False for copy.\n \"\"\"\n # Unlink the destination to avoid link failures\n destination.unlink(missing_ok=True)\n\n try:\n destination.hardlink_to(source)\n except OSError as os_error:\n # Cross device link\n if os_error.errno != 18:\n raise\n shutil.copy(source, destination)\n return False\n\n return True\n\n\ndef meta_directory_handler(assets_dir: pathlib.Path, path: pathlib.Path):\n \"\"\"Handle hooks and gui assets from Snapcraft.\n\n :param assets_dir: directory with project assets.\n :param path: directory to write assets to.\n \"\"\"\n meta_dir = path / \"meta\"\n built_snap_hooks = path / \"snap\" / \"hooks\"\n hooks_project_dir = assets_dir / \"hooks\"\n\n hooks_meta_dir = meta_dir / \"hooks\"\n\n if built_snap_hooks.is_dir():\n hooks_meta_dir.mkdir(parents=True, exist_ok=True)\n for hook in built_snap_hooks.iterdir():\n meta_dir_hook = hooks_meta_dir / hook.name\n # Remove to always refresh to the latest\n meta_dir_hook.unlink(missing_ok=True)\n meta_dir_hook.hardlink_to(hook)\n\n # Overwrite any built hooks with project level ones\n if hooks_project_dir.is_dir():\n hooks_meta_dir.mkdir(parents=True, exist_ok=True)\n for hook in hooks_project_dir.iterdir():\n meta_dir_hook = hooks_meta_dir / hook.name\n\n _hardlink_or_copy(hook, meta_dir_hook)\n\n # Write any gui assets\n gui_project_dir = assets_dir / \"gui\"\n gui_meta_dir = meta_dir / \"gui\"\n if gui_project_dir.is_dir():\n gui_meta_dir.mkdir(parents=True, exist_ok=True)\n for gui in gui_project_dir.iterdir():\n meta_dir_gui = gui_meta_dir / gui.name\n\n _hardlink_or_copy(gui, meta_dir_gui)\n", "path": "snapcraft/services/package.py"}]}
1,472
969
gh_patches_debug_9672
rasdani/github-patches
git_diff
svthalia__concrexit-2712
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Paparazcie committee members cannot edit promo requests Members of the paparazcie cannot edit the promo requests in the back-end. I can, so it might be an issue with permissions. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/promotion/admin.py` Content: ``` 1 """Registers admin interfaces for the models defined in this module.""" 2 from django.contrib import admin 3 from django.contrib.admin import ModelAdmin 4 5 from events.services import is_organiser 6 from promotion.forms import PromotionRequestForm 7 8 from .models import PromotionChannel, PromotionRequest 9 10 11 @admin.register(PromotionRequest) 12 class PromotionRequestAdmin(admin.ModelAdmin): 13 """This manages the admin interface for the model items.""" 14 15 list_display = ("event", "publish_date", "channel", "assigned_to", "status") 16 list_filter = ( 17 "publish_date", 18 "assigned_to", 19 "status", 20 ) 21 date_hierarchy = "publish_date" 22 form = PromotionRequestForm 23 actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"] 24 25 def has_change_permission(self, request, obj=None): 26 if obj is not None and not is_organiser(request.member, obj.event): 27 return False 28 return super().has_change_permission(request, obj) 29 30 def mark_not_started(self, request, queryset): 31 """Change the status of the event to published.""" 32 self._change_published(queryset, PromotionRequest.NOT_STARTED) 33 34 mark_not_started.short_description = "Mark requests as not started" 35 36 def mark_started(self, request, queryset): 37 """Change the status of the event to published.""" 38 self._change_published(queryset, PromotionRequest.STARTED) 39 40 mark_started.short_description = "Mark requests as started" 41 42 def mark_finished(self, request, queryset): 43 """Change the status of the event to published.""" 44 self._change_published(queryset, PromotionRequest.FINISHED) 45 46 mark_finished.short_description = "Mark requests as finished" 47 48 def mark_published(self, request, queryset): 49 """Change the status of the event to published.""" 50 self._change_published(queryset, PromotionRequest.PUBLISHED) 51 52 mark_published.short_description = "Mark requests as published" 53 54 @staticmethod 55 def _change_published(queryset, status): 56 queryset.update(status=status) 57 58 59 @admin.register(PromotionChannel) 60 class PromotionChannelAdmin(ModelAdmin): 61 pass 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/promotion/admin.py b/website/promotion/admin.py --- a/website/promotion/admin.py +++ b/website/promotion/admin.py @@ -23,8 +23,8 @@ actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"] def has_change_permission(self, request, obj=None): - if obj is not None and not is_organiser(request.member, obj.event): - return False + if obj is not None and obj.event and is_organiser(request.member, obj.event): + return True return super().has_change_permission(request, obj) def mark_not_started(self, request, queryset):
{"golden_diff": "diff --git a/website/promotion/admin.py b/website/promotion/admin.py\n--- a/website/promotion/admin.py\n+++ b/website/promotion/admin.py\n@@ -23,8 +23,8 @@\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n \n def has_change_permission(self, request, obj=None):\n- if obj is not None and not is_organiser(request.member, obj.event):\n- return False\n+ if obj is not None and obj.event and is_organiser(request.member, obj.event):\n+ return True\n return super().has_change_permission(request, obj)\n \n def mark_not_started(self, request, queryset):\n", "issue": "Paparazcie committee members cannot edit promo requests\nMembers of the paparazcie cannot edit the promo requests in the back-end. I can, so it might be an issue with permissions. \n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom events.services import is_organiser\nfrom promotion.forms import PromotionRequestForm\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and not is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n pass\n", "path": "website/promotion/admin.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom events.services import is_organiser\nfrom promotion.forms import PromotionRequestForm\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", 
\"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and obj.event and is_organiser(request.member, obj.event):\n return True\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n pass\n", "path": "website/promotion/admin.py"}]}
862
154
gh_patches_debug_59322
rasdani/github-patches
git_diff
azavea__raster-vision-1236
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CPU Batch jobs cannot pull container After making a large number of dependency upgrades suggested by Dependabot, I started getting the following error when running any job that runs in the CPU compute environment on Batch. The GPU ones work fine. See https://aws.amazon.com/premiumsupport/knowledge-center/batch-job-failure-disk-space/ ``` CannotPullContainerError: failed to register layer: ApplyLayer exit status 1 stdout: stderr: write /root/.cache/pip/http/d/c/1/1/c/dc11c115dbb602e6636493addd92f61ac957da65f9166a69f6820fbc: no space left on device ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py` Content: ``` 1 import warnings 2 warnings.filterwarnings('ignore') # noqa 3 4 from typing import Union, Iterable, Optional 5 6 import logging 7 8 import numpy as np 9 import matplotlib 10 from matplotlib import pyplot as plt 11 import matplotlib.patches as mpatches 12 matplotlib.use('Agg') # noqa 13 import albumentations as A 14 15 import torch 16 from torch import nn 17 from torch.nn import functional as F 18 from torchvision import models 19 20 from rastervision.pipeline.config import ConfigError 21 from rastervision.pytorch_learner.learner import Learner 22 from rastervision.pytorch_learner.utils import ( 23 compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor, 24 Parallel, AddTensors) 25 from rastervision.pipeline.file_system import make_dir 26 27 log = logging.getLogger(__name__) 28 29 30 class SemanticSegmentationLearner(Learner): 31 def build_model(self) -> nn.Module: 32 # TODO support FCN option 33 pretrained = self.cfg.model.pretrained 34 out_classes = len(self.cfg.data.class_names) 35 if self.cfg.solver.ignore_last_class: 36 out_classes -= 1 37 model = models.segmentation.segmentation._segm_resnet( 38 'deeplabv3', 39 self.cfg.model.get_backbone_str(), 40 out_classes, 41 False, 42 pretrained_backbone=pretrained) 43 44 input_channels = self.cfg.data.img_channels 45 old_conv = model.backbone.conv1 46 47 if input_channels == old_conv.in_channels: 48 return model 49 50 # these parameters will be the same for the new conv layer 51 old_conv_args = { 52 'out_channels': old_conv.out_channels, 53 'kernel_size': old_conv.kernel_size, 54 'stride': old_conv.stride, 55 'padding': old_conv.padding, 56 'dilation': old_conv.dilation, 57 'groups': old_conv.groups, 58 'bias': old_conv.bias 59 } 60 61 if not pretrained: 62 # simply replace the first conv layer with one with the 63 # correct number of input channels 64 new_conv = nn.Conv2d(in_channels=input_channels, **old_conv_args) 65 model.backbone.conv1 = new_conv 66 return model 67 68 if input_channels > old_conv.in_channels: 69 # insert a new conv layer parallel to the existing one 70 # and sum their outputs 71 new_conv_channels = input_channels - old_conv.in_channels 72 new_conv = nn.Conv2d( 73 in_channels=new_conv_channels, **old_conv_args) 74 model.backbone.conv1 = nn.Sequential( 75 # split input along channel dim 76 SplitTensor((old_conv.in_channels, new_conv_channels), dim=1), 77 # each split goes to its respective conv layer 78 Parallel(old_conv, new_conv), 79 # sum the parallel outputs 80 AddTensors()) 81 elif input_channels < old_conv.in_channels: 82 model.backbone.conv1 = nn.Conv2d( 83 in_channels=input_channels, 
**old_conv_args) 84 model.backbone.conv1.weight.data[:, :input_channels] = \ 85 old_conv.weight.data[:, :input_channels] 86 else: 87 raise ConfigError(f'Something went wrong') 88 89 return model 90 91 def build_loss(self): 92 args = {} 93 94 loss_weights = self.cfg.solver.class_loss_weights 95 if loss_weights is not None: 96 loss_weights = torch.tensor(loss_weights, device=self.device) 97 args.update({'weight': loss_weights}) 98 99 if self.cfg.solver.ignore_last_class: 100 num_classes = len(self.cfg.data.class_names) 101 args.update({'ignore_index': num_classes - 1}) 102 103 loss = nn.CrossEntropyLoss(**args) 104 105 return loss 106 107 def train_step(self, batch, batch_ind): 108 x, y = batch 109 out = self.post_forward(self.model(x)) 110 return {'train_loss': self.loss(out, y)} 111 112 def validate_step(self, batch, batch_ind): 113 x, y = batch 114 out = self.post_forward(self.model(x)) 115 val_loss = self.loss(out, y) 116 117 num_labels = len(self.cfg.data.class_names) 118 y = y.view(-1) 119 out = self.prob_to_pred(out).view(-1) 120 conf_mat = compute_conf_mat(out, y, num_labels) 121 122 return {'val_loss': val_loss, 'conf_mat': conf_mat} 123 124 def validate_end(self, outputs, num_samples): 125 conf_mat = sum([o['conf_mat'] for o in outputs]) 126 val_loss = torch.stack([o['val_loss'] 127 for o in outputs]).sum() / num_samples 128 conf_mat_metrics = compute_conf_mat_metrics(conf_mat, 129 self.cfg.data.class_names) 130 131 metrics = {'val_loss': val_loss.item()} 132 metrics.update(conf_mat_metrics) 133 134 return metrics 135 136 def post_forward(self, x): 137 if isinstance(x, dict): 138 return x['out'] 139 return x 140 141 def predict(self, x: torch.Tensor, raw_out: bool = False) -> torch.Tensor: 142 x = self.to_batch(x).float() 143 x = self.to_device(x, self.device) 144 with torch.no_grad(): 145 out = self.model(x) 146 out = self.post_forward(out) 147 out = out.softmax(dim=1) 148 if not raw_out: 149 out = self.prob_to_pred(out) 150 out = self.to_device(out, 'cpu') 151 return out 152 153 def numpy_predict(self, x: np.ndarray, 154 raw_out: bool = False) -> np.ndarray: 155 _, h, w, _ = x.shape 156 transform, _ = self.get_data_transforms() 157 x = self.normalize_input(x) 158 x = self.to_batch(x) 159 x = np.stack([transform(image=img)['image'] for img in x]) 160 x = torch.from_numpy(x) 161 x = x.permute((0, 3, 1, 2)) 162 out = self.predict(x, raw_out=True) 163 out = F.interpolate( 164 out, size=(h, w), mode='bilinear', align_corners=False) 165 out = self.prob_to_pred(out) 166 return self.output_to_numpy(out) 167 168 def prob_to_pred(self, x): 169 return x.argmax(1) 170 171 def plot_batch(self, 172 x: torch.Tensor, 173 y: Union[torch.Tensor, np.ndarray], 174 output_path: str, 175 z: Optional[torch.Tensor] = None, 176 batch_limit: Optional[int] = None) -> None: 177 """Plot a whole batch in a grid using plot_xyz. 
178 179 Args: 180 x: batch of images 181 y: ground truth labels 182 output_path: local path where to save plot image 183 z: optional predicted labels 184 batch_limit: optional limit on (rendered) batch size 185 """ 186 batch_sz, c, h, w = x.shape 187 batch_sz = min(batch_sz, 188 batch_limit) if batch_limit is not None else batch_sz 189 if batch_sz == 0: 190 return 191 192 channel_groups = self.cfg.data.channel_display_groups 193 194 nrows = batch_sz 195 # one col for each group + 1 for labels + 1 for predictions 196 ncols = len(channel_groups) + 1 197 if z is not None: 198 ncols += 1 199 200 fig, axes = plt.subplots( 201 nrows=nrows, 202 ncols=ncols, 203 squeeze=False, 204 constrained_layout=True, 205 figsize=(3 * ncols, 3 * nrows)) 206 207 assert axes.shape == (nrows, ncols) 208 209 # (N, c, h, w) --> (N, h, w, c) 210 x = x.permute(0, 2, 3, 1) 211 212 # apply transform, if given 213 if self.cfg.data.plot_options.transform is not None: 214 tf = A.from_dict(self.cfg.data.plot_options.transform) 215 imgs = [tf(image=img)['image'] for img in x.numpy()] 216 x = torch.from_numpy(np.stack(imgs)) 217 218 for i in range(batch_sz): 219 ax = (fig, axes[i]) 220 if z is None: 221 self.plot_xyz(ax, x[i], y[i]) 222 else: 223 self.plot_xyz(ax, x[i], y[i], z=z[i]) 224 225 make_dir(output_path, use_dirname=True) 226 plt.savefig(output_path, bbox_inches='tight') 227 plt.close() 228 229 def plot_xyz(self, 230 ax: Iterable, 231 x: torch.Tensor, 232 y: Union[torch.Tensor, np.ndarray], 233 z: Optional[torch.Tensor] = None) -> None: 234 235 channel_groups = self.cfg.data.channel_display_groups 236 237 # make subplot titles 238 if not isinstance(channel_groups, dict): 239 channel_groups = { 240 f'Channels: {[*chs]}': chs 241 for chs in channel_groups 242 } 243 244 fig, ax = ax 245 img_axes = ax[:len(channel_groups)] 246 label_ax = ax[len(channel_groups)] 247 248 # plot input image(s) 249 for (title, chs), ch_ax in zip(channel_groups.items(), img_axes): 250 im = x[..., chs] 251 if len(chs) == 1: 252 # repeat single channel 3 times 253 im = im.expand(-1, -1, 3) 254 elif len(chs) == 2: 255 # add a 3rd channel with all pixels set to 0.5 256 h, w, _ = x.shape 257 third_channel = torch.full((h, w, 1), fill_value=.5) 258 im = torch.cat((im, third_channel), dim=-1) 259 elif len(chs) > 3: 260 # only use the first 3 channels 261 log.warn(f'Only plotting first 3 channels of channel-group ' 262 f'{title}: {chs}.') 263 im = x[..., chs[:3]] 264 ch_ax.imshow(im) 265 ch_ax.set_title(title) 266 ch_ax.set_xticks([]) 267 ch_ax.set_yticks([]) 268 269 class_colors = self.cfg.data.class_colors 270 colors = [color_to_triple(c) for c in class_colors] 271 colors = np.array(colors) / 255. 
272 cmap = matplotlib.colors.ListedColormap(colors) 273 274 # plot labels 275 label_ax.imshow( 276 y, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none') 277 label_ax.set_title(f'Ground truth labels') 278 label_ax.set_xticks([]) 279 label_ax.set_yticks([]) 280 281 # plot predictions 282 if z is not None: 283 pred_ax = ax[-1] 284 pred_ax.imshow( 285 z, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none') 286 pred_ax.set_title(f'Predicted labels') 287 pred_ax.set_xticks([]) 288 pred_ax.set_yticks([]) 289 290 # add a legend to the rightmost subplot 291 class_names = self.cfg.data.class_names 292 legend_items = [ 293 mpatches.Patch(facecolor=col, edgecolor='black', label=name) 294 for col, name in zip(colors, class_names) 295 ] 296 ax[-1].legend( 297 handles=legend_items, 298 loc='center right', 299 bbox_to_anchor=(1.8, 0.5)) 300 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py b/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py --- a/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py +++ b/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py @@ -34,7 +34,7 @@ out_classes = len(self.cfg.data.class_names) if self.cfg.solver.ignore_last_class: out_classes -= 1 - model = models.segmentation.segmentation._segm_resnet( + model = models.segmentation.segmentation._segm_model( 'deeplabv3', self.cfg.model.get_backbone_str(), out_classes,
{"golden_diff": "diff --git a/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py b/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py\n--- a/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py\n+++ b/rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py\n@@ -34,7 +34,7 @@\n out_classes = len(self.cfg.data.class_names)\n if self.cfg.solver.ignore_last_class:\n out_classes -= 1\n- model = models.segmentation.segmentation._segm_resnet(\n+ model = models.segmentation.segmentation._segm_model(\n 'deeplabv3',\n self.cfg.model.get_backbone_str(),\n out_classes,\n", "issue": "CPU Batch jobs cannot pull container\nAfter making a large number of dependency upgrades suggested by Dependabot, I started getting the following error when running any job that runs in the CPU compute environment on Batch. The GPU ones work fine. \r\n\r\nSee https://aws.amazon.com/premiumsupport/knowledge-center/batch-job-failure-disk-space/\r\n\r\n```\r\nCannotPullContainerError: failed to register layer: ApplyLayer exit status 1 stdout: stderr: write /root/.cache/pip/http/d/c/1/1/c/dc11c115dbb602e6636493addd92f61ac957da65f9166a69f6820fbc: no space left on device\r\n```\n", "before_files": [{"content": "import warnings\nwarnings.filterwarnings('ignore') # noqa\n\nfrom typing import Union, Iterable, Optional\n\nimport logging\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\nmatplotlib.use('Agg') # noqa\nimport albumentations as A\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision import models\n\nfrom rastervision.pipeline.config import ConfigError\nfrom rastervision.pytorch_learner.learner import Learner\nfrom rastervision.pytorch_learner.utils import (\n compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor,\n Parallel, AddTensors)\nfrom rastervision.pipeline.file_system import make_dir\n\nlog = logging.getLogger(__name__)\n\n\nclass SemanticSegmentationLearner(Learner):\n def build_model(self) -> nn.Module:\n # TODO support FCN option\n pretrained = self.cfg.model.pretrained\n out_classes = len(self.cfg.data.class_names)\n if self.cfg.solver.ignore_last_class:\n out_classes -= 1\n model = models.segmentation.segmentation._segm_resnet(\n 'deeplabv3',\n self.cfg.model.get_backbone_str(),\n out_classes,\n False,\n pretrained_backbone=pretrained)\n\n input_channels = self.cfg.data.img_channels\n old_conv = model.backbone.conv1\n\n if input_channels == old_conv.in_channels:\n return model\n\n # these parameters will be the same for the new conv layer\n old_conv_args = {\n 'out_channels': old_conv.out_channels,\n 'kernel_size': old_conv.kernel_size,\n 'stride': old_conv.stride,\n 'padding': old_conv.padding,\n 'dilation': old_conv.dilation,\n 'groups': old_conv.groups,\n 'bias': old_conv.bias\n }\n\n if not pretrained:\n # simply replace the first conv layer with one with the\n # correct number of input channels\n new_conv = nn.Conv2d(in_channels=input_channels, **old_conv_args)\n model.backbone.conv1 = new_conv\n return model\n\n if input_channels > old_conv.in_channels:\n # insert a new conv layer parallel to the existing one\n # and sum their outputs\n new_conv_channels = input_channels - old_conv.in_channels\n new_conv = nn.Conv2d(\n in_channels=new_conv_channels, **old_conv_args)\n model.backbone.conv1 = nn.Sequential(\n # split input along 
channel dim\n SplitTensor((old_conv.in_channels, new_conv_channels), dim=1),\n # each split goes to its respective conv layer\n Parallel(old_conv, new_conv),\n # sum the parallel outputs\n AddTensors())\n elif input_channels < old_conv.in_channels:\n model.backbone.conv1 = nn.Conv2d(\n in_channels=input_channels, **old_conv_args)\n model.backbone.conv1.weight.data[:, :input_channels] = \\\n old_conv.weight.data[:, :input_channels]\n else:\n raise ConfigError(f'Something went wrong')\n\n return model\n\n def build_loss(self):\n args = {}\n\n loss_weights = self.cfg.solver.class_loss_weights\n if loss_weights is not None:\n loss_weights = torch.tensor(loss_weights, device=self.device)\n args.update({'weight': loss_weights})\n\n if self.cfg.solver.ignore_last_class:\n num_classes = len(self.cfg.data.class_names)\n args.update({'ignore_index': num_classes - 1})\n\n loss = nn.CrossEntropyLoss(**args)\n\n return loss\n\n def train_step(self, batch, batch_ind):\n x, y = batch\n out = self.post_forward(self.model(x))\n return {'train_loss': self.loss(out, y)}\n\n def validate_step(self, batch, batch_ind):\n x, y = batch\n out = self.post_forward(self.model(x))\n val_loss = self.loss(out, y)\n\n num_labels = len(self.cfg.data.class_names)\n y = y.view(-1)\n out = self.prob_to_pred(out).view(-1)\n conf_mat = compute_conf_mat(out, y, num_labels)\n\n return {'val_loss': val_loss, 'conf_mat': conf_mat}\n\n def validate_end(self, outputs, num_samples):\n conf_mat = sum([o['conf_mat'] for o in outputs])\n val_loss = torch.stack([o['val_loss']\n for o in outputs]).sum() / num_samples\n conf_mat_metrics = compute_conf_mat_metrics(conf_mat,\n self.cfg.data.class_names)\n\n metrics = {'val_loss': val_loss.item()}\n metrics.update(conf_mat_metrics)\n\n return metrics\n\n def post_forward(self, x):\n if isinstance(x, dict):\n return x['out']\n return x\n\n def predict(self, x: torch.Tensor, raw_out: bool = False) -> torch.Tensor:\n x = self.to_batch(x).float()\n x = self.to_device(x, self.device)\n with torch.no_grad():\n out = self.model(x)\n out = self.post_forward(out)\n out = out.softmax(dim=1)\n if not raw_out:\n out = self.prob_to_pred(out)\n out = self.to_device(out, 'cpu')\n return out\n\n def numpy_predict(self, x: np.ndarray,\n raw_out: bool = False) -> np.ndarray:\n _, h, w, _ = x.shape\n transform, _ = self.get_data_transforms()\n x = self.normalize_input(x)\n x = self.to_batch(x)\n x = np.stack([transform(image=img)['image'] for img in x])\n x = torch.from_numpy(x)\n x = x.permute((0, 3, 1, 2))\n out = self.predict(x, raw_out=True)\n out = F.interpolate(\n out, size=(h, w), mode='bilinear', align_corners=False)\n out = self.prob_to_pred(out)\n return self.output_to_numpy(out)\n\n def prob_to_pred(self, x):\n return x.argmax(1)\n\n def plot_batch(self,\n x: torch.Tensor,\n y: Union[torch.Tensor, np.ndarray],\n output_path: str,\n z: Optional[torch.Tensor] = None,\n batch_limit: Optional[int] = None) -> None:\n \"\"\"Plot a whole batch in a grid using plot_xyz.\n\n Args:\n x: batch of images\n y: ground truth labels\n output_path: local path where to save plot image\n z: optional predicted labels\n batch_limit: optional limit on (rendered) batch size\n \"\"\"\n batch_sz, c, h, w = x.shape\n batch_sz = min(batch_sz,\n batch_limit) if batch_limit is not None else batch_sz\n if batch_sz == 0:\n return\n\n channel_groups = self.cfg.data.channel_display_groups\n\n nrows = batch_sz\n # one col for each group + 1 for labels + 1 for predictions\n ncols = len(channel_groups) + 1\n if z is not None:\n ncols 
+= 1\n\n fig, axes = plt.subplots(\n nrows=nrows,\n ncols=ncols,\n squeeze=False,\n constrained_layout=True,\n figsize=(3 * ncols, 3 * nrows))\n\n assert axes.shape == (nrows, ncols)\n\n # (N, c, h, w) --> (N, h, w, c)\n x = x.permute(0, 2, 3, 1)\n\n # apply transform, if given\n if self.cfg.data.plot_options.transform is not None:\n tf = A.from_dict(self.cfg.data.plot_options.transform)\n imgs = [tf(image=img)['image'] for img in x.numpy()]\n x = torch.from_numpy(np.stack(imgs))\n\n for i in range(batch_sz):\n ax = (fig, axes[i])\n if z is None:\n self.plot_xyz(ax, x[i], y[i])\n else:\n self.plot_xyz(ax, x[i], y[i], z=z[i])\n\n make_dir(output_path, use_dirname=True)\n plt.savefig(output_path, bbox_inches='tight')\n plt.close()\n\n def plot_xyz(self,\n ax: Iterable,\n x: torch.Tensor,\n y: Union[torch.Tensor, np.ndarray],\n z: Optional[torch.Tensor] = None) -> None:\n\n channel_groups = self.cfg.data.channel_display_groups\n\n # make subplot titles\n if not isinstance(channel_groups, dict):\n channel_groups = {\n f'Channels: {[*chs]}': chs\n for chs in channel_groups\n }\n\n fig, ax = ax\n img_axes = ax[:len(channel_groups)]\n label_ax = ax[len(channel_groups)]\n\n # plot input image(s)\n for (title, chs), ch_ax in zip(channel_groups.items(), img_axes):\n im = x[..., chs]\n if len(chs) == 1:\n # repeat single channel 3 times\n im = im.expand(-1, -1, 3)\n elif len(chs) == 2:\n # add a 3rd channel with all pixels set to 0.5\n h, w, _ = x.shape\n third_channel = torch.full((h, w, 1), fill_value=.5)\n im = torch.cat((im, third_channel), dim=-1)\n elif len(chs) > 3:\n # only use the first 3 channels\n log.warn(f'Only plotting first 3 channels of channel-group '\n f'{title}: {chs}.')\n im = x[..., chs[:3]]\n ch_ax.imshow(im)\n ch_ax.set_title(title)\n ch_ax.set_xticks([])\n ch_ax.set_yticks([])\n\n class_colors = self.cfg.data.class_colors\n colors = [color_to_triple(c) for c in class_colors]\n colors = np.array(colors) / 255.\n cmap = matplotlib.colors.ListedColormap(colors)\n\n # plot labels\n label_ax.imshow(\n y, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none')\n label_ax.set_title(f'Ground truth labels')\n label_ax.set_xticks([])\n label_ax.set_yticks([])\n\n # plot predictions\n if z is not None:\n pred_ax = ax[-1]\n pred_ax.imshow(\n z, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none')\n pred_ax.set_title(f'Predicted labels')\n pred_ax.set_xticks([])\n pred_ax.set_yticks([])\n\n # add a legend to the rightmost subplot\n class_names = self.cfg.data.class_names\n legend_items = [\n mpatches.Patch(facecolor=col, edgecolor='black', label=name)\n for col, name in zip(colors, class_names)\n ]\n ax[-1].legend(\n handles=legend_items,\n loc='center right',\n bbox_to_anchor=(1.8, 0.5))\n", "path": "rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py"}], "after_files": [{"content": "import warnings\nwarnings.filterwarnings('ignore') # noqa\n\nfrom typing import Union, Iterable, Optional\n\nimport logging\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\nmatplotlib.use('Agg') # noqa\nimport albumentations as A\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision import models\n\nfrom rastervision.pipeline.config import ConfigError\nfrom rastervision.pytorch_learner.learner import Learner\nfrom rastervision.pytorch_learner.utils import (\n compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor,\n Parallel, 
AddTensors)\nfrom rastervision.pipeline.file_system import make_dir\n\nlog = logging.getLogger(__name__)\n\n\nclass SemanticSegmentationLearner(Learner):\n def build_model(self) -> nn.Module:\n # TODO support FCN option\n pretrained = self.cfg.model.pretrained\n out_classes = len(self.cfg.data.class_names)\n if self.cfg.solver.ignore_last_class:\n out_classes -= 1\n model = models.segmentation.segmentation._segm_model(\n 'deeplabv3',\n self.cfg.model.get_backbone_str(),\n out_classes,\n False,\n pretrained_backbone=pretrained)\n\n input_channels = self.cfg.data.img_channels\n old_conv = model.backbone.conv1\n\n if input_channels == old_conv.in_channels:\n return model\n\n # these parameters will be the same for the new conv layer\n old_conv_args = {\n 'out_channels': old_conv.out_channels,\n 'kernel_size': old_conv.kernel_size,\n 'stride': old_conv.stride,\n 'padding': old_conv.padding,\n 'dilation': old_conv.dilation,\n 'groups': old_conv.groups,\n 'bias': old_conv.bias\n }\n\n if not pretrained:\n # simply replace the first conv layer with one with the\n # correct number of input channels\n new_conv = nn.Conv2d(in_channels=input_channels, **old_conv_args)\n model.backbone.conv1 = new_conv\n return model\n\n if input_channels > old_conv.in_channels:\n # insert a new conv layer parallel to the existing one\n # and sum their outputs\n new_conv_channels = input_channels - old_conv.in_channels\n new_conv = nn.Conv2d(\n in_channels=new_conv_channels, **old_conv_args)\n model.backbone.conv1 = nn.Sequential(\n # split input along channel dim\n SplitTensor((old_conv.in_channels, new_conv_channels), dim=1),\n # each split goes to its respective conv layer\n Parallel(old_conv, new_conv),\n # sum the parallel outputs\n AddTensors())\n elif input_channels < old_conv.in_channels:\n model.backbone.conv1 = nn.Conv2d(\n in_channels=input_channels, **old_conv_args)\n model.backbone.conv1.weight.data[:, :input_channels] = \\\n old_conv.weight.data[:, :input_channels]\n else:\n raise ConfigError(f'Something went wrong')\n\n return model\n\n def build_loss(self):\n args = {}\n\n loss_weights = self.cfg.solver.class_loss_weights\n if loss_weights is not None:\n loss_weights = torch.tensor(loss_weights, device=self.device)\n args.update({'weight': loss_weights})\n\n if self.cfg.solver.ignore_last_class:\n num_classes = len(self.cfg.data.class_names)\n args.update({'ignore_index': num_classes - 1})\n\n loss = nn.CrossEntropyLoss(**args)\n\n return loss\n\n def train_step(self, batch, batch_ind):\n x, y = batch\n out = self.post_forward(self.model(x))\n return {'train_loss': self.loss(out, y)}\n\n def validate_step(self, batch, batch_ind):\n x, y = batch\n out = self.post_forward(self.model(x))\n val_loss = self.loss(out, y)\n\n num_labels = len(self.cfg.data.class_names)\n y = y.view(-1)\n out = self.prob_to_pred(out).view(-1)\n conf_mat = compute_conf_mat(out, y, num_labels)\n\n return {'val_loss': val_loss, 'conf_mat': conf_mat}\n\n def validate_end(self, outputs, num_samples):\n conf_mat = sum([o['conf_mat'] for o in outputs])\n val_loss = torch.stack([o['val_loss']\n for o in outputs]).sum() / num_samples\n conf_mat_metrics = compute_conf_mat_metrics(conf_mat,\n self.cfg.data.class_names)\n\n metrics = {'val_loss': val_loss.item()}\n metrics.update(conf_mat_metrics)\n\n return metrics\n\n def post_forward(self, x):\n if isinstance(x, dict):\n return x['out']\n return x\n\n def predict(self, x: torch.Tensor, raw_out: bool = False) -> torch.Tensor:\n x = self.to_batch(x).float()\n x = self.to_device(x, 
self.device)\n with torch.no_grad():\n out = self.model(x)\n out = self.post_forward(out)\n out = out.softmax(dim=1)\n if not raw_out:\n out = self.prob_to_pred(out)\n out = self.to_device(out, 'cpu')\n return out\n\n def numpy_predict(self, x: np.ndarray,\n raw_out: bool = False) -> np.ndarray:\n _, h, w, _ = x.shape\n transform, _ = self.get_data_transforms()\n x = self.normalize_input(x)\n x = self.to_batch(x)\n x = np.stack([transform(image=img)['image'] for img in x])\n x = torch.from_numpy(x)\n x = x.permute((0, 3, 1, 2))\n out = self.predict(x, raw_out=True)\n out = F.interpolate(\n out, size=(h, w), mode='bilinear', align_corners=False)\n out = self.prob_to_pred(out)\n return self.output_to_numpy(out)\n\n def prob_to_pred(self, x):\n return x.argmax(1)\n\n def plot_batch(self,\n x: torch.Tensor,\n y: Union[torch.Tensor, np.ndarray],\n output_path: str,\n z: Optional[torch.Tensor] = None,\n batch_limit: Optional[int] = None) -> None:\n \"\"\"Plot a whole batch in a grid using plot_xyz.\n\n Args:\n x: batch of images\n y: ground truth labels\n output_path: local path where to save plot image\n z: optional predicted labels\n batch_limit: optional limit on (rendered) batch size\n \"\"\"\n batch_sz, c, h, w = x.shape\n batch_sz = min(batch_sz,\n batch_limit) if batch_limit is not None else batch_sz\n if batch_sz == 0:\n return\n\n channel_groups = self.cfg.data.channel_display_groups\n\n nrows = batch_sz\n # one col for each group + 1 for labels + 1 for predictions\n ncols = len(channel_groups) + 1\n if z is not None:\n ncols += 1\n\n fig, axes = plt.subplots(\n nrows=nrows,\n ncols=ncols,\n squeeze=False,\n constrained_layout=True,\n figsize=(3 * ncols, 3 * nrows))\n\n assert axes.shape == (nrows, ncols)\n\n # (N, c, h, w) --> (N, h, w, c)\n x = x.permute(0, 2, 3, 1)\n\n # apply transform, if given\n if self.cfg.data.plot_options.transform is not None:\n tf = A.from_dict(self.cfg.data.plot_options.transform)\n imgs = [tf(image=img)['image'] for img in x.numpy()]\n x = torch.from_numpy(np.stack(imgs))\n\n for i in range(batch_sz):\n ax = (fig, axes[i])\n if z is None:\n self.plot_xyz(ax, x[i], y[i])\n else:\n self.plot_xyz(ax, x[i], y[i], z=z[i])\n\n make_dir(output_path, use_dirname=True)\n plt.savefig(output_path, bbox_inches='tight')\n plt.close()\n\n def plot_xyz(self,\n ax: Iterable,\n x: torch.Tensor,\n y: Union[torch.Tensor, np.ndarray],\n z: Optional[torch.Tensor] = None) -> None:\n\n channel_groups = self.cfg.data.channel_display_groups\n\n # make subplot titles\n if not isinstance(channel_groups, dict):\n channel_groups = {\n f'Channels: {[*chs]}': chs\n for chs in channel_groups\n }\n\n fig, ax = ax\n img_axes = ax[:len(channel_groups)]\n label_ax = ax[len(channel_groups)]\n\n # plot input image(s)\n for (title, chs), ch_ax in zip(channel_groups.items(), img_axes):\n im = x[..., chs]\n if len(chs) == 1:\n # repeat single channel 3 times\n im = im.expand(-1, -1, 3)\n elif len(chs) == 2:\n # add a 3rd channel with all pixels set to 0.5\n h, w, _ = x.shape\n third_channel = torch.full((h, w, 1), fill_value=.5)\n im = torch.cat((im, third_channel), dim=-1)\n elif len(chs) > 3:\n # only use the first 3 channels\n log.warn(f'Only plotting first 3 channels of channel-group '\n f'{title}: {chs}.')\n im = x[..., chs[:3]]\n ch_ax.imshow(im)\n ch_ax.set_title(title)\n ch_ax.set_xticks([])\n ch_ax.set_yticks([])\n\n class_colors = self.cfg.data.class_colors\n colors = [color_to_triple(c) for c in class_colors]\n colors = np.array(colors) / 255.\n cmap = 
matplotlib.colors.ListedColormap(colors)\n\n # plot labels\n label_ax.imshow(\n y, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none')\n label_ax.set_title(f'Ground truth labels')\n label_ax.set_xticks([])\n label_ax.set_yticks([])\n\n # plot predictions\n if z is not None:\n pred_ax = ax[-1]\n pred_ax.imshow(\n z, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none')\n pred_ax.set_title(f'Predicted labels')\n pred_ax.set_xticks([])\n pred_ax.set_yticks([])\n\n # add a legend to the rightmost subplot\n class_names = self.cfg.data.class_names\n legend_items = [\n mpatches.Patch(facecolor=col, edgecolor='black', label=name)\n for col, name in zip(colors, class_names)\n ]\n ax[-1].legend(\n handles=legend_items,\n loc='center right',\n bbox_to_anchor=(1.8, 0.5))\n", "path": "rastervision_pytorch_learner/rastervision/pytorch_learner/semantic_segmentation_learner.py"}]}
3,633
194
gh_patches_debug_35024
rasdani/github-patches
git_diff
internetarchive__openlibrary-4013
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sentry should include deployment SHA Sentry has options for specifying the SHA of the current code, so you can see when an error was introduced. We currently don't take advantage of this. ### Describe the problem that you'd like solved <!-- A clear and concise description of what you want to happen. --> ### Proposal & Constraints <!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? --> <!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? --> ### Additional context <!-- Add any other context or screenshots about the feature request here. --> ### Stakeholders @cdrini --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openlibrary/plugins/openlibrary/sentry.py` Content: ``` 1 import logging 2 3 import sentry_sdk 4 5 import infogami 6 from infogami.utils import delegate 7 8 logger = logging.getLogger("openlibrary.sentry") 9 10 11 def is_enabled(): 12 return hasattr(infogami.config, 'sentry') and infogami.config.sentry.enabled 13 14 15 def setup(): 16 logger.info("Setting up sentry (enabled={})".format(is_enabled())) 17 18 if not is_enabled(): 19 return 20 21 sentry_sdk.init(dsn=infogami.config.sentry.dsn, 22 environment=infogami.config.sentry.environment) 23 delegate.add_exception_hook(lambda: sentry_sdk.capture_exception()) 24 ``` Path: `openlibrary/plugins/openlibrary/status.py` Content: ``` 1 import web 2 3 import datetime 4 import socket 5 import subprocess 6 import sys 7 8 from infogami import config 9 from infogami.utils import delegate 10 from infogami.utils.view import render_template, public 11 from openlibrary.core import stats 12 13 status_info = {} 14 feature_flags = {} 15 16 class status(delegate.page): 17 def GET(self): 18 template = render_template("status", status_info, feature_flags) 19 template.v2 = True 20 return template 21 22 @public 23 def get_git_revision_short_hash(): 24 return (status_info.get('Software version') 25 if status_info and isinstance(status_info, dict) 26 else None) 27 28 def get_software_version(): 29 return subprocess.Popen("git rev-parse --short HEAD --".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip() 30 31 def get_features_enabled(): 32 return config.features 33 34 def setup(): 35 "Basic startup status for the server" 36 global status_info, feature_flags 37 version = get_software_version() 38 if bytes != str: # Python 3 39 version = version.decode("utf-8") 40 host = socket.gethostname() 41 status_info = { 42 "Software version": version, 43 "Python version": sys.version.split()[0], 44 "Host": host, 45 "Start time": datetime.datetime.utcnow(), 46 } 47 feature_flags = get_features_enabled() 48 49 # Host is e.g. ol-web4.blah.archive.org ; we just want the first subdomain 50 first_subdomain = host.split('.')[0] or 'unknown' 51 stats.increment('ol.servers.%s.started' % first_subdomain) 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openlibrary/plugins/openlibrary/sentry.py b/openlibrary/plugins/openlibrary/sentry.py --- a/openlibrary/plugins/openlibrary/sentry.py +++ b/openlibrary/plugins/openlibrary/sentry.py @@ -5,6 +5,8 @@ import infogami from infogami.utils import delegate +from openlibrary.plugins.openlibrary.status import get_software_version + logger = logging.getLogger("openlibrary.sentry") @@ -19,5 +21,6 @@ return sentry_sdk.init(dsn=infogami.config.sentry.dsn, - environment=infogami.config.sentry.environment) + environment=infogami.config.sentry.environment, + release=get_software_version()) delegate.add_exception_hook(lambda: sentry_sdk.capture_exception()) diff --git a/openlibrary/plugins/openlibrary/status.py b/openlibrary/plugins/openlibrary/status.py --- a/openlibrary/plugins/openlibrary/status.py +++ b/openlibrary/plugins/openlibrary/status.py @@ -2,8 +2,8 @@ import datetime import socket -import subprocess import sys +from subprocess import PIPE, Popen, STDOUT from infogami import config from infogami.utils import delegate @@ -25,8 +25,10 @@ if status_info and isinstance(status_info, dict) else None) -def get_software_version(): - return subprocess.Popen("git rev-parse --short HEAD --".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip() + +def get_software_version(): # -> str: + cmd = "git rev-parse --short HEAD --".split() + return str(Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read().decode().strip()) def get_features_enabled(): return config.features @@ -34,12 +36,9 @@ def setup(): "Basic startup status for the server" global status_info, feature_flags - version = get_software_version() - if bytes != str: # Python 3 - version = version.decode("utf-8") host = socket.gethostname() status_info = { - "Software version": version, + "Software version": get_software_version(), "Python version": sys.version.split()[0], "Host": host, "Start time": datetime.datetime.utcnow(),
{"golden_diff": "diff --git a/openlibrary/plugins/openlibrary/sentry.py b/openlibrary/plugins/openlibrary/sentry.py\n--- a/openlibrary/plugins/openlibrary/sentry.py\n+++ b/openlibrary/plugins/openlibrary/sentry.py\n@@ -5,6 +5,8 @@\n import infogami\n from infogami.utils import delegate\n \n+from openlibrary.plugins.openlibrary.status import get_software_version\n+\n logger = logging.getLogger(\"openlibrary.sentry\")\n \n \n@@ -19,5 +21,6 @@\n return\n \n sentry_sdk.init(dsn=infogami.config.sentry.dsn,\n- environment=infogami.config.sentry.environment)\n+ environment=infogami.config.sentry.environment,\n+ release=get_software_version())\n delegate.add_exception_hook(lambda: sentry_sdk.capture_exception())\ndiff --git a/openlibrary/plugins/openlibrary/status.py b/openlibrary/plugins/openlibrary/status.py\n--- a/openlibrary/plugins/openlibrary/status.py\n+++ b/openlibrary/plugins/openlibrary/status.py\n@@ -2,8 +2,8 @@\n \n import datetime\n import socket\n-import subprocess\n import sys\n+from subprocess import PIPE, Popen, STDOUT\n \n from infogami import config\n from infogami.utils import delegate\n@@ -25,8 +25,10 @@\n if status_info and isinstance(status_info, dict) \n else None)\n \n-def get_software_version():\n- return subprocess.Popen(\"git rev-parse --short HEAD --\".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip()\n+\n+def get_software_version(): # -> str:\n+ cmd = \"git rev-parse --short HEAD --\".split()\n+ return str(Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read().decode().strip())\n \n def get_features_enabled():\n return config.features\n@@ -34,12 +36,9 @@\n def setup():\n \"Basic startup status for the server\"\n global status_info, feature_flags\n- version = get_software_version()\n- if bytes != str: # Python 3\n- version = version.decode(\"utf-8\")\n host = socket.gethostname()\n status_info = {\n- \"Software version\": version,\n+ \"Software version\": get_software_version(),\n \"Python version\": sys.version.split()[0],\n \"Host\": host,\n \"Start time\": datetime.datetime.utcnow(),\n", "issue": "Sentry should include deployment SHA\nSentry has options for specifying the SHA of the current code, so you can see when an error was introduced. We currently don't take advantage of this.\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\r\n### Stakeholders\r\n@cdrini \r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nimport sentry_sdk\n\nimport infogami\nfrom infogami.utils import delegate\n\nlogger = logging.getLogger(\"openlibrary.sentry\")\n\n\ndef is_enabled():\n return hasattr(infogami.config, 'sentry') and infogami.config.sentry.enabled\n\n\ndef setup():\n logger.info(\"Setting up sentry (enabled={})\".format(is_enabled()))\n\n if not is_enabled():\n return\n\n sentry_sdk.init(dsn=infogami.config.sentry.dsn,\n environment=infogami.config.sentry.environment)\n delegate.add_exception_hook(lambda: sentry_sdk.capture_exception())\n", "path": "openlibrary/plugins/openlibrary/sentry.py"}, {"content": "import web\n\nimport datetime\nimport socket\nimport subprocess\nimport sys\n\nfrom infogami import config\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template, public\nfrom openlibrary.core import stats\n\nstatus_info = {}\nfeature_flags = {}\n\nclass status(delegate.page):\n def GET(self):\n template = render_template(\"status\", status_info, feature_flags)\n template.v2 = True\n return template\n\n@public\ndef get_git_revision_short_hash():\n return (status_info.get('Software version')\n if status_info and isinstance(status_info, dict) \n else None)\n\ndef get_software_version():\n return subprocess.Popen(\"git rev-parse --short HEAD --\".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip()\n\ndef get_features_enabled():\n return config.features\n\ndef setup():\n \"Basic startup status for the server\"\n global status_info, feature_flags\n version = get_software_version()\n if bytes != str: # Python 3\n version = version.decode(\"utf-8\")\n host = socket.gethostname()\n status_info = {\n \"Software version\": version,\n \"Python version\": sys.version.split()[0],\n \"Host\": host,\n \"Start time\": datetime.datetime.utcnow(),\n }\n feature_flags = get_features_enabled()\n\n # Host is e.g. 
ol-web4.blah.archive.org ; we just want the first subdomain\n first_subdomain = host.split('.')[0] or 'unknown'\n stats.increment('ol.servers.%s.started' % first_subdomain)\n", "path": "openlibrary/plugins/openlibrary/status.py"}], "after_files": [{"content": "import logging\n\nimport sentry_sdk\n\nimport infogami\nfrom infogami.utils import delegate\n\nfrom openlibrary.plugins.openlibrary.status import get_software_version\n\nlogger = logging.getLogger(\"openlibrary.sentry\")\n\n\ndef is_enabled():\n return hasattr(infogami.config, 'sentry') and infogami.config.sentry.enabled\n\n\ndef setup():\n logger.info(\"Setting up sentry (enabled={})\".format(is_enabled()))\n\n if not is_enabled():\n return\n\n sentry_sdk.init(dsn=infogami.config.sentry.dsn,\n environment=infogami.config.sentry.environment,\n release=get_software_version())\n delegate.add_exception_hook(lambda: sentry_sdk.capture_exception())\n", "path": "openlibrary/plugins/openlibrary/sentry.py"}, {"content": "import web\n\nimport datetime\nimport socket\nimport sys\nfrom subprocess import PIPE, Popen, STDOUT\n\nfrom infogami import config\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template, public\nfrom openlibrary.core import stats\n\nstatus_info = {}\nfeature_flags = {}\n\nclass status(delegate.page):\n def GET(self):\n template = render_template(\"status\", status_info, feature_flags)\n template.v2 = True\n return template\n\n@public\ndef get_git_revision_short_hash():\n return (status_info.get('Software version')\n if status_info and isinstance(status_info, dict) \n else None)\n\n\ndef get_software_version(): # -> str:\n cmd = \"git rev-parse --short HEAD --\".split()\n return str(Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read().decode().strip())\n\ndef get_features_enabled():\n return config.features\n\ndef setup():\n \"Basic startup status for the server\"\n global status_info, feature_flags\n host = socket.gethostname()\n status_info = {\n \"Software version\": get_software_version(),\n \"Python version\": sys.version.split()[0],\n \"Host\": host,\n \"Start time\": datetime.datetime.utcnow(),\n }\n feature_flags = get_features_enabled()\n\n # Host is e.g. ol-web4.blah.archive.org ; we just want the first subdomain\n first_subdomain = host.split('.')[0] or 'unknown'\n stats.increment('ol.servers.%s.started' % first_subdomain)\n", "path": "openlibrary/plugins/openlibrary/status.py"}]}
1,027
513
gh_patches_debug_23368
rasdani/github-patches
git_diff
pypa__pip-1723
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Multiple calls to `pip.main()` produce duplicate output For example: ``` >>> import pip >>> pip.main(['install']) You must give at least one requirement to install (see "pip help install") 0 >>> pip.main(['install']) You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") 0 >>> pip.main(['install']) You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") 0 >>> pip.main(['install']) You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") 0 >>> pip.main(['install']) You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") You must give at least one requirement to install (see "pip help install") 0 ``` Pip version: pip 1.6.dev1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/log.py` Content: ``` 1 """Logging 2 """ 3 4 import sys 5 import os 6 import logging 7 8 from pip._vendor import colorama, pkg_resources 9 10 11 def _color_wrap(*colors): 12 def wrapped(inp): 13 return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) 14 return wrapped 15 16 17 def should_color(consumer, environ, std=(sys.stdout, sys.stderr)): 18 real_consumer = ( 19 consumer if not isinstance(consumer, colorama.AnsiToWin32) 20 else consumer.wrapped 21 ) 22 23 # If consumer isn't stdout or stderr we shouldn't colorize it 24 if real_consumer not in std: 25 return False 26 27 # If consumer is a tty we should color it 28 if hasattr(real_consumer, "isatty") and real_consumer.isatty(): 29 return True 30 31 # If we have an ASNI term we should color it 32 if environ.get("TERM") == "ANSI": 33 return True 34 35 # If anything else we should not color it 36 return False 37 38 39 def should_warn(current_version, removal_version): 40 # Our Significant digits on versions is 2, so remove everything but the 41 # first two places. 42 current_version = ".".join(current_version.split(".")[:2]) 43 removal_version = ".".join(removal_version.split(".")[:2]) 44 45 # Our warning threshold is one minor version before removal, so we 46 # decrement the minor version by one 47 major, minor = removal_version.split(".") 48 minor = str(int(minor) - 1) 49 warn_version = ".".join([major, minor]) 50 51 # Test if our current_version should be a warn 52 return (pkg_resources.parse_version(current_version) 53 < pkg_resources.parse_version(warn_version)) 54 55 56 class Logger(object): 57 """ 58 Logging object for use in command-line script. Allows ranges of 59 levels, to avoid some redundancy of displayed information. 
60 """ 61 VERBOSE_DEBUG = logging.DEBUG - 1 62 DEBUG = logging.DEBUG 63 INFO = logging.INFO 64 NOTIFY = (logging.INFO + logging.WARN) / 2 65 WARN = WARNING = logging.WARN 66 ERROR = logging.ERROR 67 FATAL = logging.FATAL 68 69 LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL] 70 71 COLORS = { 72 WARN: _color_wrap(colorama.Fore.YELLOW), 73 ERROR: _color_wrap(colorama.Fore.RED), 74 FATAL: _color_wrap(colorama.Fore.RED), 75 } 76 77 def __init__(self): 78 self.consumers = [] 79 self.indent = 0 80 self.explicit_levels = False 81 self.in_progress = None 82 self.in_progress_hanging = False 83 84 def add_consumers(self, *consumers): 85 if sys.platform.startswith("win"): 86 for level, consumer in consumers: 87 if hasattr(consumer, "write"): 88 self.consumers.append( 89 (level, colorama.AnsiToWin32(consumer)), 90 ) 91 else: 92 self.consumers.append((level, consumer)) 93 else: 94 self.consumers.extend(consumers) 95 96 def debug(self, msg, *args, **kw): 97 self.log(self.DEBUG, msg, *args, **kw) 98 99 def info(self, msg, *args, **kw): 100 self.log(self.INFO, msg, *args, **kw) 101 102 def notify(self, msg, *args, **kw): 103 self.log(self.NOTIFY, msg, *args, **kw) 104 105 def warn(self, msg, *args, **kw): 106 self.log(self.WARN, msg, *args, **kw) 107 108 def error(self, msg, *args, **kw): 109 self.log(self.ERROR, msg, *args, **kw) 110 111 def fatal(self, msg, *args, **kw): 112 self.log(self.FATAL, msg, *args, **kw) 113 114 def deprecated(self, removal_version, msg, *args, **kwargs): 115 """ 116 Logs deprecation message which is log level WARN if the 117 ``removal_version`` is > 1 minor release away and log level ERROR 118 otherwise. 119 120 removal_version should be the version that the deprecated feature is 121 expected to be removed in, so something that will not exist in 122 version 1.7, but will in 1.6 would have a removal_version of 1.7. 123 """ 124 from pip import __version__ 125 126 if should_warn(__version__, removal_version): 127 self.warn(msg, *args, **kwargs) 128 else: 129 self.error(msg, *args, **kwargs) 130 131 def log(self, level, msg, *args, **kw): 132 if args: 133 if kw: 134 raise TypeError( 135 "You may give positional or keyword arguments, not both") 136 args = args or kw 137 138 # render 139 if args: 140 rendered = msg % args 141 else: 142 rendered = msg 143 rendered = ' ' * self.indent + rendered 144 if self.explicit_levels: 145 # FIXME: should this be a name, not a level number? 
146 rendered = '%02i %s' % (level, rendered) 147 148 for consumer_level, consumer in self.consumers: 149 if self.level_matches(level, consumer_level): 150 if (self.in_progress_hanging 151 and consumer in (sys.stdout, sys.stderr)): 152 self.in_progress_hanging = False 153 sys.stdout.write('\n') 154 sys.stdout.flush() 155 if hasattr(consumer, 'write'): 156 write_content = rendered + '\n' 157 if should_color(consumer, os.environ): 158 # We are printing to stdout or stderr and it supports 159 # colors so render our text colored 160 colorizer = self.COLORS.get(level, lambda x: x) 161 write_content = colorizer(write_content) 162 163 consumer.write(write_content) 164 if hasattr(consumer, 'flush'): 165 consumer.flush() 166 else: 167 consumer(rendered) 168 169 def _show_progress(self): 170 """Should we display download progress?""" 171 return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty()) 172 173 def start_progress(self, msg): 174 assert not self.in_progress, ( 175 "Tried to start_progress(%r) while in_progress %r" 176 % (msg, self.in_progress)) 177 if self._show_progress(): 178 sys.stdout.write(' ' * self.indent + msg) 179 sys.stdout.flush() 180 self.in_progress_hanging = True 181 else: 182 self.in_progress_hanging = False 183 self.in_progress = msg 184 self.last_message = None 185 186 def end_progress(self, msg='done.'): 187 assert self.in_progress, ( 188 "Tried to end_progress without start_progress") 189 if self._show_progress(): 190 if not self.in_progress_hanging: 191 # Some message has been printed out since start_progress 192 sys.stdout.write('...' + self.in_progress + msg + '\n') 193 sys.stdout.flush() 194 else: 195 # These erase any messages shown with show_progress 196 # (besides .'s) 197 logger.show_progress('') 198 logger.show_progress('') 199 sys.stdout.write(msg + '\n') 200 sys.stdout.flush() 201 self.in_progress = None 202 self.in_progress_hanging = False 203 204 def show_progress(self, message=None): 205 """If we are in a progress scope, and no log messages have been 206 shown, write out another '.'""" 207 if self.in_progress_hanging: 208 if message is None: 209 sys.stdout.write('.') 210 sys.stdout.flush() 211 else: 212 if self.last_message: 213 padding = ' ' * max( 214 0, 215 len(self.last_message) - len(message) 216 ) 217 else: 218 padding = '' 219 sys.stdout.write( 220 '\r%s%s%s%s' % 221 (' ' * self.indent, self.in_progress, message, padding) 222 ) 223 sys.stdout.flush() 224 self.last_message = message 225 226 def stdout_level_matches(self, level): 227 """Returns true if a message at this level will go to stdout""" 228 return self.level_matches(level, self._stdout_level()) 229 230 def _stdout_level(self): 231 """Returns the level that stdout runs at""" 232 for level, consumer in self.consumers: 233 if consumer is sys.stdout: 234 return level 235 return self.FATAL 236 237 def level_matches(self, level, consumer_level): 238 """ 239 >>> l = Logger() 240 >>> l.level_matches(3, 4) 241 False 242 >>> l.level_matches(3, 2) 243 True 244 >>> l.level_matches(slice(None, 3), 3) 245 False 246 >>> l.level_matches(slice(None, 3), 2) 247 True 248 >>> l.level_matches(slice(1, 3), 1) 249 True 250 >>> l.level_matches(slice(2, 3), 1) 251 False 252 """ 253 if isinstance(level, slice): 254 start, stop = level.start, level.stop 255 if start is not None and start > consumer_level: 256 return False 257 if stop is not None or stop <= consumer_level: 258 return False 259 return True 260 else: 261 return level >= consumer_level 262 263 @classmethod 264 def level_for_integer(cls, level): 
265 levels = cls.LEVELS 266 if level < 0: 267 return levels[0] 268 if level >= len(levels): 269 return levels[-1] 270 return levels[level] 271 272 def move_stdout_to_stderr(self): 273 to_remove = [] 274 to_add = [] 275 for consumer_level, consumer in self.consumers: 276 if consumer == sys.stdout: 277 to_remove.append((consumer_level, consumer)) 278 to_add.append((consumer_level, sys.stderr)) 279 for item in to_remove: 280 self.consumers.remove(item) 281 self.consumers.extend(to_add) 282 283 logger = Logger() 284 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/log.py b/pip/log.py --- a/pip/log.py +++ b/pip/log.py @@ -82,16 +82,23 @@ self.in_progress_hanging = False def add_consumers(self, *consumers): - if sys.platform.startswith("win"): - for level, consumer in consumers: - if hasattr(consumer, "write"): - self.consumers.append( - (level, colorama.AnsiToWin32(consumer)), - ) - else: - self.consumers.append((level, consumer)) - else: - self.consumers.extend(consumers) + for level, consumer in consumers: + # Try to check for duplicate consumers before adding them + for chk_level, chk_consumer in self.consumers: + # Account for coloroma wrapped streams + if isinstance(chk_consumer, colorama.AnsiToWin32): + chk_consumer = chk_consumer.wrapped + + if (level, consumer) == (chk_level, chk_consumer): + break + # If we didn't find a duplicate, then add it + else: + # Colorize consumer for Windows + if sys.platform.startswith('win') \ + and hasattr(consumer, 'write'): + consumer = colorama.AnsiToWin32(consumer) + + self.consumers.append((level, consumer)) def debug(self, msg, *args, **kw): self.log(self.DEBUG, msg, *args, **kw)
{"golden_diff": "diff --git a/pip/log.py b/pip/log.py\n--- a/pip/log.py\n+++ b/pip/log.py\n@@ -82,16 +82,23 @@\n self.in_progress_hanging = False\n \n def add_consumers(self, *consumers):\n- if sys.platform.startswith(\"win\"):\n- for level, consumer in consumers:\n- if hasattr(consumer, \"write\"):\n- self.consumers.append(\n- (level, colorama.AnsiToWin32(consumer)),\n- )\n- else:\n- self.consumers.append((level, consumer))\n- else:\n- self.consumers.extend(consumers)\n+ for level, consumer in consumers:\n+ # Try to check for duplicate consumers before adding them\n+ for chk_level, chk_consumer in self.consumers:\n+ # Account for coloroma wrapped streams\n+ if isinstance(chk_consumer, colorama.AnsiToWin32):\n+ chk_consumer = chk_consumer.wrapped\n+\n+ if (level, consumer) == (chk_level, chk_consumer):\n+ break\n+ # If we didn't find a duplicate, then add it\n+ else:\n+ # Colorize consumer for Windows\n+ if sys.platform.startswith('win') \\\n+ and hasattr(consumer, 'write'):\n+ consumer = colorama.AnsiToWin32(consumer)\n+\n+ self.consumers.append((level, consumer))\n \n def debug(self, msg, *args, **kw):\n self.log(self.DEBUG, msg, *args, **kw)\n", "issue": "Multiple calls to `pip.main()` produce duplicate output\nFor example:\n\n```\n>>> import pip\n>>> pip.main(['install'])\nYou must give at least one requirement to install (see \"pip help install\")\n0\n>>> pip.main(['install'])\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\n0\n>>> pip.main(['install'])\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\n0\n>>> pip.main(['install'])\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\n0\n>>> pip.main(['install'])\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\nYou must give at least one requirement to install (see \"pip help install\")\n0\n```\n\nPip version: pip 1.6.dev1\n\n", "before_files": [{"content": "\"\"\"Logging\n\"\"\"\n\nimport sys\nimport os\nimport logging\n\nfrom pip._vendor import colorama, pkg_resources\n\n\ndef _color_wrap(*colors):\n def wrapped(inp):\n return \"\".join(list(colors) + [inp, colorama.Style.RESET_ALL])\n return wrapped\n\n\ndef should_color(consumer, environ, std=(sys.stdout, sys.stderr)):\n real_consumer = (\n consumer if not isinstance(consumer, colorama.AnsiToWin32)\n else consumer.wrapped\n )\n\n # If consumer isn't stdout or stderr we shouldn't colorize it\n if real_consumer not in std:\n return False\n\n # If consumer is a tty we should color it\n if hasattr(real_consumer, \"isatty\") and real_consumer.isatty():\n return True\n\n # If we have an ASNI term we should color it\n if environ.get(\"TERM\") == \"ANSI\":\n return True\n\n # If anything else we should not color it\n return False\n\n\ndef should_warn(current_version, removal_version):\n # 
Our Significant digits on versions is 2, so remove everything but the\n # first two places.\n current_version = \".\".join(current_version.split(\".\")[:2])\n removal_version = \".\".join(removal_version.split(\".\")[:2])\n\n # Our warning threshold is one minor version before removal, so we\n # decrement the minor version by one\n major, minor = removal_version.split(\".\")\n minor = str(int(minor) - 1)\n warn_version = \".\".join([major, minor])\n\n # Test if our current_version should be a warn\n return (pkg_resources.parse_version(current_version)\n < pkg_resources.parse_version(warn_version))\n\n\nclass Logger(object):\n \"\"\"\n Logging object for use in command-line script. Allows ranges of\n levels, to avoid some redundancy of displayed information.\n \"\"\"\n VERBOSE_DEBUG = logging.DEBUG - 1\n DEBUG = logging.DEBUG\n INFO = logging.INFO\n NOTIFY = (logging.INFO + logging.WARN) / 2\n WARN = WARNING = logging.WARN\n ERROR = logging.ERROR\n FATAL = logging.FATAL\n\n LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]\n\n COLORS = {\n WARN: _color_wrap(colorama.Fore.YELLOW),\n ERROR: _color_wrap(colorama.Fore.RED),\n FATAL: _color_wrap(colorama.Fore.RED),\n }\n\n def __init__(self):\n self.consumers = []\n self.indent = 0\n self.explicit_levels = False\n self.in_progress = None\n self.in_progress_hanging = False\n\n def add_consumers(self, *consumers):\n if sys.platform.startswith(\"win\"):\n for level, consumer in consumers:\n if hasattr(consumer, \"write\"):\n self.consumers.append(\n (level, colorama.AnsiToWin32(consumer)),\n )\n else:\n self.consumers.append((level, consumer))\n else:\n self.consumers.extend(consumers)\n\n def debug(self, msg, *args, **kw):\n self.log(self.DEBUG, msg, *args, **kw)\n\n def info(self, msg, *args, **kw):\n self.log(self.INFO, msg, *args, **kw)\n\n def notify(self, msg, *args, **kw):\n self.log(self.NOTIFY, msg, *args, **kw)\n\n def warn(self, msg, *args, **kw):\n self.log(self.WARN, msg, *args, **kw)\n\n def error(self, msg, *args, **kw):\n self.log(self.ERROR, msg, *args, **kw)\n\n def fatal(self, msg, *args, **kw):\n self.log(self.FATAL, msg, *args, **kw)\n\n def deprecated(self, removal_version, msg, *args, **kwargs):\n \"\"\"\n Logs deprecation message which is log level WARN if the\n ``removal_version`` is > 1 minor release away and log level ERROR\n otherwise.\n\n removal_version should be the version that the deprecated feature is\n expected to be removed in, so something that will not exist in\n version 1.7, but will in 1.6 would have a removal_version of 1.7.\n \"\"\"\n from pip import __version__\n\n if should_warn(__version__, removal_version):\n self.warn(msg, *args, **kwargs)\n else:\n self.error(msg, *args, **kwargs)\n\n def log(self, level, msg, *args, **kw):\n if args:\n if kw:\n raise TypeError(\n \"You may give positional or keyword arguments, not both\")\n args = args or kw\n\n # render\n if args:\n rendered = msg % args\n else:\n rendered = msg\n rendered = ' ' * self.indent + rendered\n if self.explicit_levels:\n # FIXME: should this be a name, not a level number?\n rendered = '%02i %s' % (level, rendered)\n\n for consumer_level, consumer in self.consumers:\n if self.level_matches(level, consumer_level):\n if (self.in_progress_hanging\n and consumer in (sys.stdout, sys.stderr)):\n self.in_progress_hanging = False\n sys.stdout.write('\\n')\n sys.stdout.flush()\n if hasattr(consumer, 'write'):\n write_content = rendered + '\\n'\n if should_color(consumer, os.environ):\n # We are printing to stdout or stderr and it 
supports\n # colors so render our text colored\n colorizer = self.COLORS.get(level, lambda x: x)\n write_content = colorizer(write_content)\n\n consumer.write(write_content)\n if hasattr(consumer, 'flush'):\n consumer.flush()\n else:\n consumer(rendered)\n\n def _show_progress(self):\n \"\"\"Should we display download progress?\"\"\"\n return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())\n\n def start_progress(self, msg):\n assert not self.in_progress, (\n \"Tried to start_progress(%r) while in_progress %r\"\n % (msg, self.in_progress))\n if self._show_progress():\n sys.stdout.write(' ' * self.indent + msg)\n sys.stdout.flush()\n self.in_progress_hanging = True\n else:\n self.in_progress_hanging = False\n self.in_progress = msg\n self.last_message = None\n\n def end_progress(self, msg='done.'):\n assert self.in_progress, (\n \"Tried to end_progress without start_progress\")\n if self._show_progress():\n if not self.in_progress_hanging:\n # Some message has been printed out since start_progress\n sys.stdout.write('...' + self.in_progress + msg + '\\n')\n sys.stdout.flush()\n else:\n # These erase any messages shown with show_progress\n # (besides .'s)\n logger.show_progress('')\n logger.show_progress('')\n sys.stdout.write(msg + '\\n')\n sys.stdout.flush()\n self.in_progress = None\n self.in_progress_hanging = False\n\n def show_progress(self, message=None):\n \"\"\"If we are in a progress scope, and no log messages have been\n shown, write out another '.'\"\"\"\n if self.in_progress_hanging:\n if message is None:\n sys.stdout.write('.')\n sys.stdout.flush()\n else:\n if self.last_message:\n padding = ' ' * max(\n 0,\n len(self.last_message) - len(message)\n )\n else:\n padding = ''\n sys.stdout.write(\n '\\r%s%s%s%s' %\n (' ' * self.indent, self.in_progress, message, padding)\n )\n sys.stdout.flush()\n self.last_message = message\n\n def stdout_level_matches(self, level):\n \"\"\"Returns true if a message at this level will go to stdout\"\"\"\n return self.level_matches(level, self._stdout_level())\n\n def _stdout_level(self):\n \"\"\"Returns the level that stdout runs at\"\"\"\n for level, consumer in self.consumers:\n if consumer is sys.stdout:\n return level\n return self.FATAL\n\n def level_matches(self, level, consumer_level):\n \"\"\"\n >>> l = Logger()\n >>> l.level_matches(3, 4)\n False\n >>> l.level_matches(3, 2)\n True\n >>> l.level_matches(slice(None, 3), 3)\n False\n >>> l.level_matches(slice(None, 3), 2)\n True\n >>> l.level_matches(slice(1, 3), 1)\n True\n >>> l.level_matches(slice(2, 3), 1)\n False\n \"\"\"\n if isinstance(level, slice):\n start, stop = level.start, level.stop\n if start is not None and start > consumer_level:\n return False\n if stop is not None or stop <= consumer_level:\n return False\n return True\n else:\n return level >= consumer_level\n\n @classmethod\n def level_for_integer(cls, level):\n levels = cls.LEVELS\n if level < 0:\n return levels[0]\n if level >= len(levels):\n return levels[-1]\n return levels[level]\n\n def move_stdout_to_stderr(self):\n to_remove = []\n to_add = []\n for consumer_level, consumer in self.consumers:\n if consumer == sys.stdout:\n to_remove.append((consumer_level, consumer))\n to_add.append((consumer_level, sys.stderr))\n for item in to_remove:\n self.consumers.remove(item)\n self.consumers.extend(to_add)\n\nlogger = Logger()\n", "path": "pip/log.py"}], "after_files": [{"content": "\"\"\"Logging\n\"\"\"\n\nimport sys\nimport os\nimport logging\n\nfrom pip._vendor import colorama, pkg_resources\n\n\ndef 
_color_wrap(*colors):\n def wrapped(inp):\n return \"\".join(list(colors) + [inp, colorama.Style.RESET_ALL])\n return wrapped\n\n\ndef should_color(consumer, environ, std=(sys.stdout, sys.stderr)):\n real_consumer = (\n consumer if not isinstance(consumer, colorama.AnsiToWin32)\n else consumer.wrapped\n )\n\n # If consumer isn't stdout or stderr we shouldn't colorize it\n if real_consumer not in std:\n return False\n\n # If consumer is a tty we should color it\n if hasattr(real_consumer, \"isatty\") and real_consumer.isatty():\n return True\n\n # If we have an ASNI term we should color it\n if environ.get(\"TERM\") == \"ANSI\":\n return True\n\n # If anything else we should not color it\n return False\n\n\ndef should_warn(current_version, removal_version):\n # Our Significant digits on versions is 2, so remove everything but the\n # first two places.\n current_version = \".\".join(current_version.split(\".\")[:2])\n removal_version = \".\".join(removal_version.split(\".\")[:2])\n\n # Our warning threshold is one minor version before removal, so we\n # decrement the minor version by one\n major, minor = removal_version.split(\".\")\n minor = str(int(minor) - 1)\n warn_version = \".\".join([major, minor])\n\n # Test if our current_version should be a warn\n return (pkg_resources.parse_version(current_version)\n < pkg_resources.parse_version(warn_version))\n\n\nclass Logger(object):\n \"\"\"\n Logging object for use in command-line script. Allows ranges of\n levels, to avoid some redundancy of displayed information.\n \"\"\"\n VERBOSE_DEBUG = logging.DEBUG - 1\n DEBUG = logging.DEBUG\n INFO = logging.INFO\n NOTIFY = (logging.INFO + logging.WARN) / 2\n WARN = WARNING = logging.WARN\n ERROR = logging.ERROR\n FATAL = logging.FATAL\n\n LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]\n\n COLORS = {\n WARN: _color_wrap(colorama.Fore.YELLOW),\n ERROR: _color_wrap(colorama.Fore.RED),\n FATAL: _color_wrap(colorama.Fore.RED),\n }\n\n def __init__(self):\n self.consumers = []\n self.indent = 0\n self.explicit_levels = False\n self.in_progress = None\n self.in_progress_hanging = False\n\n def add_consumers(self, *consumers):\n for level, consumer in consumers:\n # Try to check for duplicate consumers before adding them\n for chk_level, chk_consumer in self.consumers:\n # Account for coloroma wrapped streams\n if isinstance(chk_consumer, colorama.AnsiToWin32):\n chk_consumer = chk_consumer.wrapped\n\n if (level, consumer) == (chk_level, chk_consumer):\n break\n # If we didn't find a duplicate, then add it\n else:\n # Colorize consumer for Windows\n if sys.platform.startswith('win') \\\n and hasattr(consumer, 'write'):\n consumer = colorama.AnsiToWin32(consumer)\n\n self.consumers.append((level, consumer))\n\n def debug(self, msg, *args, **kw):\n self.log(self.DEBUG, msg, *args, **kw)\n\n def info(self, msg, *args, **kw):\n self.log(self.INFO, msg, *args, **kw)\n\n def notify(self, msg, *args, **kw):\n self.log(self.NOTIFY, msg, *args, **kw)\n\n def warn(self, msg, *args, **kw):\n self.log(self.WARN, msg, *args, **kw)\n\n def error(self, msg, *args, **kw):\n self.log(self.ERROR, msg, *args, **kw)\n\n def fatal(self, msg, *args, **kw):\n self.log(self.FATAL, msg, *args, **kw)\n\n def deprecated(self, removal_version, msg, *args, **kwargs):\n \"\"\"\n Logs deprecation message which is log level WARN if the\n ``removal_version`` is > 1 minor release away and log level ERROR\n otherwise.\n\n removal_version should be the version that the deprecated feature is\n expected to be removed in, so 
something that will not exist in\n version 1.7, but will in 1.6 would have a removal_version of 1.7.\n \"\"\"\n from pip import __version__\n\n if should_warn(__version__, removal_version):\n self.warn(msg, *args, **kwargs)\n else:\n self.error(msg, *args, **kwargs)\n\n def log(self, level, msg, *args, **kw):\n if args:\n if kw:\n raise TypeError(\n \"You may give positional or keyword arguments, not both\")\n args = args or kw\n\n # render\n if args:\n rendered = msg % args\n else:\n rendered = msg\n rendered = ' ' * self.indent + rendered\n if self.explicit_levels:\n # FIXME: should this be a name, not a level number?\n rendered = '%02i %s' % (level, rendered)\n\n for consumer_level, consumer in self.consumers:\n if self.level_matches(level, consumer_level):\n if (self.in_progress_hanging\n and consumer in (sys.stdout, sys.stderr)):\n self.in_progress_hanging = False\n sys.stdout.write('\\n')\n sys.stdout.flush()\n if hasattr(consumer, 'write'):\n write_content = rendered + '\\n'\n if should_color(consumer, os.environ):\n # We are printing to stdout or stderr and it supports\n # colors so render our text colored\n colorizer = self.COLORS.get(level, lambda x: x)\n write_content = colorizer(write_content)\n\n consumer.write(write_content)\n if hasattr(consumer, 'flush'):\n consumer.flush()\n else:\n consumer(rendered)\n\n def _show_progress(self):\n \"\"\"Should we display download progress?\"\"\"\n return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())\n\n def start_progress(self, msg):\n assert not self.in_progress, (\n \"Tried to start_progress(%r) while in_progress %r\"\n % (msg, self.in_progress))\n if self._show_progress():\n sys.stdout.write(' ' * self.indent + msg)\n sys.stdout.flush()\n self.in_progress_hanging = True\n else:\n self.in_progress_hanging = False\n self.in_progress = msg\n self.last_message = None\n\n def end_progress(self, msg='done.'):\n assert self.in_progress, (\n \"Tried to end_progress without start_progress\")\n if self._show_progress():\n if not self.in_progress_hanging:\n # Some message has been printed out since start_progress\n sys.stdout.write('...' 
+ self.in_progress + msg + '\\n')\n sys.stdout.flush()\n else:\n # These erase any messages shown with show_progress\n # (besides .'s)\n logger.show_progress('')\n logger.show_progress('')\n sys.stdout.write(msg + '\\n')\n sys.stdout.flush()\n self.in_progress = None\n self.in_progress_hanging = False\n\n def show_progress(self, message=None):\n \"\"\"If we are in a progress scope, and no log messages have been\n shown, write out another '.'\"\"\"\n if self.in_progress_hanging:\n if message is None:\n sys.stdout.write('.')\n sys.stdout.flush()\n else:\n if self.last_message:\n padding = ' ' * max(\n 0,\n len(self.last_message) - len(message)\n )\n else:\n padding = ''\n sys.stdout.write(\n '\\r%s%s%s%s' %\n (' ' * self.indent, self.in_progress, message, padding)\n )\n sys.stdout.flush()\n self.last_message = message\n\n def stdout_level_matches(self, level):\n \"\"\"Returns true if a message at this level will go to stdout\"\"\"\n return self.level_matches(level, self._stdout_level())\n\n def _stdout_level(self):\n \"\"\"Returns the level that stdout runs at\"\"\"\n for level, consumer in self.consumers:\n if consumer is sys.stdout:\n return level\n return self.FATAL\n\n def level_matches(self, level, consumer_level):\n \"\"\"\n >>> l = Logger()\n >>> l.level_matches(3, 4)\n False\n >>> l.level_matches(3, 2)\n True\n >>> l.level_matches(slice(None, 3), 3)\n False\n >>> l.level_matches(slice(None, 3), 2)\n True\n >>> l.level_matches(slice(1, 3), 1)\n True\n >>> l.level_matches(slice(2, 3), 1)\n False\n \"\"\"\n if isinstance(level, slice):\n start, stop = level.start, level.stop\n if start is not None and start > consumer_level:\n return False\n if stop is not None or stop <= consumer_level:\n return False\n return True\n else:\n return level >= consumer_level\n\n @classmethod\n def level_for_integer(cls, level):\n levels = cls.LEVELS\n if level < 0:\n return levels[0]\n if level >= len(levels):\n return levels[-1]\n return levels[level]\n\n def move_stdout_to_stderr(self):\n to_remove = []\n to_add = []\n for consumer_level, consumer in self.consumers:\n if consumer == sys.stdout:\n to_remove.append((consumer_level, consumer))\n to_add.append((consumer_level, sys.stderr))\n for item in to_remove:\n self.consumers.remove(item)\n self.consumers.extend(to_add)\n\nlogger = Logger()\n", "path": "pip/log.py"}]}
3423
349
gh_patches_debug_5772
rasdani/github-patches
git_diff
liqd__a4-meinberlin-471
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- link to profile breaks if space in username --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/embed/middleware.py` Content: ``` 1 class AjaxPathMiddleware(object): 2 """Append request path as a header. 3 4 In an ajax request, redirects are handled implicitly, so it it not possible 5 to know the path of the page where you end up. This middleware adds that 6 information in a header. 7 """ 8 9 def process_response(self, request, response): 10 response['x-ajax-path'] = request.path 11 return response 12 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/embed/middleware.py b/apps/embed/middleware.py --- a/apps/embed/middleware.py +++ b/apps/embed/middleware.py @@ -1,3 +1,6 @@ +from django.utils.http import urlquote + + class AjaxPathMiddleware(object): """Append request path as a header. @@ -7,5 +10,5 @@ """ def process_response(self, request, response): - response['x-ajax-path'] = request.path + response['x-ajax-path'] = urlquote(request.path) return response
{"golden_diff": "diff --git a/apps/embed/middleware.py b/apps/embed/middleware.py\n--- a/apps/embed/middleware.py\n+++ b/apps/embed/middleware.py\n@@ -1,3 +1,6 @@\n+from django.utils.http import urlquote\n+\n+\n class AjaxPathMiddleware(object):\n \"\"\"Append request path as a header.\n \n@@ -7,5 +10,5 @@\n \"\"\"\n \n def process_response(self, request, response):\n- response['x-ajax-path'] = request.path\n+ response['x-ajax-path'] = urlquote(request.path)\n return response\n", "issue": "link to profile breaks if space in username\n\n", "before_files": [{"content": "class AjaxPathMiddleware(object):\n \"\"\"Append request path as a header.\n\n In an ajax request, redirects are handled implicitly, so it it not possible\n to know the path of the page where you end up. This middleware adds that\n information in a header.\n \"\"\"\n\n def process_response(self, request, response):\n response['x-ajax-path'] = request.path\n return response\n", "path": "apps/embed/middleware.py"}], "after_files": [{"content": "from django.utils.http import urlquote\n\n\nclass AjaxPathMiddleware(object):\n \"\"\"Append request path as a header.\n\n In an ajax request, redirects are handled implicitly, so it it not possible\n to know the path of the page where you end up. This middleware adds that\n information in a header.\n \"\"\"\n\n def process_response(self, request, response):\n response['x-ajax-path'] = urlquote(request.path)\n return response\n", "path": "apps/embed/middleware.py"}]}
368
123
gh_patches_debug_33908
rasdani/github-patches
git_diff
saleor__saleor-5148
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fetch user address by its ID ### What I'm trying to achieve I need to create a checkout in the saleor using a separate service that uses a service account. To do that I need to provide billing and shipping addresses, both of which I store in the saleor in user's addresses and store only IDs on my side. There is currently no way to fetch a single address by its ID using a query. ### Describe a proposed solution Add query: `address(id: ID!): Address` that is accessible for at least a service account, preferably by address owner too. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/graphql/account/resolvers.py` Content: ``` 1 from itertools import chain 2 from typing import Optional 3 4 import graphene 5 import graphene_django_optimizer as gql_optimizer 6 from django.db.models import QuerySet 7 from graphql_jwt.exceptions import PermissionDenied 8 from i18naddress import get_validation_rules 9 10 from ...account import models 11 from ...core.permissions import AccountPermissions 12 from ...payment import gateway 13 from ...payment.utils import fetch_customer_id 14 from ..utils import filter_by_query_param, sort_queryset 15 from .sorters import ServiceAccountSortField, UserSortField, UserSortingInput 16 from .types import AddressValidationData, ChoiceValue 17 from .utils import get_allowed_fields_camel_case, get_required_fields_camel_case 18 19 USER_SEARCH_FIELDS = ( 20 "email", 21 "first_name", 22 "last_name", 23 "default_shipping_address__first_name", 24 "default_shipping_address__last_name", 25 "default_shipping_address__city", 26 "default_shipping_address__country", 27 ) 28 29 30 def sort_users(qs: QuerySet, sort_by: UserSortingInput) -> QuerySet: 31 if sort_by: 32 return sort_queryset(qs, sort_by, UserSortField) 33 return qs.order_by("email") 34 35 36 def resolve_customers(info, query, sort_by=None, **_kwargs): 37 qs = models.User.objects.customers() 38 qs = filter_by_query_param( 39 queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS 40 ) 41 qs = sort_users(qs, sort_by) 42 qs = qs.distinct() 43 return gql_optimizer.query(qs, info) 44 45 46 def resolve_staff_users(info, query, sort_by=None, **_kwargs): 47 qs = models.User.objects.staff() 48 qs = filter_by_query_param( 49 queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS 50 ) 51 qs = sort_users(qs, sort_by) 52 qs = qs.distinct() 53 return gql_optimizer.query(qs, info) 54 55 56 def resolve_user(info, id): 57 requester = info.context.user or info.context.service_account 58 if requester: 59 _model, user_pk = graphene.Node.from_global_id(id) 60 if requester.has_perms( 61 [AccountPermissions.MANAGE_STAFF, AccountPermissions.MANAGE_USERS] 62 ): 63 return models.User.objects.filter(pk=user_pk).first() 64 if requester.has_perm(AccountPermissions.MANAGE_STAFF): 65 return models.User.objects.staff().filter(pk=user_pk).first() 66 if requester.has_perm(AccountPermissions.MANAGE_USERS): 67 return models.User.objects.customers().filter(pk=user_pk).first() 68 return PermissionDenied() 69 70 71 def resolve_service_accounts(info, sort_by=None, **_kwargs): 72 qs = models.ServiceAccount.objects.all() 73 qs = sort_queryset(qs, sort_by, ServiceAccountSortField) 74 return gql_optimizer.query(qs, info) 75 76 77 def resolve_address_validation_rules( 78 info, 79 country_code: str, 80 country_area: Optional[str], 81 city: Optional[str], 
82 city_area: Optional[str], 83 ): 84 85 params = { 86 "country_code": country_code, 87 "country_area": country_area, 88 "city": city, 89 "city_area": city_area, 90 } 91 rules = get_validation_rules(params) 92 return AddressValidationData( 93 country_code=rules.country_code, 94 country_name=rules.country_name, 95 address_format=rules.address_format, 96 address_latin_format=rules.address_latin_format, 97 allowed_fields=get_allowed_fields_camel_case(rules.allowed_fields), 98 required_fields=get_required_fields_camel_case(rules.required_fields), 99 upper_fields=rules.upper_fields, 100 country_area_type=rules.country_area_type, 101 country_area_choices=[ 102 ChoiceValue(area[0], area[1]) for area in rules.country_area_choices 103 ], 104 city_type=rules.city_type, 105 city_choices=[ChoiceValue(area[0], area[1]) for area in rules.city_choices], 106 city_area_type=rules.city_type, 107 city_area_choices=[ 108 ChoiceValue(area[0], area[1]) for area in rules.city_area_choices 109 ], 110 postal_code_type=rules.postal_code_type, 111 postal_code_matchers=[ 112 compiled.pattern for compiled in rules.postal_code_matchers 113 ], 114 postal_code_examples=rules.postal_code_examples, 115 postal_code_prefix=rules.postal_code_prefix, 116 ) 117 118 119 def resolve_payment_sources(user: models.User): 120 stored_customer_accounts = ( 121 (gtw["name"], fetch_customer_id(user, gtw["name"])) 122 for gtw in gateway.list_gateways() 123 ) 124 return list( 125 chain( 126 *[ 127 prepare_graphql_payment_sources_type( 128 gateway.list_payment_sources(gtw, customer_id) 129 ) 130 for gtw, customer_id in stored_customer_accounts 131 if customer_id is not None 132 ] 133 ) 134 ) 135 136 137 def prepare_graphql_payment_sources_type(payment_sources): 138 sources = [] 139 for src in payment_sources: 140 sources.append( 141 { 142 "gateway": src.gateway, 143 "credit_card_info": { 144 "last_digits": src.credit_card_info.last_4, 145 "exp_year": src.credit_card_info.exp_year, 146 "exp_month": src.credit_card_info.exp_month, 147 "brand": "", 148 "first_digits": "", 149 }, 150 } 151 ) 152 return sources 153 ``` Path: `saleor/graphql/account/schema.py` Content: ``` 1 import graphene 2 from graphql_jwt.decorators import login_required 3 4 from ...core.permissions import AccountPermissions 5 from ..core.fields import FilterInputConnectionField 6 from ..core.types import FilterInputObjectType 7 from ..decorators import one_of_permissions_required, permission_required 8 from .bulk_mutations import CustomerBulkDelete, StaffBulkDelete, UserBulkSetActive 9 from .enums import CountryCodeEnum 10 from .filters import CustomerFilter, ServiceAccountFilter, StaffUserFilter 11 from .mutations.account import ( 12 AccountAddressCreate, 13 AccountAddressDelete, 14 AccountAddressUpdate, 15 AccountDelete, 16 AccountRegister, 17 AccountRequestDeletion, 18 AccountSetDefaultAddress, 19 AccountUpdate, 20 AccountUpdateMeta, 21 ) 22 from .mutations.base import ( 23 PasswordChange, 24 RequestPasswordReset, 25 SetPassword, 26 UserClearMeta, 27 UserUpdateMeta, 28 ) 29 from .mutations.service_account import ( 30 ServiceAccountClearPrivateMeta, 31 ServiceAccountCreate, 32 ServiceAccountDelete, 33 ServiceAccountTokenCreate, 34 ServiceAccountTokenDelete, 35 ServiceAccountUpdate, 36 ServiceAccountUpdatePrivateMeta, 37 ) 38 from .mutations.staff import ( 39 AddressCreate, 40 AddressDelete, 41 AddressSetDefault, 42 AddressUpdate, 43 CustomerCreate, 44 CustomerDelete, 45 CustomerUpdate, 46 StaffCreate, 47 StaffDelete, 48 StaffUpdate, 49 UserAvatarDelete, 50 
UserAvatarUpdate, 51 UserClearPrivateMeta, 52 UserUpdatePrivateMeta, 53 ) 54 from .resolvers import ( 55 resolve_address_validation_rules, 56 resolve_customers, 57 resolve_service_accounts, 58 resolve_staff_users, 59 resolve_user, 60 ) 61 from .sorters import ServiceAccountSortingInput, UserSortingInput 62 from .types import AddressValidationData, ServiceAccount, User 63 64 65 class CustomerFilterInput(FilterInputObjectType): 66 class Meta: 67 filterset_class = CustomerFilter 68 69 70 class StaffUserInput(FilterInputObjectType): 71 class Meta: 72 filterset_class = StaffUserFilter 73 74 75 class ServiceAccountFilterInput(FilterInputObjectType): 76 class Meta: 77 filterset_class = ServiceAccountFilter 78 79 80 class AccountQueries(graphene.ObjectType): 81 address_validation_rules = graphene.Field( 82 AddressValidationData, 83 description="Returns address validation rules.", 84 country_code=graphene.Argument( 85 CountryCodeEnum, 86 description="Two-letter ISO 3166-1 country code.", 87 required=True, 88 ), 89 country_area=graphene.Argument( 90 graphene.String, description="Designation of a region, province or state." 91 ), 92 city=graphene.Argument(graphene.String, description="City or a town name."), 93 city_area=graphene.Argument( 94 graphene.String, description="Sublocality like a district." 95 ), 96 ) 97 customers = FilterInputConnectionField( 98 User, 99 filter=CustomerFilterInput(description="Filtering options for customers."), 100 sort_by=UserSortingInput(description="Sort customers."), 101 description="List of the shop's customers.", 102 ) 103 me = graphene.Field(User, description="Return the currently authenticated user.") 104 staff_users = FilterInputConnectionField( 105 User, 106 filter=StaffUserInput(description="Filtering options for staff users."), 107 sort_by=UserSortingInput(description="Sort staff users."), 108 description="List of the shop's staff users.", 109 ) 110 service_accounts = FilterInputConnectionField( 111 ServiceAccount, 112 filter=ServiceAccountFilterInput( 113 description="Filtering options for service accounts." 
114 ), 115 sort_by=ServiceAccountSortingInput(description="Sort service accounts."), 116 description="List of the service accounts.", 117 ) 118 service_account = graphene.Field( 119 ServiceAccount, 120 id=graphene.Argument( 121 graphene.ID, description="ID of the service account.", required=True 122 ), 123 description="Look up a service account by ID.", 124 ) 125 126 user = graphene.Field( 127 User, 128 id=graphene.Argument(graphene.ID, description="ID of the user.", required=True), 129 description="Look up a user by ID.", 130 ) 131 132 def resolve_address_validation_rules( 133 self, info, country_code, country_area=None, city=None, city_area=None 134 ): 135 return resolve_address_validation_rules( 136 info, 137 country_code, 138 country_area=country_area, 139 city=city, 140 city_area=city_area, 141 ) 142 143 @permission_required(AccountPermissions.MANAGE_SERVICE_ACCOUNTS) 144 def resolve_service_accounts(self, info, **kwargs): 145 return resolve_service_accounts(info, **kwargs) 146 147 @permission_required(AccountPermissions.MANAGE_SERVICE_ACCOUNTS) 148 def resolve_service_account(self, info, id): 149 return graphene.Node.get_node_from_global_id(info, id, ServiceAccount) 150 151 @permission_required(AccountPermissions.MANAGE_USERS) 152 def resolve_customers(self, info, query=None, **kwargs): 153 return resolve_customers(info, query=query, **kwargs) 154 155 @login_required 156 def resolve_me(self, info): 157 return info.context.user 158 159 @permission_required(AccountPermissions.MANAGE_STAFF) 160 def resolve_staff_users(self, info, query=None, **kwargs): 161 return resolve_staff_users(info, query=query, **kwargs) 162 163 @one_of_permissions_required( 164 [AccountPermissions.MANAGE_STAFF, AccountPermissions.MANAGE_USERS] 165 ) 166 def resolve_user(self, info, id): 167 return resolve_user(info, id) 168 169 170 class AccountMutations(graphene.ObjectType): 171 # Base mutations 172 request_password_reset = RequestPasswordReset.Field() 173 set_password = SetPassword.Field() 174 password_change = PasswordChange.Field() 175 176 # Account mutations 177 account_address_create = AccountAddressCreate.Field() 178 account_address_update = AccountAddressUpdate.Field() 179 account_address_delete = AccountAddressDelete.Field() 180 account_set_default_address = AccountSetDefaultAddress.Field() 181 182 account_register = AccountRegister.Field() 183 account_update = AccountUpdate.Field() 184 account_request_deletion = AccountRequestDeletion.Field() 185 account_delete = AccountDelete.Field() 186 187 account_update_meta = AccountUpdateMeta.Field() 188 189 # Staff mutation 190 address_create = AddressCreate.Field() 191 address_update = AddressUpdate.Field() 192 address_delete = AddressDelete.Field() 193 address_set_default = AddressSetDefault.Field() 194 195 customer_create = CustomerCreate.Field() 196 customer_update = CustomerUpdate.Field() 197 customer_delete = CustomerDelete.Field() 198 customer_bulk_delete = CustomerBulkDelete.Field() 199 200 staff_create = StaffCreate.Field() 201 staff_update = StaffUpdate.Field() 202 staff_delete = StaffDelete.Field() 203 staff_bulk_delete = StaffBulkDelete.Field() 204 205 user_avatar_update = UserAvatarUpdate.Field() 206 user_avatar_delete = UserAvatarDelete.Field() 207 user_bulk_set_active = UserBulkSetActive.Field() 208 209 user_update_metadata = UserUpdateMeta.Field() 210 user_clear_metadata = UserClearMeta.Field() 211 212 user_update_private_metadata = UserUpdatePrivateMeta.Field() 213 user_clear_private_metadata = UserClearPrivateMeta.Field() 214 215 
service_account_create = ServiceAccountCreate.Field() 216 service_account_update = ServiceAccountUpdate.Field() 217 service_account_delete = ServiceAccountDelete.Field() 218 219 service_account_update_private_metadata = ServiceAccountUpdatePrivateMeta.Field() 220 service_account_clear_private_metadata = ServiceAccountClearPrivateMeta.Field() 221 222 service_account_token_create = ServiceAccountTokenCreate.Field() 223 service_account_token_delete = ServiceAccountTokenDelete.Field() 224 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/graphql/account/resolvers.py b/saleor/graphql/account/resolvers.py --- a/saleor/graphql/account/resolvers.py +++ b/saleor/graphql/account/resolvers.py @@ -150,3 +150,14 @@ } ) return sources + + +def resolve_address(info, id): + user = info.context.user + service_account = info.context.service_account + _model, address_pk = graphene.Node.from_global_id(id) + if service_account: + return models.Address.objects.filter(pk=address_pk).first() + if user and not user.is_anonymous: + return user.addresses.filter(id=address_pk).first() + return PermissionDenied() diff --git a/saleor/graphql/account/schema.py b/saleor/graphql/account/schema.py --- a/saleor/graphql/account/schema.py +++ b/saleor/graphql/account/schema.py @@ -52,6 +52,7 @@ UserUpdatePrivateMeta, ) from .resolvers import ( + resolve_address, resolve_address_validation_rules, resolve_customers, resolve_service_accounts, @@ -59,7 +60,7 @@ resolve_user, ) from .sorters import ServiceAccountSortingInput, UserSortingInput -from .types import AddressValidationData, ServiceAccount, User +from .types import Address, AddressValidationData, ServiceAccount, User class CustomerFilterInput(FilterInputObjectType): @@ -94,6 +95,13 @@ graphene.String, description="Sublocality like a district." ), ) + address = graphene.Field( + Address, + id=graphene.Argument( + graphene.ID, description="ID of an address.", required=True + ), + description="Look up an address by ID.", + ) customers = FilterInputConnectionField( User, filter=CustomerFilterInput(description="Filtering options for customers."), @@ -166,6 +174,9 @@ def resolve_user(self, info, id): return resolve_user(info, id) + def resolve_address(self, info, id): + return resolve_address(info, id) + class AccountMutations(graphene.ObjectType): # Base mutations
{"golden_diff": "diff --git a/saleor/graphql/account/resolvers.py b/saleor/graphql/account/resolvers.py\n--- a/saleor/graphql/account/resolvers.py\n+++ b/saleor/graphql/account/resolvers.py\n@@ -150,3 +150,14 @@\n }\n )\n return sources\n+\n+\n+def resolve_address(info, id):\n+ user = info.context.user\n+ service_account = info.context.service_account\n+ _model, address_pk = graphene.Node.from_global_id(id)\n+ if service_account:\n+ return models.Address.objects.filter(pk=address_pk).first()\n+ if user and not user.is_anonymous:\n+ return user.addresses.filter(id=address_pk).first()\n+ return PermissionDenied()\ndiff --git a/saleor/graphql/account/schema.py b/saleor/graphql/account/schema.py\n--- a/saleor/graphql/account/schema.py\n+++ b/saleor/graphql/account/schema.py\n@@ -52,6 +52,7 @@\n UserUpdatePrivateMeta,\n )\n from .resolvers import (\n+ resolve_address,\n resolve_address_validation_rules,\n resolve_customers,\n resolve_service_accounts,\n@@ -59,7 +60,7 @@\n resolve_user,\n )\n from .sorters import ServiceAccountSortingInput, UserSortingInput\n-from .types import AddressValidationData, ServiceAccount, User\n+from .types import Address, AddressValidationData, ServiceAccount, User\n \n \n class CustomerFilterInput(FilterInputObjectType):\n@@ -94,6 +95,13 @@\n graphene.String, description=\"Sublocality like a district.\"\n ),\n )\n+ address = graphene.Field(\n+ Address,\n+ id=graphene.Argument(\n+ graphene.ID, description=\"ID of an address.\", required=True\n+ ),\n+ description=\"Look up an address by ID.\",\n+ )\n customers = FilterInputConnectionField(\n User,\n filter=CustomerFilterInput(description=\"Filtering options for customers.\"),\n@@ -166,6 +174,9 @@\n def resolve_user(self, info, id):\n return resolve_user(info, id)\n \n+ def resolve_address(self, info, id):\n+ return resolve_address(info, id)\n+\n \n class AccountMutations(graphene.ObjectType):\n # Base mutations\n", "issue": "Fetch user address by its ID\n### What I'm trying to achieve\r\nI need to create a checkout in the saleor using a separate service that uses a service account. To do that I need to provide billing and shipping addresses, both of which I store in the saleor in user's addresses and store only IDs on my side.\r\nThere is currently no way to fetch a single address by its ID using a query.\r\n\r\n### Describe a proposed solution\r\nAdd query:\r\n`address(id: ID!): Address`\r\nthat is accessible for at least a service account, preferably by address owner too. 
\r\n\r\n\n", "before_files": [{"content": "from itertools import chain\nfrom typing import Optional\n\nimport graphene\nimport graphene_django_optimizer as gql_optimizer\nfrom django.db.models import QuerySet\nfrom graphql_jwt.exceptions import PermissionDenied\nfrom i18naddress import get_validation_rules\n\nfrom ...account import models\nfrom ...core.permissions import AccountPermissions\nfrom ...payment import gateway\nfrom ...payment.utils import fetch_customer_id\nfrom ..utils import filter_by_query_param, sort_queryset\nfrom .sorters import ServiceAccountSortField, UserSortField, UserSortingInput\nfrom .types import AddressValidationData, ChoiceValue\nfrom .utils import get_allowed_fields_camel_case, get_required_fields_camel_case\n\nUSER_SEARCH_FIELDS = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"default_shipping_address__first_name\",\n \"default_shipping_address__last_name\",\n \"default_shipping_address__city\",\n \"default_shipping_address__country\",\n)\n\n\ndef sort_users(qs: QuerySet, sort_by: UserSortingInput) -> QuerySet:\n if sort_by:\n return sort_queryset(qs, sort_by, UserSortField)\n return qs.order_by(\"email\")\n\n\ndef resolve_customers(info, query, sort_by=None, **_kwargs):\n qs = models.User.objects.customers()\n qs = filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS\n )\n qs = sort_users(qs, sort_by)\n qs = qs.distinct()\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_staff_users(info, query, sort_by=None, **_kwargs):\n qs = models.User.objects.staff()\n qs = filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS\n )\n qs = sort_users(qs, sort_by)\n qs = qs.distinct()\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_user(info, id):\n requester = info.context.user or info.context.service_account\n if requester:\n _model, user_pk = graphene.Node.from_global_id(id)\n if requester.has_perms(\n [AccountPermissions.MANAGE_STAFF, AccountPermissions.MANAGE_USERS]\n ):\n return models.User.objects.filter(pk=user_pk).first()\n if requester.has_perm(AccountPermissions.MANAGE_STAFF):\n return models.User.objects.staff().filter(pk=user_pk).first()\n if requester.has_perm(AccountPermissions.MANAGE_USERS):\n return models.User.objects.customers().filter(pk=user_pk).first()\n return PermissionDenied()\n\n\ndef resolve_service_accounts(info, sort_by=None, **_kwargs):\n qs = models.ServiceAccount.objects.all()\n qs = sort_queryset(qs, sort_by, ServiceAccountSortField)\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_address_validation_rules(\n info,\n country_code: str,\n country_area: Optional[str],\n city: Optional[str],\n city_area: Optional[str],\n):\n\n params = {\n \"country_code\": country_code,\n \"country_area\": country_area,\n \"city\": city,\n \"city_area\": city_area,\n }\n rules = get_validation_rules(params)\n return AddressValidationData(\n country_code=rules.country_code,\n country_name=rules.country_name,\n address_format=rules.address_format,\n address_latin_format=rules.address_latin_format,\n allowed_fields=get_allowed_fields_camel_case(rules.allowed_fields),\n required_fields=get_required_fields_camel_case(rules.required_fields),\n upper_fields=rules.upper_fields,\n country_area_type=rules.country_area_type,\n country_area_choices=[\n ChoiceValue(area[0], area[1]) for area in rules.country_area_choices\n ],\n city_type=rules.city_type,\n city_choices=[ChoiceValue(area[0], area[1]) for area in rules.city_choices],\n city_area_type=rules.city_type,\n 
city_area_choices=[\n ChoiceValue(area[0], area[1]) for area in rules.city_area_choices\n ],\n postal_code_type=rules.postal_code_type,\n postal_code_matchers=[\n compiled.pattern for compiled in rules.postal_code_matchers\n ],\n postal_code_examples=rules.postal_code_examples,\n postal_code_prefix=rules.postal_code_prefix,\n )\n\n\ndef resolve_payment_sources(user: models.User):\n stored_customer_accounts = (\n (gtw[\"name\"], fetch_customer_id(user, gtw[\"name\"]))\n for gtw in gateway.list_gateways()\n )\n return list(\n chain(\n *[\n prepare_graphql_payment_sources_type(\n gateway.list_payment_sources(gtw, customer_id)\n )\n for gtw, customer_id in stored_customer_accounts\n if customer_id is not None\n ]\n )\n )\n\n\ndef prepare_graphql_payment_sources_type(payment_sources):\n sources = []\n for src in payment_sources:\n sources.append(\n {\n \"gateway\": src.gateway,\n \"credit_card_info\": {\n \"last_digits\": src.credit_card_info.last_4,\n \"exp_year\": src.credit_card_info.exp_year,\n \"exp_month\": src.credit_card_info.exp_month,\n \"brand\": \"\",\n \"first_digits\": \"\",\n },\n }\n )\n return sources\n", "path": "saleor/graphql/account/resolvers.py"}, {"content": "import graphene\nfrom graphql_jwt.decorators import login_required\n\nfrom ...core.permissions import AccountPermissions\nfrom ..core.fields import FilterInputConnectionField\nfrom ..core.types import FilterInputObjectType\nfrom ..decorators import one_of_permissions_required, permission_required\nfrom .bulk_mutations import CustomerBulkDelete, StaffBulkDelete, UserBulkSetActive\nfrom .enums import CountryCodeEnum\nfrom .filters import CustomerFilter, ServiceAccountFilter, StaffUserFilter\nfrom .mutations.account import (\n AccountAddressCreate,\n AccountAddressDelete,\n AccountAddressUpdate,\n AccountDelete,\n AccountRegister,\n AccountRequestDeletion,\n AccountSetDefaultAddress,\n AccountUpdate,\n AccountUpdateMeta,\n)\nfrom .mutations.base import (\n PasswordChange,\n RequestPasswordReset,\n SetPassword,\n UserClearMeta,\n UserUpdateMeta,\n)\nfrom .mutations.service_account import (\n ServiceAccountClearPrivateMeta,\n ServiceAccountCreate,\n ServiceAccountDelete,\n ServiceAccountTokenCreate,\n ServiceAccountTokenDelete,\n ServiceAccountUpdate,\n ServiceAccountUpdatePrivateMeta,\n)\nfrom .mutations.staff import (\n AddressCreate,\n AddressDelete,\n AddressSetDefault,\n AddressUpdate,\n CustomerCreate,\n CustomerDelete,\n CustomerUpdate,\n StaffCreate,\n StaffDelete,\n StaffUpdate,\n UserAvatarDelete,\n UserAvatarUpdate,\n UserClearPrivateMeta,\n UserUpdatePrivateMeta,\n)\nfrom .resolvers import (\n resolve_address_validation_rules,\n resolve_customers,\n resolve_service_accounts,\n resolve_staff_users,\n resolve_user,\n)\nfrom .sorters import ServiceAccountSortingInput, UserSortingInput\nfrom .types import AddressValidationData, ServiceAccount, User\n\n\nclass CustomerFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = CustomerFilter\n\n\nclass StaffUserInput(FilterInputObjectType):\n class Meta:\n filterset_class = StaffUserFilter\n\n\nclass ServiceAccountFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = ServiceAccountFilter\n\n\nclass AccountQueries(graphene.ObjectType):\n address_validation_rules = graphene.Field(\n AddressValidationData,\n description=\"Returns address validation rules.\",\n country_code=graphene.Argument(\n CountryCodeEnum,\n description=\"Two-letter ISO 3166-1 country code.\",\n required=True,\n ),\n country_area=graphene.Argument(\n graphene.String, 
description=\"Designation of a region, province or state.\"\n ),\n city=graphene.Argument(graphene.String, description=\"City or a town name.\"),\n city_area=graphene.Argument(\n graphene.String, description=\"Sublocality like a district.\"\n ),\n )\n customers = FilterInputConnectionField(\n User,\n filter=CustomerFilterInput(description=\"Filtering options for customers.\"),\n sort_by=UserSortingInput(description=\"Sort customers.\"),\n description=\"List of the shop's customers.\",\n )\n me = graphene.Field(User, description=\"Return the currently authenticated user.\")\n staff_users = FilterInputConnectionField(\n User,\n filter=StaffUserInput(description=\"Filtering options for staff users.\"),\n sort_by=UserSortingInput(description=\"Sort staff users.\"),\n description=\"List of the shop's staff users.\",\n )\n service_accounts = FilterInputConnectionField(\n ServiceAccount,\n filter=ServiceAccountFilterInput(\n description=\"Filtering options for service accounts.\"\n ),\n sort_by=ServiceAccountSortingInput(description=\"Sort service accounts.\"),\n description=\"List of the service accounts.\",\n )\n service_account = graphene.Field(\n ServiceAccount,\n id=graphene.Argument(\n graphene.ID, description=\"ID of the service account.\", required=True\n ),\n description=\"Look up a service account by ID.\",\n )\n\n user = graphene.Field(\n User,\n id=graphene.Argument(graphene.ID, description=\"ID of the user.\", required=True),\n description=\"Look up a user by ID.\",\n )\n\n def resolve_address_validation_rules(\n self, info, country_code, country_area=None, city=None, city_area=None\n ):\n return resolve_address_validation_rules(\n info,\n country_code,\n country_area=country_area,\n city=city,\n city_area=city_area,\n )\n\n @permission_required(AccountPermissions.MANAGE_SERVICE_ACCOUNTS)\n def resolve_service_accounts(self, info, **kwargs):\n return resolve_service_accounts(info, **kwargs)\n\n @permission_required(AccountPermissions.MANAGE_SERVICE_ACCOUNTS)\n def resolve_service_account(self, info, id):\n return graphene.Node.get_node_from_global_id(info, id, ServiceAccount)\n\n @permission_required(AccountPermissions.MANAGE_USERS)\n def resolve_customers(self, info, query=None, **kwargs):\n return resolve_customers(info, query=query, **kwargs)\n\n @login_required\n def resolve_me(self, info):\n return info.context.user\n\n @permission_required(AccountPermissions.MANAGE_STAFF)\n def resolve_staff_users(self, info, query=None, **kwargs):\n return resolve_staff_users(info, query=query, **kwargs)\n\n @one_of_permissions_required(\n [AccountPermissions.MANAGE_STAFF, AccountPermissions.MANAGE_USERS]\n )\n def resolve_user(self, info, id):\n return resolve_user(info, id)\n\n\nclass AccountMutations(graphene.ObjectType):\n # Base mutations\n request_password_reset = RequestPasswordReset.Field()\n set_password = SetPassword.Field()\n password_change = PasswordChange.Field()\n\n # Account mutations\n account_address_create = AccountAddressCreate.Field()\n account_address_update = AccountAddressUpdate.Field()\n account_address_delete = AccountAddressDelete.Field()\n account_set_default_address = AccountSetDefaultAddress.Field()\n\n account_register = AccountRegister.Field()\n account_update = AccountUpdate.Field()\n account_request_deletion = AccountRequestDeletion.Field()\n account_delete = AccountDelete.Field()\n\n account_update_meta = AccountUpdateMeta.Field()\n\n # Staff mutation\n address_create = AddressCreate.Field()\n address_update = AddressUpdate.Field()\n address_delete = 
AddressDelete.Field()\n address_set_default = AddressSetDefault.Field()\n\n customer_create = CustomerCreate.Field()\n customer_update = CustomerUpdate.Field()\n customer_delete = CustomerDelete.Field()\n customer_bulk_delete = CustomerBulkDelete.Field()\n\n staff_create = StaffCreate.Field()\n staff_update = StaffUpdate.Field()\n staff_delete = StaffDelete.Field()\n staff_bulk_delete = StaffBulkDelete.Field()\n\n user_avatar_update = UserAvatarUpdate.Field()\n user_avatar_delete = UserAvatarDelete.Field()\n user_bulk_set_active = UserBulkSetActive.Field()\n\n user_update_metadata = UserUpdateMeta.Field()\n user_clear_metadata = UserClearMeta.Field()\n\n user_update_private_metadata = UserUpdatePrivateMeta.Field()\n user_clear_private_metadata = UserClearPrivateMeta.Field()\n\n service_account_create = ServiceAccountCreate.Field()\n service_account_update = ServiceAccountUpdate.Field()\n service_account_delete = ServiceAccountDelete.Field()\n\n service_account_update_private_metadata = ServiceAccountUpdatePrivateMeta.Field()\n service_account_clear_private_metadata = ServiceAccountClearPrivateMeta.Field()\n\n service_account_token_create = ServiceAccountTokenCreate.Field()\n service_account_token_delete = ServiceAccountTokenDelete.Field()\n", "path": "saleor/graphql/account/schema.py"}], "after_files": [{"content": "from itertools import chain\nfrom typing import Optional\n\nimport graphene\nimport graphene_django_optimizer as gql_optimizer\nfrom django.db.models import QuerySet\nfrom graphql_jwt.exceptions import PermissionDenied\nfrom i18naddress import get_validation_rules\n\nfrom ...account import models\nfrom ...core.permissions import AccountPermissions\nfrom ...payment import gateway\nfrom ...payment.utils import fetch_customer_id\nfrom ..utils import filter_by_query_param, sort_queryset\nfrom .sorters import ServiceAccountSortField, UserSortField, UserSortingInput\nfrom .types import AddressValidationData, ChoiceValue\nfrom .utils import get_allowed_fields_camel_case, get_required_fields_camel_case\n\nUSER_SEARCH_FIELDS = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"default_shipping_address__first_name\",\n \"default_shipping_address__last_name\",\n \"default_shipping_address__city\",\n \"default_shipping_address__country\",\n)\n\n\ndef sort_users(qs: QuerySet, sort_by: UserSortingInput) -> QuerySet:\n if sort_by:\n return sort_queryset(qs, sort_by, UserSortField)\n return qs.order_by(\"email\")\n\n\ndef resolve_customers(info, query, sort_by=None, **_kwargs):\n qs = models.User.objects.customers()\n qs = filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS\n )\n qs = sort_users(qs, sort_by)\n qs = qs.distinct()\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_staff_users(info, query, sort_by=None, **_kwargs):\n qs = models.User.objects.staff()\n qs = filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS\n )\n qs = sort_users(qs, sort_by)\n qs = qs.distinct()\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_user(info, id):\n requester = info.context.user or info.context.service_account\n if requester:\n _model, user_pk = graphene.Node.from_global_id(id)\n if requester.has_perms(\n [AccountPermissions.MANAGE_STAFF, AccountPermissions.MANAGE_USERS]\n ):\n return models.User.objects.filter(pk=user_pk).first()\n if requester.has_perm(AccountPermissions.MANAGE_STAFF):\n return models.User.objects.staff().filter(pk=user_pk).first()\n if requester.has_perm(AccountPermissions.MANAGE_USERS):\n return 
models.User.objects.customers().filter(pk=user_pk).first()\n return PermissionDenied()\n\n\ndef resolve_service_accounts(info, sort_by=None, **_kwargs):\n qs = models.ServiceAccount.objects.all()\n qs = sort_queryset(qs, sort_by, ServiceAccountSortField)\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_address_validation_rules(\n info,\n country_code: str,\n country_area: Optional[str],\n city: Optional[str],\n city_area: Optional[str],\n):\n\n params = {\n \"country_code\": country_code,\n \"country_area\": country_area,\n \"city\": city,\n \"city_area\": city_area,\n }\n rules = get_validation_rules(params)\n return AddressValidationData(\n country_code=rules.country_code,\n country_name=rules.country_name,\n address_format=rules.address_format,\n address_latin_format=rules.address_latin_format,\n allowed_fields=get_allowed_fields_camel_case(rules.allowed_fields),\n required_fields=get_required_fields_camel_case(rules.required_fields),\n upper_fields=rules.upper_fields,\n country_area_type=rules.country_area_type,\n country_area_choices=[\n ChoiceValue(area[0], area[1]) for area in rules.country_area_choices\n ],\n city_type=rules.city_type,\n city_choices=[ChoiceValue(area[0], area[1]) for area in rules.city_choices],\n city_area_type=rules.city_type,\n city_area_choices=[\n ChoiceValue(area[0], area[1]) for area in rules.city_area_choices\n ],\n postal_code_type=rules.postal_code_type,\n postal_code_matchers=[\n compiled.pattern for compiled in rules.postal_code_matchers\n ],\n postal_code_examples=rules.postal_code_examples,\n postal_code_prefix=rules.postal_code_prefix,\n )\n\n\ndef resolve_payment_sources(user: models.User):\n stored_customer_accounts = (\n (gtw[\"name\"], fetch_customer_id(user, gtw[\"name\"]))\n for gtw in gateway.list_gateways()\n )\n return list(\n chain(\n *[\n prepare_graphql_payment_sources_type(\n gateway.list_payment_sources(gtw, customer_id)\n )\n for gtw, customer_id in stored_customer_accounts\n if customer_id is not None\n ]\n )\n )\n\n\ndef prepare_graphql_payment_sources_type(payment_sources):\n sources = []\n for src in payment_sources:\n sources.append(\n {\n \"gateway\": src.gateway,\n \"credit_card_info\": {\n \"last_digits\": src.credit_card_info.last_4,\n \"exp_year\": src.credit_card_info.exp_year,\n \"exp_month\": src.credit_card_info.exp_month,\n \"brand\": \"\",\n \"first_digits\": \"\",\n },\n }\n )\n return sources\n\n\ndef resolve_address(info, id):\n user = info.context.user\n service_account = info.context.service_account\n _model, address_pk = graphene.Node.from_global_id(id)\n if service_account:\n return models.Address.objects.filter(pk=address_pk).first()\n if user and not user.is_anonymous:\n return user.addresses.filter(id=address_pk).first()\n return PermissionDenied()\n", "path": "saleor/graphql/account/resolvers.py"}, {"content": "import graphene\nfrom graphql_jwt.decorators import login_required\n\nfrom ...core.permissions import AccountPermissions\nfrom ..core.fields import FilterInputConnectionField\nfrom ..core.types import FilterInputObjectType\nfrom ..decorators import one_of_permissions_required, permission_required\nfrom .bulk_mutations import CustomerBulkDelete, StaffBulkDelete, UserBulkSetActive\nfrom .enums import CountryCodeEnum\nfrom .filters import CustomerFilter, ServiceAccountFilter, StaffUserFilter\nfrom .mutations.account import (\n AccountAddressCreate,\n AccountAddressDelete,\n AccountAddressUpdate,\n AccountDelete,\n AccountRegister,\n AccountRequestDeletion,\n AccountSetDefaultAddress,\n 
AccountUpdate,\n AccountUpdateMeta,\n)\nfrom .mutations.base import (\n PasswordChange,\n RequestPasswordReset,\n SetPassword,\n UserClearMeta,\n UserUpdateMeta,\n)\nfrom .mutations.service_account import (\n ServiceAccountClearPrivateMeta,\n ServiceAccountCreate,\n ServiceAccountDelete,\n ServiceAccountTokenCreate,\n ServiceAccountTokenDelete,\n ServiceAccountUpdate,\n ServiceAccountUpdatePrivateMeta,\n)\nfrom .mutations.staff import (\n AddressCreate,\n AddressDelete,\n AddressSetDefault,\n AddressUpdate,\n CustomerCreate,\n CustomerDelete,\n CustomerUpdate,\n StaffCreate,\n StaffDelete,\n StaffUpdate,\n UserAvatarDelete,\n UserAvatarUpdate,\n UserClearPrivateMeta,\n UserUpdatePrivateMeta,\n)\nfrom .resolvers import (\n resolve_address,\n resolve_address_validation_rules,\n resolve_customers,\n resolve_service_accounts,\n resolve_staff_users,\n resolve_user,\n)\nfrom .sorters import ServiceAccountSortingInput, UserSortingInput\nfrom .types import Address, AddressValidationData, ServiceAccount, User\n\n\nclass CustomerFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = CustomerFilter\n\n\nclass StaffUserInput(FilterInputObjectType):\n class Meta:\n filterset_class = StaffUserFilter\n\n\nclass ServiceAccountFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = ServiceAccountFilter\n\n\nclass AccountQueries(graphene.ObjectType):\n address_validation_rules = graphene.Field(\n AddressValidationData,\n description=\"Returns address validation rules.\",\n country_code=graphene.Argument(\n CountryCodeEnum,\n description=\"Two-letter ISO 3166-1 country code.\",\n required=True,\n ),\n country_area=graphene.Argument(\n graphene.String, description=\"Designation of a region, province or state.\"\n ),\n city=graphene.Argument(graphene.String, description=\"City or a town name.\"),\n city_area=graphene.Argument(\n graphene.String, description=\"Sublocality like a district.\"\n ),\n )\n address = graphene.Field(\n Address,\n id=graphene.Argument(\n graphene.ID, description=\"ID of an address.\", required=True\n ),\n description=\"Look up an address by ID.\",\n )\n customers = FilterInputConnectionField(\n User,\n filter=CustomerFilterInput(description=\"Filtering options for customers.\"),\n sort_by=UserSortingInput(description=\"Sort customers.\"),\n description=\"List of the shop's customers.\",\n )\n me = graphene.Field(User, description=\"Return the currently authenticated user.\")\n staff_users = FilterInputConnectionField(\n User,\n filter=StaffUserInput(description=\"Filtering options for staff users.\"),\n sort_by=UserSortingInput(description=\"Sort staff users.\"),\n description=\"List of the shop's staff users.\",\n )\n service_accounts = FilterInputConnectionField(\n ServiceAccount,\n filter=ServiceAccountFilterInput(\n description=\"Filtering options for service accounts.\"\n ),\n sort_by=ServiceAccountSortingInput(description=\"Sort service accounts.\"),\n description=\"List of the service accounts.\",\n )\n service_account = graphene.Field(\n ServiceAccount,\n id=graphene.Argument(\n graphene.ID, description=\"ID of the service account.\", required=True\n ),\n description=\"Look up a service account by ID.\",\n )\n\n user = graphene.Field(\n User,\n id=graphene.Argument(graphene.ID, description=\"ID of the user.\", required=True),\n description=\"Look up a user by ID.\",\n )\n\n def resolve_address_validation_rules(\n self, info, country_code, country_area=None, city=None, city_area=None\n ):\n return resolve_address_validation_rules(\n info,\n 
country_code,\n country_area=country_area,\n city=city,\n city_area=city_area,\n )\n\n @permission_required(AccountPermissions.MANAGE_SERVICE_ACCOUNTS)\n def resolve_service_accounts(self, info, **kwargs):\n return resolve_service_accounts(info, **kwargs)\n\n @permission_required(AccountPermissions.MANAGE_SERVICE_ACCOUNTS)\n def resolve_service_account(self, info, id):\n return graphene.Node.get_node_from_global_id(info, id, ServiceAccount)\n\n @permission_required(AccountPermissions.MANAGE_USERS)\n def resolve_customers(self, info, query=None, **kwargs):\n return resolve_customers(info, query=query, **kwargs)\n\n @login_required\n def resolve_me(self, info):\n return info.context.user\n\n @permission_required(AccountPermissions.MANAGE_STAFF)\n def resolve_staff_users(self, info, query=None, **kwargs):\n return resolve_staff_users(info, query=query, **kwargs)\n\n @one_of_permissions_required(\n [AccountPermissions.MANAGE_STAFF, AccountPermissions.MANAGE_USERS]\n )\n def resolve_user(self, info, id):\n return resolve_user(info, id)\n\n def resolve_address(self, info, id):\n return resolve_address(info, id)\n\n\nclass AccountMutations(graphene.ObjectType):\n # Base mutations\n request_password_reset = RequestPasswordReset.Field()\n set_password = SetPassword.Field()\n password_change = PasswordChange.Field()\n\n # Account mutations\n account_address_create = AccountAddressCreate.Field()\n account_address_update = AccountAddressUpdate.Field()\n account_address_delete = AccountAddressDelete.Field()\n account_set_default_address = AccountSetDefaultAddress.Field()\n\n account_register = AccountRegister.Field()\n account_update = AccountUpdate.Field()\n account_request_deletion = AccountRequestDeletion.Field()\n account_delete = AccountDelete.Field()\n\n account_update_meta = AccountUpdateMeta.Field()\n\n # Staff mutation\n address_create = AddressCreate.Field()\n address_update = AddressUpdate.Field()\n address_delete = AddressDelete.Field()\n address_set_default = AddressSetDefault.Field()\n\n customer_create = CustomerCreate.Field()\n customer_update = CustomerUpdate.Field()\n customer_delete = CustomerDelete.Field()\n customer_bulk_delete = CustomerBulkDelete.Field()\n\n staff_create = StaffCreate.Field()\n staff_update = StaffUpdate.Field()\n staff_delete = StaffDelete.Field()\n staff_bulk_delete = StaffBulkDelete.Field()\n\n user_avatar_update = UserAvatarUpdate.Field()\n user_avatar_delete = UserAvatarDelete.Field()\n user_bulk_set_active = UserBulkSetActive.Field()\n\n user_update_metadata = UserUpdateMeta.Field()\n user_clear_metadata = UserClearMeta.Field()\n\n user_update_private_metadata = UserUpdatePrivateMeta.Field()\n user_clear_private_metadata = UserClearPrivateMeta.Field()\n\n service_account_create = ServiceAccountCreate.Field()\n service_account_update = ServiceAccountUpdate.Field()\n service_account_delete = ServiceAccountDelete.Field()\n\n service_account_update_private_metadata = ServiceAccountUpdatePrivateMeta.Field()\n service_account_clear_private_metadata = ServiceAccountClearPrivateMeta.Field()\n\n service_account_token_create = ServiceAccountTokenCreate.Field()\n service_account_token_delete = ServiceAccountTokenDelete.Field()\n", "path": "saleor/graphql/account/schema.py"}]}
3,938
491
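The resolver code in the record above exposes Saleor account data over GraphQL; `prepare_graphql_payment_sources_type` in particular flattens each gateway payment source into the plain dictionary shape the payment-source type expects. A minimal, dependency-free sketch of that flattening, using `SimpleNamespace` stand-ins instead of Saleor's real gateway objects (the card values are illustrative):

```python
from types import SimpleNamespace

# Illustrative stand-ins for the objects returned by gateway.list_payment_sources();
# these are not Saleor's real classes.
card = SimpleNamespace(last_4="4242", exp_year=2026, exp_month=7)
source = SimpleNamespace(gateway="dummy", credit_card_info=card)


def prepare_graphql_payment_sources_type(payment_sources):
    # Same flattening as the resolver in the record above.
    return [
        {
            "gateway": src.gateway,
            "credit_card_info": {
                "last_digits": src.credit_card_info.last_4,
                "exp_year": src.credit_card_info.exp_year,
                "exp_month": src.credit_card_info.exp_month,
                "brand": "",
                "first_digits": "",
            },
        }
        for src in payment_sources
    ]


print(prepare_graphql_payment_sources_type([source]))
# [{'gateway': 'dummy', 'credit_card_info': {'last_digits': '4242', 'exp_year': 2026,
#   'exp_month': 7, 'brand': '', 'first_digits': ''}}]
```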
gh_patches_debug_11574
rasdani/github-patches
git_diff
sunpy__sunpy-5293
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide an example of splitting sections of an attr query out of the Fido.search method. @Cadair's had this snippet of code ``` python import datetime from sunpy.net import vso from sunpy.time import parse_time # Start time and end time for the AIA search start = parse_time('2014/07/17T10:01:30') stop = start + datetime.timedelta(seconds=12) stop_hmi = start + datetime.timedelta(seconds=30) # Define two VSO Searches for the AIA data and the HMI data search_aia = (vso.attrs.Time(start, stop), vso.attrs.Instrument('AIA')) search_hmi = (vso.attrs.Time(start, stop_hmi), vso.attrs.Instrument('HMI'), vso.attrs.Physobs('LOS_magnetic_field')) # Create the VSO Client vsoClient = vso.VSOClient() # Query VSO for both searches using the or operator `|` results = vsoClient.query(search_aia | search_hmi) ``` That used to work but now I get this error. ``` python TypeError: unsupported operand type(s) for |: 'tuple' and 'tuple' ``` Should this operation be possible? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/acquiring_data/searching_vso.py` Content: ``` 1 """ 2 ====================================== 3 Searching and downloading from the VSO 4 ====================================== 5 6 How to download data from the VSO with Fido. 7 """ 8 import astropy.units as u 9 10 from sunpy.net import Fido 11 from sunpy.net import attrs as a 12 13 ############################################################################### 14 # `sunpy.net.Fido` is the primary interface to search for and download data and 15 # will search the VSO when appropriate. The following example searches for all 16 # SOHO/EIT images between the times defined below by defining a 17 # timerange (`~sunpy.net.attrs.Time`) and the instrument (`~sunpy.net.attrs.Instrument`). 18 19 attrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15') 20 result = Fido.search(attrs_time, a.Instrument.eit) 21 22 ############################################################################### 23 # Let's inspect the results. 24 25 print(result) 26 27 ############################################################################### 28 # The following shows how to download the results. If we 29 # don't provide a path it will download the file into the sunpy data directory. 30 # The output provides the path of the downloaded files. 31 32 downloaded_files = Fido.fetch(result) 33 print(downloaded_files) 34 35 ############################################################################### 36 # More complicated queries can be constructed by using relational operators. 37 # For example, it is possible to query two wavelengths at the same time with 38 # the OR operator (|). 39 40 result = Fido.search(a.Time('2020/03/04 00:00', '2020/03/04 00:02'), 41 a.Instrument.aia, 42 a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom)) 43 print(result) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/acquiring_data/searching_vso.py b/examples/acquiring_data/searching_vso.py --- a/examples/acquiring_data/searching_vso.py +++ b/examples/acquiring_data/searching_vso.py @@ -41,3 +41,15 @@ a.Instrument.aia, a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom)) print(result) + +############################################################################### +# We can even combine entire queries in this manner. +# Here we will define two searches for the AIA and HMI data. +# But unlike other examples, we have to ``&`` the individual queries. + +search_aia = (a.Time('2020/03/04 00:00', '2020/03/04 00:01') & a.Instrument.aia) +search_hmi = (a.Time('2020/03/04 00:00', '2020/03/04 00:01') + & a.Instrument.hmi & a.Physobs.los_magnetic_field) + +result = Fido.search(search_aia | search_hmi) +print(result)
{"golden_diff": "diff --git a/examples/acquiring_data/searching_vso.py b/examples/acquiring_data/searching_vso.py\n--- a/examples/acquiring_data/searching_vso.py\n+++ b/examples/acquiring_data/searching_vso.py\n@@ -41,3 +41,15 @@\n a.Instrument.aia,\n a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))\n print(result)\n+\n+###############################################################################\n+# We can even combine entire queries in this manner.\n+# Here we will define two searches for the AIA and HMI data.\n+# But unlike other examples, we have to ``&`` the individual queries.\n+\n+search_aia = (a.Time('2020/03/04 00:00', '2020/03/04 00:01') & a.Instrument.aia)\n+search_hmi = (a.Time('2020/03/04 00:00', '2020/03/04 00:01')\n+ & a.Instrument.hmi & a.Physobs.los_magnetic_field)\n+\n+result = Fido.search(search_aia | search_hmi)\n+print(result)\n", "issue": "Provide an example of splitting sections of an attr query out of the Fido.search method.\n@Cadair's had this snippet of code\r\n\r\n``` python\r\nimport datetime\r\nfrom sunpy.net import vso\r\nfrom sunpy.time import parse_time\r\n\r\n# Start time and end time for the AIA search\r\nstart = parse_time('2014/07/17T10:01:30')\r\nstop = start + datetime.timedelta(seconds=12)\r\nstop_hmi = start + datetime.timedelta(seconds=30)\r\n\r\n# Define two VSO Searches for the AIA data and the HMI data\r\nsearch_aia = (vso.attrs.Time(start, stop), vso.attrs.Instrument('AIA'))\r\nsearch_hmi = (vso.attrs.Time(start, stop_hmi), vso.attrs.Instrument('HMI'),\r\n vso.attrs.Physobs('LOS_magnetic_field'))\r\n\r\n# Create the VSO Client\r\nvsoClient = vso.VSOClient()\r\n\r\n# Query VSO for both searches using the or operator `|`\r\nresults = vsoClient.query(search_aia | search_hmi)\r\n```\r\n\r\nThat used to work but now I get this error. \r\n\r\n``` python\r\nTypeError: unsupported operand type(s) for |: 'tuple' and 'tuple'\r\n```\r\n\r\nShould this operation be possible? \r\n\n", "before_files": [{"content": "\"\"\"\n======================================\nSearching and downloading from the VSO\n======================================\n\nHow to download data from the VSO with Fido.\n\"\"\"\nimport astropy.units as u\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\n\n###############################################################################\n# `sunpy.net.Fido` is the primary interface to search for and download data and\n# will search the VSO when appropriate. The following example searches for all\n# SOHO/EIT images between the times defined below by defining a\n# timerange (`~sunpy.net.attrs.Time`) and the instrument (`~sunpy.net.attrs.Instrument`).\n\nattrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15')\nresult = Fido.search(attrs_time, a.Instrument.eit)\n\n###############################################################################\n# Let's inspect the results.\n\nprint(result)\n\n###############################################################################\n# The following shows how to download the results. 
If we\n# don't provide a path it will download the file into the sunpy data directory.\n# The output provides the path of the downloaded files.\n\ndownloaded_files = Fido.fetch(result)\nprint(downloaded_files)\n\n###############################################################################\n# More complicated queries can be constructed by using relational operators.\n# For example, it is possible to query two wavelengths at the same time with\n# the OR operator (|).\n\nresult = Fido.search(a.Time('2020/03/04 00:00', '2020/03/04 00:02'),\n a.Instrument.aia,\n a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))\nprint(result)\n", "path": "examples/acquiring_data/searching_vso.py"}], "after_files": [{"content": "\"\"\"\n======================================\nSearching and downloading from the VSO\n======================================\n\nHow to download data from the VSO with Fido.\n\"\"\"\nimport astropy.units as u\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\n\n###############################################################################\n# `sunpy.net.Fido` is the primary interface to search for and download data and\n# will search the VSO when appropriate. The following example searches for all\n# SOHO/EIT images between the times defined below by defining a\n# timerange (`~sunpy.net.attrs.Time`) and the instrument (`~sunpy.net.attrs.Instrument`).\n\nattrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15')\nresult = Fido.search(attrs_time, a.Instrument.eit)\n\n###############################################################################\n# Let's inspect the results.\n\nprint(result)\n\n###############################################################################\n# The following shows how to download the results. If we\n# don't provide a path it will download the file into the sunpy data directory.\n# The output provides the path of the downloaded files.\n\ndownloaded_files = Fido.fetch(result)\nprint(downloaded_files)\n\n###############################################################################\n# More complicated queries can be constructed by using relational operators.\n# For example, it is possible to query two wavelengths at the same time with\n# the OR operator (|).\n\nresult = Fido.search(a.Time('2020/03/04 00:00', '2020/03/04 00:02'),\n a.Instrument.aia,\n a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))\nprint(result)\n\n###############################################################################\n# We can even combine entire queries in this manner.\n# Here we will define two searches for the AIA and HMI data.\n# But unlike other examples, we have to ``&`` the individual queries.\n\nsearch_aia = (a.Time('2020/03/04 00:00', '2020/03/04 00:01') & a.Instrument.aia)\nsearch_hmi = (a.Time('2020/03/04 00:00', '2020/03/04 00:01')\n & a.Instrument.hmi & a.Physobs.los_magnetic_field)\n\nresult = Fido.search(search_aia | search_hmi)\nprint(result)\n", "path": "examples/acquiring_data/searching_vso.py"}]}
995
274
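The patch in the record above shows the supported way to split a Fido search into named sub-queries: join the attrs inside each query with `&`, then OR the finished queries together with `|` (a bare tuple of attrs has no `|` operator, which is what the `TypeError` in the issue is about). A short sketch of that pattern applied to the AIA/HMI scenario from the issue, assuming sunpy is installed and the VSO is reachable; the timestamps are illustrative:

```python
from sunpy.net import Fido
from sunpy.net import attrs as a

# Build each query as a single attr expression with `&` ...
time = a.Time("2014/07/17 10:01:30", "2014/07/17 10:02:00")
search_aia = time & a.Instrument.aia
search_hmi = time & a.Instrument.hmi & a.Physobs.los_magnetic_field

# ... then OR the two complete queries together in one search.
results = Fido.search(search_aia | search_hmi)
print(results)
```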
gh_patches_debug_16113
rasdani/github-patches
git_diff
opendatacube__datacube-core-1450
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Distorted data along fractional Dask chunks when loading data with `dc.load` and Dask ### Expected behaviour When loading data with Open Data Cube, we expect to be able to use Dask to efficiently parallelise workflows and reduce peak memory usage. Part of using Dask is an implicit assumption that data loaded by Dask will be identical to data loaded without Dask. ### Actual behaviour There appears to be a bug in `datacube.load` that is causing data loaded with Dask to be distorted along edges of Dask chunks. We first noticed data being blurred/stretched along the right side of our data loads when loaded with Dask - see animation below which shows the effect along the right side of the image: ![dask_vs_nodask](https://github.com/opendatacube/datacube-core/assets/17680388/61d9b798-62af-42ee-b600-d4dc5a283be7) Looking into this deeper, it appears that if a non-"nearest" resampling method is used (e.g. "cubic", "bilinear") and a Dask chunk extends off the right side of a data load, the data from that chunk will be strongly distorted. For example, if `dask_chunks={"time": 1, "x": 2048, "y": 2048}` and our data load is 2100 pixels wide, the right-most 52 pixels will be affected. If Dask is not used, `dc.load` works perfectly. ### Steps to reproduce the behaviour We've put together a Jupyter Notebook with a reproducible example here - it includes several examples of the issue, including some visualisations of how much this affects loaded data: https://gist.github.com/robbibt/4d6905d5d13137bb55362c4a6f1c18b8 For example, here is the difference between data loaded with and without Dask - note the band of strong differences along the right-hand side of the image: ![image](https://github.com/opendatacube/datacube-core/assets/17680388/a9609c86-ff5e-451c-a78c-771c65bb346b) ### Environment information * Which ``datacube --version`` are you using? 1.8.12 * What datacube deployment/enviornment are you running against? Prod DEA Sandbox --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `datacube/storage/_read.py` Content: ``` 1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information 2 # 3 # Copyright (c) 2015-2023 ODC Contributors 4 # SPDX-License-Identifier: Apache-2.0 5 """ Dataset -> Raster 6 """ 7 from affine import Affine 8 import numpy as np 9 from typing import Optional, Tuple 10 11 from ..utils.math import is_almost_int, valid_mask 12 13 from ..utils.geometry import ( 14 roi_shape, 15 roi_is_empty, 16 roi_is_full, 17 roi_pad, 18 GeoBox, 19 w_, 20 warp_affine, 21 rio_reproject, 22 compute_reproject_roi) 23 24 from ..utils.geometry._warp import is_resampling_nn, Resampling, Nodata 25 from ..utils.geometry import gbox as gbx 26 27 28 def rdr_geobox(rdr) -> GeoBox: 29 """ Construct GeoBox from opened dataset reader. 30 """ 31 h, w = rdr.shape 32 return GeoBox(w, h, rdr.transform, rdr.crs) 33 34 35 def can_paste(rr, stol=1e-3, ttol=1e-2): 36 """ 37 Take result of compute_reproject_roi and check if can read(possibly with scale) and paste, 38 or do we need to read then reproject. 
39 40 :returns: (True, None) if one can just read and paste 41 :returns: (False, Reason) if pasting is not possible, so need to reproject after reading 42 """ 43 if not rr.is_st: # not linear or not Scale + Translation 44 return False, "not ST" 45 46 scale = rr.scale 47 if not is_almost_int(scale, stol): # non-integer scaling 48 return False, "non-integer scale" 49 50 scale = np.round(scale) 51 A = rr.transform.linear # src -> dst 52 A = A*Affine.scale(scale, scale) # src.overview[scale] -> dst 53 54 (sx, _, tx, # tx, ty are in dst pixel space 55 _, sy, ty, 56 *_) = A 57 58 if any(abs(abs(s) - 1) > stol 59 for s in (sx, sy)): # not equal scaling across axis? 60 return False, "sx!=sy, probably" 61 62 ny, nx = (n/scale 63 for n in roi_shape(rr.roi_src)) 64 65 # src_roi doesn't divide by scale properly: 66 # example 3x7 scaled down by factor of 2 67 if not all(is_almost_int(n, stol) for n in (nx, ny)): 68 return False, "src_roi doesn't align for scale" 69 70 # TODO: probably need to deal with sub-pixel translation here, if we want 71 # to ignore sub-pixel translation and dst roi is 1 pixel bigger than src it 72 # should still be ok to paste after cropping dst roi by one pixel on the 73 # appropriate side. As it stands sub-pixel translation will be ignored only 74 # in some cases. 75 76 # scaled down shape doesn't match dst shape 77 s_shape = (int(ny), int(nx)) 78 if s_shape != roi_shape(rr.roi_dst): 79 return False, "src_roi/scale != dst_roi" 80 81 # final check: sub-pixel translation 82 if not all(is_almost_int(t, ttol) for t in (tx, ty)): 83 return False, "sub-pixel translation" 84 85 return True, None 86 87 88 def pick_read_scale(scale: float, rdr=None, tol=1e-3): 89 assert scale > 0 90 # First find nearest integer scale 91 # Scale down to nearest integer, unless we can scale up by less than tol 92 # 93 # 2.999999 -> 3 94 # 2.8 -> 2 95 # 0.3 -> 1 96 97 if scale < 1: 98 return 1 99 100 if is_almost_int(scale, tol): 101 scale = np.round(scale) 102 103 scale = int(scale) 104 105 if rdr is not None: 106 # TODO: check available overviews in rdr 107 pass 108 109 return scale 110 111 112 def read_time_slice(rdr, 113 dst: np.ndarray, 114 dst_gbox: GeoBox, 115 resampling: Resampling, 116 dst_nodata: Nodata, 117 extra_dim_index: Optional[int] = None) -> Tuple[slice, slice]: 118 """ From opened reader object read into `dst` 119 120 :returns: affected destination region 121 """ 122 assert dst.shape == dst_gbox.shape 123 src_gbox = rdr_geobox(rdr) 124 125 rr = compute_reproject_roi(src_gbox, dst_gbox) 126 127 if roi_is_empty(rr.roi_dst): 128 return rr.roi_dst 129 130 is_nn = is_resampling_nn(resampling) 131 scale = pick_read_scale(rr.scale, rdr) 132 133 paste_ok, _ = can_paste(rr, ttol=0.9 if is_nn else 0.01) 134 135 def norm_read_args(roi, shape, extra_dim_index): 136 if roi_is_full(roi, rdr.shape): 137 roi = None 138 139 if roi is None and shape == rdr.shape: 140 shape = None 141 142 w = w_[roi] 143 144 # Build 3D read window 145 # Note: Might be a good idea to natively support nD read windows. 
146 if extra_dim_index is not None: 147 if w is None: 148 w = () 149 return (extra_dim_index,) + w, shape 150 else: 151 # 2D read window 152 return w, shape 153 154 if paste_ok: 155 A = rr.transform.linear 156 sx, sy = A.a, A.e 157 158 dst = dst[rr.roi_dst] 159 pix = rdr.read(*norm_read_args(rr.roi_src, dst.shape, extra_dim_index)) 160 161 if sx < 0: 162 pix = pix[:, ::-1] 163 if sy < 0: 164 pix = pix[::-1, :] 165 166 if rdr.nodata is None: 167 np.copyto(dst, pix) 168 else: 169 np.copyto(dst, pix, where=valid_mask(pix, rdr.nodata)) 170 else: 171 if rr.is_st: 172 # add padding on src/dst ROIs, it was set to tight bounds 173 # TODO: this should probably happen inside compute_reproject_roi 174 rr.roi_dst = roi_pad(rr.roi_dst, 1, dst_gbox.shape) 175 rr.roi_src = roi_pad(rr.roi_src, 1, src_gbox.shape) 176 177 dst = dst[rr.roi_dst] 178 dst_gbox = dst_gbox[rr.roi_dst] 179 src_gbox = src_gbox[rr.roi_src] 180 if scale > 1: 181 src_gbox = gbx.zoom_out(src_gbox, scale) 182 183 pix = rdr.read(*norm_read_args(rr.roi_src, src_gbox.shape, extra_dim_index)) 184 185 if rr.transform.linear is not None: 186 A = (~src_gbox.transform)*dst_gbox.transform 187 warp_affine(pix, dst, A, resampling, 188 src_nodata=rdr.nodata, dst_nodata=dst_nodata) 189 else: 190 rio_reproject(pix, dst, src_gbox, dst_gbox, resampling, 191 src_nodata=rdr.nodata, dst_nodata=dst_nodata) 192 193 return rr.roi_dst 194 195 196 def read_time_slice_v2(rdr, 197 dst_gbox: GeoBox, 198 resampling: Resampling, 199 dst_nodata: Nodata) -> Tuple[Optional[np.ndarray], 200 Tuple[slice, slice]]: 201 """ From opened reader object read into `dst` 202 203 :returns: pixels read and ROI of dst_gbox that was affected 204 """ 205 # pylint: disable=too-many-locals 206 src_gbox = rdr_geobox(rdr) 207 208 rr = compute_reproject_roi(src_gbox, dst_gbox) 209 210 if roi_is_empty(rr.roi_dst): 211 return None, rr.roi_dst 212 213 is_nn = is_resampling_nn(resampling) 214 scale = pick_read_scale(rr.scale, rdr) 215 216 paste_ok, _ = can_paste(rr, ttol=0.9 if is_nn else 0.01) 217 218 def norm_read_args(roi, shape): 219 if roi_is_full(roi, rdr.shape): 220 roi = None 221 222 if roi is None and shape == rdr.shape: 223 shape = None 224 225 return roi, shape 226 227 if paste_ok: 228 read_shape = roi_shape(rr.roi_dst) 229 A = rr.transform.linear 230 sx, sy = A.a, A.e 231 232 pix = rdr.read(*norm_read_args(rr.roi_src, read_shape)).result() 233 234 if sx < 0: 235 pix = pix[:, ::-1] 236 if sy < 0: 237 pix = pix[::-1, :] 238 239 # normalise nodata to be equal to `dst_nodata` 240 if rdr.nodata is not None and rdr.nodata != dst_nodata: 241 pix[pix == rdr.nodata] = dst_nodata 242 243 dst = pix 244 else: 245 if rr.is_st: 246 # add padding on src/dst ROIs, it was set to tight bounds 247 # TODO: this should probably happen inside compute_reproject_roi 248 rr.roi_dst = roi_pad(rr.roi_dst, 1, dst_gbox.shape) 249 rr.roi_src = roi_pad(rr.roi_src, 1, src_gbox.shape) 250 251 dst_gbox = dst_gbox[rr.roi_dst] 252 src_gbox = src_gbox[rr.roi_src] 253 if scale > 1: 254 src_gbox = gbx.zoom_out(src_gbox, scale) 255 256 dst = np.full(dst_gbox.shape, dst_nodata, dtype=rdr.dtype) 257 pix = rdr.read(*norm_read_args(rr.roi_src, src_gbox.shape)).result() 258 259 if rr.transform.linear is not None: 260 A = (~src_gbox.transform)*dst_gbox.transform 261 warp_affine(pix, dst, A, resampling, 262 src_nodata=rdr.nodata, dst_nodata=dst_nodata) 263 else: 264 rio_reproject(pix, dst, src_gbox, dst_gbox, resampling, 265 src_nodata=rdr.nodata, dst_nodata=dst_nodata) 266 267 return dst, rr.roi_dst 268 ``` --- END FILES --- 
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/datacube/storage/_read.py b/datacube/storage/_read.py --- a/datacube/storage/_read.py +++ b/datacube/storage/_read.py @@ -129,6 +129,7 @@ is_nn = is_resampling_nn(resampling) scale = pick_read_scale(rr.scale, rdr) + scale_x, scale_y = [pick_read_scale(s) for s in rr.scale2] paste_ok, _ = can_paste(rr, ttol=0.9 if is_nn else 0.01) @@ -188,7 +189,8 @@ src_nodata=rdr.nodata, dst_nodata=dst_nodata) else: rio_reproject(pix, dst, src_gbox, dst_gbox, resampling, - src_nodata=rdr.nodata, dst_nodata=dst_nodata) + src_nodata=rdr.nodata, dst_nodata=dst_nodata, + XSCALE=scale_x, YSCALE=scale_y) return rr.roi_dst
{"golden_diff": "diff --git a/datacube/storage/_read.py b/datacube/storage/_read.py\n--- a/datacube/storage/_read.py\n+++ b/datacube/storage/_read.py\n@@ -129,6 +129,7 @@\n \n is_nn = is_resampling_nn(resampling)\n scale = pick_read_scale(rr.scale, rdr)\n+ scale_x, scale_y = [pick_read_scale(s) for s in rr.scale2]\n \n paste_ok, _ = can_paste(rr, ttol=0.9 if is_nn else 0.01)\n \n@@ -188,7 +189,8 @@\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n else:\n rio_reproject(pix, dst, src_gbox, dst_gbox, resampling,\n- src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n+ src_nodata=rdr.nodata, dst_nodata=dst_nodata,\n+ XSCALE=scale_x, YSCALE=scale_y)\n \n return rr.roi_dst\n", "issue": "Distorted data along fractional Dask chunks when loading data with `dc.load` and Dask\n### Expected behaviour\r\nWhen loading data with Open Data Cube, we expect to be able to use Dask to efficiently parallelise workflows and reduce peak memory usage. Part of using Dask is an implicit assumption that data loaded by Dask will be identical to data loaded without Dask.\r\n\r\n### Actual behaviour\r\nThere appears to be a bug in `datacube.load` that is causing data loaded with Dask to be distorted along edges of Dask chunks. We first noticed data being blurred/stretched along the right side of our data loads when loaded with Dask - see animation below which shows the effect along the right side of the image:\r\n \r\n![dask_vs_nodask](https://github.com/opendatacube/datacube-core/assets/17680388/61d9b798-62af-42ee-b600-d4dc5a283be7)\r\n\r\nLooking into this deeper, it appears that if a non-\"nearest\" resampling method is used (e.g. \"cubic\", \"bilinear\") and a Dask chunk extends off the right side of a data load, the data from that chunk will be strongly distorted. For example, if `dask_chunks={\"time\": 1, \"x\": 2048, \"y\": 2048}` and our data load is 2100 pixels wide, the right-most 52 pixels will be affected. If Dask is not used, `dc.load` works perfectly. \r\n\r\n### Steps to reproduce the behaviour\r\nWe've put together a Jupyter Notebook with a reproducible example here - it includes several examples of the issue, including some visualisations of how much this affects loaded data: https://gist.github.com/robbibt/4d6905d5d13137bb55362c4a6f1c18b8\r\n\r\nFor example, here is the difference between data loaded with and without Dask - note the band of strong differences along the right-hand side of the image:\r\n![image](https://github.com/opendatacube/datacube-core/assets/17680388/a9609c86-ff5e-451c-a78c-771c65bb346b)\r\n\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using? 
1.8.12\r\n* What datacube deployment/enviornment are you running against?\r\nProd DEA Sandbox\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Dataset -> Raster\n\"\"\"\nfrom affine import Affine\nimport numpy as np\nfrom typing import Optional, Tuple\n\nfrom ..utils.math import is_almost_int, valid_mask\n\nfrom ..utils.geometry import (\n roi_shape,\n roi_is_empty,\n roi_is_full,\n roi_pad,\n GeoBox,\n w_,\n warp_affine,\n rio_reproject,\n compute_reproject_roi)\n\nfrom ..utils.geometry._warp import is_resampling_nn, Resampling, Nodata\nfrom ..utils.geometry import gbox as gbx\n\n\ndef rdr_geobox(rdr) -> GeoBox:\n \"\"\" Construct GeoBox from opened dataset reader.\n \"\"\"\n h, w = rdr.shape\n return GeoBox(w, h, rdr.transform, rdr.crs)\n\n\ndef can_paste(rr, stol=1e-3, ttol=1e-2):\n \"\"\"\n Take result of compute_reproject_roi and check if can read(possibly with scale) and paste,\n or do we need to read then reproject.\n\n :returns: (True, None) if one can just read and paste\n :returns: (False, Reason) if pasting is not possible, so need to reproject after reading\n \"\"\"\n if not rr.is_st: # not linear or not Scale + Translation\n return False, \"not ST\"\n\n scale = rr.scale\n if not is_almost_int(scale, stol): # non-integer scaling\n return False, \"non-integer scale\"\n\n scale = np.round(scale)\n A = rr.transform.linear # src -> dst\n A = A*Affine.scale(scale, scale) # src.overview[scale] -> dst\n\n (sx, _, tx, # tx, ty are in dst pixel space\n _, sy, ty,\n *_) = A\n\n if any(abs(abs(s) - 1) > stol\n for s in (sx, sy)): # not equal scaling across axis?\n return False, \"sx!=sy, probably\"\n\n ny, nx = (n/scale\n for n in roi_shape(rr.roi_src))\n\n # src_roi doesn't divide by scale properly:\n # example 3x7 scaled down by factor of 2\n if not all(is_almost_int(n, stol) for n in (nx, ny)):\n return False, \"src_roi doesn't align for scale\"\n\n # TODO: probably need to deal with sub-pixel translation here, if we want\n # to ignore sub-pixel translation and dst roi is 1 pixel bigger than src it\n # should still be ok to paste after cropping dst roi by one pixel on the\n # appropriate side. 
As it stands sub-pixel translation will be ignored only\n # in some cases.\n\n # scaled down shape doesn't match dst shape\n s_shape = (int(ny), int(nx))\n if s_shape != roi_shape(rr.roi_dst):\n return False, \"src_roi/scale != dst_roi\"\n\n # final check: sub-pixel translation\n if not all(is_almost_int(t, ttol) for t in (tx, ty)):\n return False, \"sub-pixel translation\"\n\n return True, None\n\n\ndef pick_read_scale(scale: float, rdr=None, tol=1e-3):\n assert scale > 0\n # First find nearest integer scale\n # Scale down to nearest integer, unless we can scale up by less than tol\n #\n # 2.999999 -> 3\n # 2.8 -> 2\n # 0.3 -> 1\n\n if scale < 1:\n return 1\n\n if is_almost_int(scale, tol):\n scale = np.round(scale)\n\n scale = int(scale)\n\n if rdr is not None:\n # TODO: check available overviews in rdr\n pass\n\n return scale\n\n\ndef read_time_slice(rdr,\n dst: np.ndarray,\n dst_gbox: GeoBox,\n resampling: Resampling,\n dst_nodata: Nodata,\n extra_dim_index: Optional[int] = None) -> Tuple[slice, slice]:\n \"\"\" From opened reader object read into `dst`\n\n :returns: affected destination region\n \"\"\"\n assert dst.shape == dst_gbox.shape\n src_gbox = rdr_geobox(rdr)\n\n rr = compute_reproject_roi(src_gbox, dst_gbox)\n\n if roi_is_empty(rr.roi_dst):\n return rr.roi_dst\n\n is_nn = is_resampling_nn(resampling)\n scale = pick_read_scale(rr.scale, rdr)\n\n paste_ok, _ = can_paste(rr, ttol=0.9 if is_nn else 0.01)\n\n def norm_read_args(roi, shape, extra_dim_index):\n if roi_is_full(roi, rdr.shape):\n roi = None\n\n if roi is None and shape == rdr.shape:\n shape = None\n\n w = w_[roi]\n\n # Build 3D read window\n # Note: Might be a good idea to natively support nD read windows.\n if extra_dim_index is not None:\n if w is None:\n w = ()\n return (extra_dim_index,) + w, shape\n else:\n # 2D read window\n return w, shape\n\n if paste_ok:\n A = rr.transform.linear\n sx, sy = A.a, A.e\n\n dst = dst[rr.roi_dst]\n pix = rdr.read(*norm_read_args(rr.roi_src, dst.shape, extra_dim_index))\n\n if sx < 0:\n pix = pix[:, ::-1]\n if sy < 0:\n pix = pix[::-1, :]\n\n if rdr.nodata is None:\n np.copyto(dst, pix)\n else:\n np.copyto(dst, pix, where=valid_mask(pix, rdr.nodata))\n else:\n if rr.is_st:\n # add padding on src/dst ROIs, it was set to tight bounds\n # TODO: this should probably happen inside compute_reproject_roi\n rr.roi_dst = roi_pad(rr.roi_dst, 1, dst_gbox.shape)\n rr.roi_src = roi_pad(rr.roi_src, 1, src_gbox.shape)\n\n dst = dst[rr.roi_dst]\n dst_gbox = dst_gbox[rr.roi_dst]\n src_gbox = src_gbox[rr.roi_src]\n if scale > 1:\n src_gbox = gbx.zoom_out(src_gbox, scale)\n\n pix = rdr.read(*norm_read_args(rr.roi_src, src_gbox.shape, extra_dim_index))\n\n if rr.transform.linear is not None:\n A = (~src_gbox.transform)*dst_gbox.transform\n warp_affine(pix, dst, A, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n else:\n rio_reproject(pix, dst, src_gbox, dst_gbox, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n\n return rr.roi_dst\n\n\ndef read_time_slice_v2(rdr,\n dst_gbox: GeoBox,\n resampling: Resampling,\n dst_nodata: Nodata) -> Tuple[Optional[np.ndarray],\n Tuple[slice, slice]]:\n \"\"\" From opened reader object read into `dst`\n\n :returns: pixels read and ROI of dst_gbox that was affected\n \"\"\"\n # pylint: disable=too-many-locals\n src_gbox = rdr_geobox(rdr)\n\n rr = compute_reproject_roi(src_gbox, dst_gbox)\n\n if roi_is_empty(rr.roi_dst):\n return None, rr.roi_dst\n\n is_nn = is_resampling_nn(resampling)\n scale = pick_read_scale(rr.scale, rdr)\n\n paste_ok, _ = 
can_paste(rr, ttol=0.9 if is_nn else 0.01)\n\n def norm_read_args(roi, shape):\n if roi_is_full(roi, rdr.shape):\n roi = None\n\n if roi is None and shape == rdr.shape:\n shape = None\n\n return roi, shape\n\n if paste_ok:\n read_shape = roi_shape(rr.roi_dst)\n A = rr.transform.linear\n sx, sy = A.a, A.e\n\n pix = rdr.read(*norm_read_args(rr.roi_src, read_shape)).result()\n\n if sx < 0:\n pix = pix[:, ::-1]\n if sy < 0:\n pix = pix[::-1, :]\n\n # normalise nodata to be equal to `dst_nodata`\n if rdr.nodata is not None and rdr.nodata != dst_nodata:\n pix[pix == rdr.nodata] = dst_nodata\n\n dst = pix\n else:\n if rr.is_st:\n # add padding on src/dst ROIs, it was set to tight bounds\n # TODO: this should probably happen inside compute_reproject_roi\n rr.roi_dst = roi_pad(rr.roi_dst, 1, dst_gbox.shape)\n rr.roi_src = roi_pad(rr.roi_src, 1, src_gbox.shape)\n\n dst_gbox = dst_gbox[rr.roi_dst]\n src_gbox = src_gbox[rr.roi_src]\n if scale > 1:\n src_gbox = gbx.zoom_out(src_gbox, scale)\n\n dst = np.full(dst_gbox.shape, dst_nodata, dtype=rdr.dtype)\n pix = rdr.read(*norm_read_args(rr.roi_src, src_gbox.shape)).result()\n\n if rr.transform.linear is not None:\n A = (~src_gbox.transform)*dst_gbox.transform\n warp_affine(pix, dst, A, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n else:\n rio_reproject(pix, dst, src_gbox, dst_gbox, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n\n return dst, rr.roi_dst\n", "path": "datacube/storage/_read.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Dataset -> Raster\n\"\"\"\nfrom affine import Affine\nimport numpy as np\nfrom typing import Optional, Tuple\n\nfrom ..utils.math import is_almost_int, valid_mask\n\nfrom ..utils.geometry import (\n roi_shape,\n roi_is_empty,\n roi_is_full,\n roi_pad,\n GeoBox,\n w_,\n warp_affine,\n rio_reproject,\n compute_reproject_roi)\n\nfrom ..utils.geometry._warp import is_resampling_nn, Resampling, Nodata\nfrom ..utils.geometry import gbox as gbx\n\n\ndef rdr_geobox(rdr) -> GeoBox:\n \"\"\" Construct GeoBox from opened dataset reader.\n \"\"\"\n h, w = rdr.shape\n return GeoBox(w, h, rdr.transform, rdr.crs)\n\n\ndef can_paste(rr, stol=1e-3, ttol=1e-2):\n \"\"\"\n Take result of compute_reproject_roi and check if can read(possibly with scale) and paste,\n or do we need to read then reproject.\n\n :returns: (True, None) if one can just read and paste\n :returns: (False, Reason) if pasting is not possible, so need to reproject after reading\n \"\"\"\n if not rr.is_st: # not linear or not Scale + Translation\n return False, \"not ST\"\n\n scale = rr.scale\n if not is_almost_int(scale, stol): # non-integer scaling\n return False, \"non-integer scale\"\n\n scale = np.round(scale)\n A = rr.transform.linear # src -> dst\n A = A*Affine.scale(scale, scale) # src.overview[scale] -> dst\n\n (sx, _, tx, # tx, ty are in dst pixel space\n _, sy, ty,\n *_) = A\n\n if any(abs(abs(s) - 1) > stol\n for s in (sx, sy)): # not equal scaling across axis?\n return False, \"sx!=sy, probably\"\n\n ny, nx = (n/scale\n for n in roi_shape(rr.roi_src))\n\n # src_roi doesn't divide by scale properly:\n # example 3x7 scaled down by factor of 2\n if not all(is_almost_int(n, stol) for n in (nx, ny)):\n return False, \"src_roi doesn't align for scale\"\n\n # TODO: probably need to deal with sub-pixel translation here, if we want\n # to ignore sub-pixel translation 
and dst roi is 1 pixel bigger than src it\n # should still be ok to paste after cropping dst roi by one pixel on the\n # appropriate side. As it stands sub-pixel translation will be ignored only\n # in some cases.\n\n # scaled down shape doesn't match dst shape\n s_shape = (int(ny), int(nx))\n if s_shape != roi_shape(rr.roi_dst):\n return False, \"src_roi/scale != dst_roi\"\n\n # final check: sub-pixel translation\n if not all(is_almost_int(t, ttol) for t in (tx, ty)):\n return False, \"sub-pixel translation\"\n\n return True, None\n\n\ndef pick_read_scale(scale: float, rdr=None, tol=1e-3):\n assert scale > 0\n # First find nearest integer scale\n # Scale down to nearest integer, unless we can scale up by less than tol\n #\n # 2.999999 -> 3\n # 2.8 -> 2\n # 0.3 -> 1\n\n if scale < 1:\n return 1\n\n if is_almost_int(scale, tol):\n scale = np.round(scale)\n\n scale = int(scale)\n\n if rdr is not None:\n # TODO: check available overviews in rdr\n pass\n\n return scale\n\n\ndef read_time_slice(rdr,\n dst: np.ndarray,\n dst_gbox: GeoBox,\n resampling: Resampling,\n dst_nodata: Nodata,\n extra_dim_index: Optional[int] = None) -> Tuple[slice, slice]:\n \"\"\" From opened reader object read into `dst`\n\n :returns: affected destination region\n \"\"\"\n assert dst.shape == dst_gbox.shape\n src_gbox = rdr_geobox(rdr)\n\n rr = compute_reproject_roi(src_gbox, dst_gbox)\n\n if roi_is_empty(rr.roi_dst):\n return rr.roi_dst\n\n is_nn = is_resampling_nn(resampling)\n scale = pick_read_scale(rr.scale, rdr)\n scale_x, scale_y = [pick_read_scale(s) for s in rr.scale2]\n\n paste_ok, _ = can_paste(rr, ttol=0.9 if is_nn else 0.01)\n\n def norm_read_args(roi, shape, extra_dim_index):\n if roi_is_full(roi, rdr.shape):\n roi = None\n\n if roi is None and shape == rdr.shape:\n shape = None\n\n w = w_[roi]\n\n # Build 3D read window\n # Note: Might be a good idea to natively support nD read windows.\n if extra_dim_index is not None:\n if w is None:\n w = ()\n return (extra_dim_index,) + w, shape\n else:\n # 2D read window\n return w, shape\n\n if paste_ok:\n A = rr.transform.linear\n sx, sy = A.a, A.e\n\n dst = dst[rr.roi_dst]\n pix = rdr.read(*norm_read_args(rr.roi_src, dst.shape, extra_dim_index))\n\n if sx < 0:\n pix = pix[:, ::-1]\n if sy < 0:\n pix = pix[::-1, :]\n\n if rdr.nodata is None:\n np.copyto(dst, pix)\n else:\n np.copyto(dst, pix, where=valid_mask(pix, rdr.nodata))\n else:\n if rr.is_st:\n # add padding on src/dst ROIs, it was set to tight bounds\n # TODO: this should probably happen inside compute_reproject_roi\n rr.roi_dst = roi_pad(rr.roi_dst, 1, dst_gbox.shape)\n rr.roi_src = roi_pad(rr.roi_src, 1, src_gbox.shape)\n\n dst = dst[rr.roi_dst]\n dst_gbox = dst_gbox[rr.roi_dst]\n src_gbox = src_gbox[rr.roi_src]\n if scale > 1:\n src_gbox = gbx.zoom_out(src_gbox, scale)\n\n pix = rdr.read(*norm_read_args(rr.roi_src, src_gbox.shape, extra_dim_index))\n\n if rr.transform.linear is not None:\n A = (~src_gbox.transform)*dst_gbox.transform\n warp_affine(pix, dst, A, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n else:\n rio_reproject(pix, dst, src_gbox, dst_gbox, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata,\n XSCALE=scale_x, YSCALE=scale_y)\n\n return rr.roi_dst\n\n\ndef read_time_slice_v2(rdr,\n dst_gbox: GeoBox,\n resampling: Resampling,\n dst_nodata: Nodata) -> Tuple[Optional[np.ndarray],\n Tuple[slice, slice]]:\n \"\"\" From opened reader object read into `dst`\n\n :returns: pixels read and ROI of dst_gbox that was affected\n \"\"\"\n # pylint: disable=too-many-locals\n 
src_gbox = rdr_geobox(rdr)\n\n rr = compute_reproject_roi(src_gbox, dst_gbox)\n\n if roi_is_empty(rr.roi_dst):\n return None, rr.roi_dst\n\n is_nn = is_resampling_nn(resampling)\n scale = pick_read_scale(rr.scale, rdr)\n\n paste_ok, _ = can_paste(rr, ttol=0.9 if is_nn else 0.01)\n\n def norm_read_args(roi, shape):\n if roi_is_full(roi, rdr.shape):\n roi = None\n\n if roi is None and shape == rdr.shape:\n shape = None\n\n return roi, shape\n\n if paste_ok:\n read_shape = roi_shape(rr.roi_dst)\n A = rr.transform.linear\n sx, sy = A.a, A.e\n\n pix = rdr.read(*norm_read_args(rr.roi_src, read_shape)).result()\n\n if sx < 0:\n pix = pix[:, ::-1]\n if sy < 0:\n pix = pix[::-1, :]\n\n # normalise nodata to be equal to `dst_nodata`\n if rdr.nodata is not None and rdr.nodata != dst_nodata:\n pix[pix == rdr.nodata] = dst_nodata\n\n dst = pix\n else:\n if rr.is_st:\n # add padding on src/dst ROIs, it was set to tight bounds\n # TODO: this should probably happen inside compute_reproject_roi\n rr.roi_dst = roi_pad(rr.roi_dst, 1, dst_gbox.shape)\n rr.roi_src = roi_pad(rr.roi_src, 1, src_gbox.shape)\n\n dst_gbox = dst_gbox[rr.roi_dst]\n src_gbox = src_gbox[rr.roi_src]\n if scale > 1:\n src_gbox = gbx.zoom_out(src_gbox, scale)\n\n dst = np.full(dst_gbox.shape, dst_nodata, dtype=rdr.dtype)\n pix = rdr.read(*norm_read_args(rr.roi_src, src_gbox.shape)).result()\n\n if rr.transform.linear is not None:\n A = (~src_gbox.transform)*dst_gbox.transform\n warp_affine(pix, dst, A, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n else:\n rio_reproject(pix, dst, src_gbox, dst_gbox, resampling,\n src_nodata=rdr.nodata, dst_nodata=dst_nodata)\n\n return dst, rr.roi_dst\n", "path": "datacube/storage/_read.py"}]}
3,795
235
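The fix in the record above threads the per-axis scale factors (`rr.scale2`) through to `rio_reproject` as `XSCALE`/`YSCALE`, so the partial Dask chunk on the right edge is resampled with the correct scaling. A sketch of the with/without-Dask comparison described in the issue, assuming access to a configured Open Data Cube index; the product and measurement names below are placeholders:

```python
import datacube

dc = datacube.Datacube(app="dask-chunk-check")

query = dict(
    product="ga_ls8c_ard_3",        # placeholder product
    measurements=["nbart_red"],     # placeholder band
    x=(153.0, 153.2), y=(-27.6, -27.4),
    time=("2020-01-01", "2020-01-31"),
    output_crs="EPSG:3577",
    resolution=(-30, 30),
    resampling="cubic",             # any non-"nearest" method exposes the bug
)

eager = dc.load(**query)
lazy = dc.load(dask_chunks={"time": 1, "x": 2048, "y": 2048}, **query)

# As reported in the issue, the right-most fractional chunk of the Dask load
# diverged from the eager load before the XSCALE/YSCALE fix; afterwards the
# difference should be zero (up to floating point).
diff = (lazy.nbart_red.compute() - eager.nbart_red).astype("float32")
print(float(abs(diff).max()))
```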
gh_patches_debug_37534
rasdani/github-patches
git_diff
dbt-labs__dbt-core-8561
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [CT-2848] [Bug] `dbt show` adding decimal places to non-decimal values ### Is this a regression in a recent version of dbt-core? - [X] I believe this is a regression in dbt-core functionality - [X] I have searched the existing issues, and I could not find an existing issue for this regression ### Current Behavior `dbt show` is adding decimal places to numeric/integer values incorrectly. This is causing problems for users in the dbt Cloud IDE, as the IDE uses that command to render the preview tab. ### Expected/Previous Behavior Previously (we think!) this was not an issue, and decimal places were properly handled by dbt show ### Steps To Reproduce 1. use dbt-snowflake>=1.5.0 2. create a model in your project: ```sql # in my_model.sql select cast(1 as numeric) as _numeric, cast(1 as integer) as _integer, cast(1 as decimal) as _decimal ``` 3. run `dbt show -s my_model --output JSON` 4. get this! ```zsh ❯ dbt show -s my_model --output json 20:33:15 Running with dbt=1.5.3 20:33:16 Registered adapter: snowflake=1.5.2 20:33:16 Unable to do partial parsing because of a version mismatch 20:33:17 Found 3 models, 4 tests, 0 snapshots, 0 analyses, 436 macros, 0 operations, 0 seed files, 0 sources, 0 exposures, 0 metrics, 0 groups 20:33:17 20:33:18 Concurrency: 8 threads (target='dev') 20:33:18 20:33:19 { "node": "my_model", "show": [ { "_NUMERIC": 1.0, "_INTEGER": 1.0, "_DECIMAL": 1.0 } ] } ``` ### Relevant log output _No response_ ### Environment ```markdown - OS: mac - Python: Python 3.9.16 - dbt (working version): unknown - dbt (regression version): 1.5.3 ``` ### Which database adapter are you using with dbt? snowflake ### Additional Context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/dbt/clients/agate_helper.py` Content: ``` 1 from codecs import BOM_UTF8 2 3 import agate 4 import datetime 5 import isodate 6 import json 7 import dbt.utils 8 from typing import Iterable, List, Dict, Union, Optional, Any 9 10 from dbt.exceptions import DbtRuntimeError 11 12 BOM = BOM_UTF8.decode("utf-8") # '\ufeff' 13 14 15 class Number(agate.data_types.Number): 16 # undo the change in https://github.com/wireservice/agate/pull/733 17 # i.e. do not cast True and False to numeric 1 and 0 18 def cast(self, d): 19 if type(d) == bool: 20 raise agate.exceptions.CastError("Do not cast True to 1 or False to 0.") 21 else: 22 return super().cast(d) 23 24 25 class ISODateTime(agate.data_types.DateTime): 26 def cast(self, d): 27 # this is agate.data_types.DateTime.cast with the "clever" bits removed 28 # so we only handle ISO8601 stuff 29 if isinstance(d, datetime.datetime) or d is None: 30 return d 31 elif isinstance(d, datetime.date): 32 return datetime.datetime.combine(d, datetime.time(0, 0, 0)) 33 elif isinstance(d, str): 34 d = d.strip() 35 if d.lower() in self.null_values: 36 return None 37 try: 38 return isodate.parse_datetime(d) 39 except: # noqa 40 pass 41 42 raise agate.exceptions.CastError('Can not parse value "%s" as datetime.' 
% d) 43 44 45 def build_type_tester( 46 text_columns: Iterable[str], string_null_values: Optional[Iterable[str]] = ("null", "") 47 ) -> agate.TypeTester: 48 49 types = [ 50 Number(null_values=("null", "")), 51 agate.data_types.Date(null_values=("null", ""), date_format="%Y-%m-%d"), 52 agate.data_types.DateTime(null_values=("null", ""), datetime_format="%Y-%m-%d %H:%M:%S"), 53 ISODateTime(null_values=("null", "")), 54 agate.data_types.Boolean( 55 true_values=("true",), false_values=("false",), null_values=("null", "") 56 ), 57 agate.data_types.Text(null_values=string_null_values), 58 ] 59 force = {k: agate.data_types.Text(null_values=string_null_values) for k in text_columns} 60 return agate.TypeTester(force=force, types=types) 61 62 63 DEFAULT_TYPE_TESTER = build_type_tester(()) 64 65 66 def table_from_rows( 67 rows: List[Any], 68 column_names: Iterable[str], 69 text_only_columns: Optional[Iterable[str]] = None, 70 ) -> agate.Table: 71 if text_only_columns is None: 72 column_types = DEFAULT_TYPE_TESTER 73 else: 74 # If text_only_columns are present, prevent coercing empty string or 75 # literal 'null' strings to a None representation. 76 column_types = build_type_tester(text_only_columns, string_null_values=()) 77 78 return agate.Table(rows, column_names, column_types=column_types) 79 80 81 def table_from_data(data, column_names: Iterable[str]) -> agate.Table: 82 "Convert a list of dictionaries into an Agate table" 83 84 # The agate table is generated from a list of dicts, so the column order 85 # from `data` is not preserved. We can use `select` to reorder the columns 86 # 87 # If there is no data, create an empty table with the specified columns 88 89 if len(data) == 0: 90 return agate.Table([], column_names=column_names) 91 else: 92 table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER) 93 return table.select(column_names) 94 95 96 def table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table: 97 """ 98 Convert a list of dictionaries into an Agate table. This method does not 99 coerce string values into more specific types (eg. '005' will not be 100 coerced to '5'). Additionally, this method does not coerce values to 101 None (eg. '' or 'null' will retain their string literal representations). 102 """ 103 104 rows = [] 105 text_only_columns = set() 106 for _row in data: 107 row = [] 108 for col_name in column_names: 109 value = _row[col_name] 110 if isinstance(value, (dict, list, tuple)): 111 # Represent container types as json strings 112 value = json.dumps(value, cls=dbt.utils.JSONEncoder) 113 text_only_columns.add(col_name) 114 elif isinstance(value, str): 115 text_only_columns.add(col_name) 116 row.append(value) 117 118 rows.append(row) 119 120 return table_from_rows( 121 rows=rows, column_names=column_names, text_only_columns=text_only_columns 122 ) 123 124 125 def empty_table(): 126 "Returns an empty Agate table. 
To be used in place of None" 127 128 return agate.Table(rows=[]) 129 130 131 def as_matrix(table): 132 "Return an agate table as a matrix of data sans columns" 133 134 return [r.values() for r in table.rows.values()] 135 136 137 def from_csv(abspath, text_columns, delimiter=","): 138 type_tester = build_type_tester(text_columns=text_columns) 139 with open(abspath, encoding="utf-8") as fp: 140 if fp.read(1) != BOM: 141 fp.seek(0) 142 return agate.Table.from_csv(fp, column_types=type_tester, delimiter=delimiter) 143 144 145 class _NullMarker: 146 pass 147 148 149 NullableAgateType = Union[agate.data_types.DataType, _NullMarker] 150 151 152 class ColumnTypeBuilder(Dict[str, NullableAgateType]): 153 def __init__(self): 154 super().__init__() 155 156 def __setitem__(self, key, value): 157 if key not in self: 158 super().__setitem__(key, value) 159 return 160 161 existing_type = self[key] 162 if isinstance(existing_type, _NullMarker): 163 # overwrite 164 super().__setitem__(key, value) 165 elif isinstance(value, _NullMarker): 166 # use the existing value 167 return 168 elif not isinstance(value, type(existing_type)): 169 # actual type mismatch! 170 raise DbtRuntimeError( 171 f"Tables contain columns with the same names ({key}), " 172 f"but different types ({value} vs {existing_type})" 173 ) 174 175 def finalize(self) -> Dict[str, agate.data_types.DataType]: 176 result: Dict[str, agate.data_types.DataType] = {} 177 for key, value in self.items(): 178 if isinstance(value, _NullMarker): 179 # this is what agate would do. 180 result[key] = agate.data_types.Number() 181 else: 182 result[key] = value 183 return result 184 185 186 def _merged_column_types(tables: List[agate.Table]) -> Dict[str, agate.data_types.DataType]: 187 # this is a lot like agate.Table.merge, but with handling for all-null 188 # rows being "any type". 189 new_columns: ColumnTypeBuilder = ColumnTypeBuilder() 190 for table in tables: 191 for i in range(len(table.columns)): 192 column_name: str = table.column_names[i] 193 column_type: NullableAgateType = table.column_types[i] 194 # avoid over-sensitive type inference 195 if all(x is None for x in table.columns[column_name]): 196 column_type = _NullMarker() 197 new_columns[column_name] = column_type 198 199 return new_columns.finalize() 200 201 202 def merge_tables(tables: List[agate.Table]) -> agate.Table: 203 """This is similar to agate.Table.merge, but it handles rows of all 'null' 204 values more gracefully during merges. 205 """ 206 new_columns = _merged_column_types(tables) 207 column_names = tuple(new_columns.keys()) 208 column_types = tuple(new_columns.values()) 209 210 rows: List[agate.Row] = [] 211 for table in tables: 212 if table.column_names == column_names and table.column_types == column_types: 213 rows.extend(table.rows) 214 else: 215 for row in table.rows: 216 data = [row.get(name, None) for name in column_names] 217 rows.append(agate.Row(data, column_names)) 218 # _is_fork to tell agate that we already made things into `Row`s. 219 return agate.Table(rows, column_names, column_types, _is_fork=True) 220 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py --- a/core/dbt/clients/agate_helper.py +++ b/core/dbt/clients/agate_helper.py @@ -12,6 +12,20 @@ BOM = BOM_UTF8.decode("utf-8") # '\ufeff' +class Integer(agate.data_types.DataType): + def cast(self, d): + # by default agate will cast none as a Number + # but we need to cast it as an Integer to preserve + # the type when merging and unioning tables + if type(d) == int or d is None: + return d + else: + raise agate.exceptions.CastError('Can not parse value "%s" as Integer.' % d) + + def jsonify(self, d): + return d + + class Number(agate.data_types.Number): # undo the change in https://github.com/wireservice/agate/pull/733 # i.e. do not cast True and False to numeric 1 and 0 @@ -47,6 +61,7 @@ ) -> agate.TypeTester: types = [ + Integer(null_values=("null", "")), Number(null_values=("null", "")), agate.data_types.Date(null_values=("null", ""), date_format="%Y-%m-%d"), agate.data_types.DateTime(null_values=("null", ""), datetime_format="%Y-%m-%d %H:%M:%S"), @@ -165,6 +180,13 @@ elif isinstance(value, _NullMarker): # use the existing value return + # when one table column is Number while another is Integer, force the column to Number on merge + elif isinstance(value, Integer) and isinstance(existing_type, agate.data_types.Number): + # use the existing value + return + elif isinstance(existing_type, Integer) and isinstance(value, agate.data_types.Number): + # overwrite + super().__setitem__(key, value) elif not isinstance(value, type(existing_type)): # actual type mismatch! raise DbtRuntimeError( @@ -176,8 +198,9 @@ result: Dict[str, agate.data_types.DataType] = {} for key, value in self.items(): if isinstance(value, _NullMarker): - # this is what agate would do. - result[key] = agate.data_types.Number() + # agate would make it a Number but we'll make it Integer so that if this column + # gets merged with another Integer column, it won't get forced to a Number + result[key] = Integer() else: result[key] = value return result
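The patch above adds an `Integer` agate data type and checks it ahead of `Number` in the type tester, so whole numbers keep their `int` representation instead of being coerced to a `Decimal` (which `dbt show` was then emitting as `1.0` in its JSON output). A small sketch of the difference in casting behaviour, assuming the `agate` package is installed; the `Integer` class is restated from the patch:

```python
import agate

# Restated from the patch: ints (and None) pass through untouched.
class Integer(agate.data_types.DataType):
    def cast(self, d):
        if type(d) == int or d is None:
            return d
        raise agate.exceptions.CastError('Can not parse value "%s" as Integer.' % d)

    def jsonify(self, d):
        return d


number = agate.data_types.Number(null_values=("null", ""))
integer = Integer(null_values=("null", ""))

print(repr(number.cast(1)))   # Decimal('1') -- serialised downstream as 1.0
print(repr(integer.cast(1)))  # 1            -- stays an int in the JSON output
```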
{"golden_diff": "diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py\n--- a/core/dbt/clients/agate_helper.py\n+++ b/core/dbt/clients/agate_helper.py\n@@ -12,6 +12,20 @@\n BOM = BOM_UTF8.decode(\"utf-8\") # '\\ufeff'\n \n \n+class Integer(agate.data_types.DataType):\n+ def cast(self, d):\n+ # by default agate will cast none as a Number\n+ # but we need to cast it as an Integer to preserve\n+ # the type when merging and unioning tables\n+ if type(d) == int or d is None:\n+ return d\n+ else:\n+ raise agate.exceptions.CastError('Can not parse value \"%s\" as Integer.' % d)\n+\n+ def jsonify(self, d):\n+ return d\n+\n+\n class Number(agate.data_types.Number):\n # undo the change in https://github.com/wireservice/agate/pull/733\n # i.e. do not cast True and False to numeric 1 and 0\n@@ -47,6 +61,7 @@\n ) -> agate.TypeTester:\n \n types = [\n+ Integer(null_values=(\"null\", \"\")),\n Number(null_values=(\"null\", \"\")),\n agate.data_types.Date(null_values=(\"null\", \"\"), date_format=\"%Y-%m-%d\"),\n agate.data_types.DateTime(null_values=(\"null\", \"\"), datetime_format=\"%Y-%m-%d %H:%M:%S\"),\n@@ -165,6 +180,13 @@\n elif isinstance(value, _NullMarker):\n # use the existing value\n return\n+ # when one table column is Number while another is Integer, force the column to Number on merge\n+ elif isinstance(value, Integer) and isinstance(existing_type, agate.data_types.Number):\n+ # use the existing value\n+ return\n+ elif isinstance(existing_type, Integer) and isinstance(value, agate.data_types.Number):\n+ # overwrite\n+ super().__setitem__(key, value)\n elif not isinstance(value, type(existing_type)):\n # actual type mismatch!\n raise DbtRuntimeError(\n@@ -176,8 +198,9 @@\n result: Dict[str, agate.data_types.DataType] = {}\n for key, value in self.items():\n if isinstance(value, _NullMarker):\n- # this is what agate would do.\n- result[key] = agate.data_types.Number()\n+ # agate would make it a Number but we'll make it Integer so that if this column\n+ # gets merged with another Integer column, it won't get forced to a Number\n+ result[key] = Integer()\n else:\n result[key] = value\n return result\n", "issue": "[CT-2848] [Bug] `dbt show` adding decimal places to non-decimal values\n### Is this a regression in a recent version of dbt-core?\r\n\r\n- [X] I believe this is a regression in dbt-core functionality\r\n- [X] I have searched the existing issues, and I could not find an existing issue for this regression\r\n\r\n### Current Behavior\r\n\r\n`dbt show` is adding decimal places to numeric/integer values incorrectly. This is causing problems for users in the dbt Cloud IDE, as the IDE uses that command to render the preview tab. \r\n\r\n### Expected/Previous Behavior\r\n\r\nPreviously (we think!) this was not an issue, and decimal places were properly handled by dbt show\r\n\r\n### Steps To Reproduce\r\n\r\n1. use dbt-snowflake>=1.5.0\r\n2. create a model in your project:\r\n\r\n```sql\r\n# in my_model.sql\r\nselect\r\n cast(1 as numeric) as _numeric,\r\n cast(1 as integer) as _integer,\r\n cast(1 as decimal) as _decimal\r\n```\r\n3. run `dbt show -s my_model --output JSON`\r\n4. 
get this!\r\n```zsh\r\n\u276f dbt show -s my_model --output json\r\n20:33:15 Running with dbt=1.5.3\r\n20:33:16 Registered adapter: snowflake=1.5.2\r\n20:33:16 Unable to do partial parsing because of a version mismatch\r\n20:33:17 Found 3 models, 4 tests, 0 snapshots, 0 analyses, 436 macros, 0 operations, 0 seed files, 0 sources, 0 exposures, 0 metrics, 0 groups\r\n20:33:17 \r\n20:33:18 Concurrency: 8 threads (target='dev')\r\n20:33:18 \r\n20:33:19 {\r\n \"node\": \"my_model\",\r\n \"show\": [\r\n {\r\n \"_NUMERIC\": 1.0,\r\n \"_INTEGER\": 1.0,\r\n \"_DECIMAL\": 1.0\r\n }\r\n ]\r\n}\r\n```\r\n\r\n### Relevant log output\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: mac\r\n- Python: Python 3.9.16\r\n- dbt (working version): unknown\r\n- dbt (regression version): 1.5.3\r\n```\r\n\r\n\r\n### Which database adapter are you using with dbt?\r\n\r\nsnowflake\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "from codecs import BOM_UTF8\n\nimport agate\nimport datetime\nimport isodate\nimport json\nimport dbt.utils\nfrom typing import Iterable, List, Dict, Union, Optional, Any\n\nfrom dbt.exceptions import DbtRuntimeError\n\nBOM = BOM_UTF8.decode(\"utf-8\") # '\\ufeff'\n\n\nclass Number(agate.data_types.Number):\n # undo the change in https://github.com/wireservice/agate/pull/733\n # i.e. do not cast True and False to numeric 1 and 0\n def cast(self, d):\n if type(d) == bool:\n raise agate.exceptions.CastError(\"Do not cast True to 1 or False to 0.\")\n else:\n return super().cast(d)\n\n\nclass ISODateTime(agate.data_types.DateTime):\n def cast(self, d):\n # this is agate.data_types.DateTime.cast with the \"clever\" bits removed\n # so we only handle ISO8601 stuff\n if isinstance(d, datetime.datetime) or d is None:\n return d\n elif isinstance(d, datetime.date):\n return datetime.datetime.combine(d, datetime.time(0, 0, 0))\n elif isinstance(d, str):\n d = d.strip()\n if d.lower() in self.null_values:\n return None\n try:\n return isodate.parse_datetime(d)\n except: # noqa\n pass\n\n raise agate.exceptions.CastError('Can not parse value \"%s\" as datetime.' 
% d)\n\n\ndef build_type_tester(\n text_columns: Iterable[str], string_null_values: Optional[Iterable[str]] = (\"null\", \"\")\n) -> agate.TypeTester:\n\n types = [\n Number(null_values=(\"null\", \"\")),\n agate.data_types.Date(null_values=(\"null\", \"\"), date_format=\"%Y-%m-%d\"),\n agate.data_types.DateTime(null_values=(\"null\", \"\"), datetime_format=\"%Y-%m-%d %H:%M:%S\"),\n ISODateTime(null_values=(\"null\", \"\")),\n agate.data_types.Boolean(\n true_values=(\"true\",), false_values=(\"false\",), null_values=(\"null\", \"\")\n ),\n agate.data_types.Text(null_values=string_null_values),\n ]\n force = {k: agate.data_types.Text(null_values=string_null_values) for k in text_columns}\n return agate.TypeTester(force=force, types=types)\n\n\nDEFAULT_TYPE_TESTER = build_type_tester(())\n\n\ndef table_from_rows(\n rows: List[Any],\n column_names: Iterable[str],\n text_only_columns: Optional[Iterable[str]] = None,\n) -> agate.Table:\n if text_only_columns is None:\n column_types = DEFAULT_TYPE_TESTER\n else:\n # If text_only_columns are present, prevent coercing empty string or\n # literal 'null' strings to a None representation.\n column_types = build_type_tester(text_only_columns, string_null_values=())\n\n return agate.Table(rows, column_names, column_types=column_types)\n\n\ndef table_from_data(data, column_names: Iterable[str]) -> agate.Table:\n \"Convert a list of dictionaries into an Agate table\"\n\n # The agate table is generated from a list of dicts, so the column order\n # from `data` is not preserved. We can use `select` to reorder the columns\n #\n # If there is no data, create an empty table with the specified columns\n\n if len(data) == 0:\n return agate.Table([], column_names=column_names)\n else:\n table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)\n return table.select(column_names)\n\n\ndef table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:\n \"\"\"\n Convert a list of dictionaries into an Agate table. This method does not\n coerce string values into more specific types (eg. '005' will not be\n coerced to '5'). Additionally, this method does not coerce values to\n None (eg. '' or 'null' will retain their string literal representations).\n \"\"\"\n\n rows = []\n text_only_columns = set()\n for _row in data:\n row = []\n for col_name in column_names:\n value = _row[col_name]\n if isinstance(value, (dict, list, tuple)):\n # Represent container types as json strings\n value = json.dumps(value, cls=dbt.utils.JSONEncoder)\n text_only_columns.add(col_name)\n elif isinstance(value, str):\n text_only_columns.add(col_name)\n row.append(value)\n\n rows.append(row)\n\n return table_from_rows(\n rows=rows, column_names=column_names, text_only_columns=text_only_columns\n )\n\n\ndef empty_table():\n \"Returns an empty Agate table. 
To be used in place of None\"\n\n return agate.Table(rows=[])\n\n\ndef as_matrix(table):\n \"Return an agate table as a matrix of data sans columns\"\n\n return [r.values() for r in table.rows.values()]\n\n\ndef from_csv(abspath, text_columns, delimiter=\",\"):\n type_tester = build_type_tester(text_columns=text_columns)\n with open(abspath, encoding=\"utf-8\") as fp:\n if fp.read(1) != BOM:\n fp.seek(0)\n return agate.Table.from_csv(fp, column_types=type_tester, delimiter=delimiter)\n\n\nclass _NullMarker:\n pass\n\n\nNullableAgateType = Union[agate.data_types.DataType, _NullMarker]\n\n\nclass ColumnTypeBuilder(Dict[str, NullableAgateType]):\n def __init__(self):\n super().__init__()\n\n def __setitem__(self, key, value):\n if key not in self:\n super().__setitem__(key, value)\n return\n\n existing_type = self[key]\n if isinstance(existing_type, _NullMarker):\n # overwrite\n super().__setitem__(key, value)\n elif isinstance(value, _NullMarker):\n # use the existing value\n return\n elif not isinstance(value, type(existing_type)):\n # actual type mismatch!\n raise DbtRuntimeError(\n f\"Tables contain columns with the same names ({key}), \"\n f\"but different types ({value} vs {existing_type})\"\n )\n\n def finalize(self) -> Dict[str, agate.data_types.DataType]:\n result: Dict[str, agate.data_types.DataType] = {}\n for key, value in self.items():\n if isinstance(value, _NullMarker):\n # this is what agate would do.\n result[key] = agate.data_types.Number()\n else:\n result[key] = value\n return result\n\n\ndef _merged_column_types(tables: List[agate.Table]) -> Dict[str, agate.data_types.DataType]:\n # this is a lot like agate.Table.merge, but with handling for all-null\n # rows being \"any type\".\n new_columns: ColumnTypeBuilder = ColumnTypeBuilder()\n for table in tables:\n for i in range(len(table.columns)):\n column_name: str = table.column_names[i]\n column_type: NullableAgateType = table.column_types[i]\n # avoid over-sensitive type inference\n if all(x is None for x in table.columns[column_name]):\n column_type = _NullMarker()\n new_columns[column_name] = column_type\n\n return new_columns.finalize()\n\n\ndef merge_tables(tables: List[agate.Table]) -> agate.Table:\n \"\"\"This is similar to agate.Table.merge, but it handles rows of all 'null'\n values more gracefully during merges.\n \"\"\"\n new_columns = _merged_column_types(tables)\n column_names = tuple(new_columns.keys())\n column_types = tuple(new_columns.values())\n\n rows: List[agate.Row] = []\n for table in tables:\n if table.column_names == column_names and table.column_types == column_types:\n rows.extend(table.rows)\n else:\n for row in table.rows:\n data = [row.get(name, None) for name in column_names]\n rows.append(agate.Row(data, column_names))\n # _is_fork to tell agate that we already made things into `Row`s.\n return agate.Table(rows, column_names, column_types, _is_fork=True)\n", "path": "core/dbt/clients/agate_helper.py"}], "after_files": [{"content": "from codecs import BOM_UTF8\n\nimport agate\nimport datetime\nimport isodate\nimport json\nimport dbt.utils\nfrom typing import Iterable, List, Dict, Union, Optional, Any\n\nfrom dbt.exceptions import DbtRuntimeError\n\nBOM = BOM_UTF8.decode(\"utf-8\") # '\\ufeff'\n\n\nclass Integer(agate.data_types.DataType):\n def cast(self, d):\n # by default agate will cast none as a Number\n # but we need to cast it as an Integer to preserve\n # the type when merging and unioning tables\n if type(d) == int or d is None:\n return d\n else:\n raise agate.exceptions.CastError('Can 
not parse value \"%s\" as Integer.' % d)\n\n def jsonify(self, d):\n return d\n\n\nclass Number(agate.data_types.Number):\n # undo the change in https://github.com/wireservice/agate/pull/733\n # i.e. do not cast True and False to numeric 1 and 0\n def cast(self, d):\n if type(d) == bool:\n raise agate.exceptions.CastError(\"Do not cast True to 1 or False to 0.\")\n else:\n return super().cast(d)\n\n\nclass ISODateTime(agate.data_types.DateTime):\n def cast(self, d):\n # this is agate.data_types.DateTime.cast with the \"clever\" bits removed\n # so we only handle ISO8601 stuff\n if isinstance(d, datetime.datetime) or d is None:\n return d\n elif isinstance(d, datetime.date):\n return datetime.datetime.combine(d, datetime.time(0, 0, 0))\n elif isinstance(d, str):\n d = d.strip()\n if d.lower() in self.null_values:\n return None\n try:\n return isodate.parse_datetime(d)\n except: # noqa\n pass\n\n raise agate.exceptions.CastError('Can not parse value \"%s\" as datetime.' % d)\n\n\ndef build_type_tester(\n text_columns: Iterable[str], string_null_values: Optional[Iterable[str]] = (\"null\", \"\")\n) -> agate.TypeTester:\n\n types = [\n Integer(null_values=(\"null\", \"\")),\n Number(null_values=(\"null\", \"\")),\n agate.data_types.Date(null_values=(\"null\", \"\"), date_format=\"%Y-%m-%d\"),\n agate.data_types.DateTime(null_values=(\"null\", \"\"), datetime_format=\"%Y-%m-%d %H:%M:%S\"),\n ISODateTime(null_values=(\"null\", \"\")),\n agate.data_types.Boolean(\n true_values=(\"true\",), false_values=(\"false\",), null_values=(\"null\", \"\")\n ),\n agate.data_types.Text(null_values=string_null_values),\n ]\n force = {k: agate.data_types.Text(null_values=string_null_values) for k in text_columns}\n return agate.TypeTester(force=force, types=types)\n\n\nDEFAULT_TYPE_TESTER = build_type_tester(())\n\n\ndef table_from_rows(\n rows: List[Any],\n column_names: Iterable[str],\n text_only_columns: Optional[Iterable[str]] = None,\n) -> agate.Table:\n if text_only_columns is None:\n column_types = DEFAULT_TYPE_TESTER\n else:\n # If text_only_columns are present, prevent coercing empty string or\n # literal 'null' strings to a None representation.\n column_types = build_type_tester(text_only_columns, string_null_values=())\n\n return agate.Table(rows, column_names, column_types=column_types)\n\n\ndef table_from_data(data, column_names: Iterable[str]) -> agate.Table:\n \"Convert a list of dictionaries into an Agate table\"\n\n # The agate table is generated from a list of dicts, so the column order\n # from `data` is not preserved. We can use `select` to reorder the columns\n #\n # If there is no data, create an empty table with the specified columns\n\n if len(data) == 0:\n return agate.Table([], column_names=column_names)\n else:\n table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)\n return table.select(column_names)\n\n\ndef table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:\n \"\"\"\n Convert a list of dictionaries into an Agate table. This method does not\n coerce string values into more specific types (eg. '005' will not be\n coerced to '5'). Additionally, this method does not coerce values to\n None (eg. 
'' or 'null' will retain their string literal representations).\n \"\"\"\n\n rows = []\n text_only_columns = set()\n for _row in data:\n row = []\n for col_name in column_names:\n value = _row[col_name]\n if isinstance(value, (dict, list, tuple)):\n # Represent container types as json strings\n value = json.dumps(value, cls=dbt.utils.JSONEncoder)\n text_only_columns.add(col_name)\n elif isinstance(value, str):\n text_only_columns.add(col_name)\n row.append(value)\n\n rows.append(row)\n\n return table_from_rows(\n rows=rows, column_names=column_names, text_only_columns=text_only_columns\n )\n\n\ndef empty_table():\n \"Returns an empty Agate table. To be used in place of None\"\n\n return agate.Table(rows=[])\n\n\ndef as_matrix(table):\n \"Return an agate table as a matrix of data sans columns\"\n\n return [r.values() for r in table.rows.values()]\n\n\ndef from_csv(abspath, text_columns, delimiter=\",\"):\n type_tester = build_type_tester(text_columns=text_columns)\n with open(abspath, encoding=\"utf-8\") as fp:\n if fp.read(1) != BOM:\n fp.seek(0)\n return agate.Table.from_csv(fp, column_types=type_tester, delimiter=delimiter)\n\n\nclass _NullMarker:\n pass\n\n\nNullableAgateType = Union[agate.data_types.DataType, _NullMarker]\n\n\nclass ColumnTypeBuilder(Dict[str, NullableAgateType]):\n def __init__(self):\n super().__init__()\n\n def __setitem__(self, key, value):\n if key not in self:\n super().__setitem__(key, value)\n return\n\n existing_type = self[key]\n if isinstance(existing_type, _NullMarker):\n # overwrite\n super().__setitem__(key, value)\n elif isinstance(value, _NullMarker):\n # use the existing value\n return\n # when one table column is Number while another is Integer, force the column to Number on merge\n elif isinstance(value, Integer) and isinstance(existing_type, agate.data_types.Number):\n # use the existing value\n return\n elif isinstance(existing_type, Integer) and isinstance(value, agate.data_types.Number):\n # overwrite\n super().__setitem__(key, value)\n elif not isinstance(value, type(existing_type)):\n # actual type mismatch!\n raise DbtRuntimeError(\n f\"Tables contain columns with the same names ({key}), \"\n f\"but different types ({value} vs {existing_type})\"\n )\n\n def finalize(self) -> Dict[str, agate.data_types.DataType]:\n result: Dict[str, agate.data_types.DataType] = {}\n for key, value in self.items():\n if isinstance(value, _NullMarker):\n # agate would make it a Number but we'll make it Integer so that if this column\n # gets merged with another Integer column, it won't get forced to a Number\n result[key] = Integer()\n else:\n result[key] = value\n return result\n\n\ndef _merged_column_types(tables: List[agate.Table]) -> Dict[str, agate.data_types.DataType]:\n # this is a lot like agate.Table.merge, but with handling for all-null\n # rows being \"any type\".\n new_columns: ColumnTypeBuilder = ColumnTypeBuilder()\n for table in tables:\n for i in range(len(table.columns)):\n column_name: str = table.column_names[i]\n column_type: NullableAgateType = table.column_types[i]\n # avoid over-sensitive type inference\n if all(x is None for x in table.columns[column_name]):\n column_type = _NullMarker()\n new_columns[column_name] = column_type\n\n return new_columns.finalize()\n\n\ndef merge_tables(tables: List[agate.Table]) -> agate.Table:\n \"\"\"This is similar to agate.Table.merge, but it handles rows of all 'null'\n values more gracefully during merges.\n \"\"\"\n new_columns = _merged_column_types(tables)\n column_names = tuple(new_columns.keys())\n 
column_types = tuple(new_columns.values())\n\n rows: List[agate.Row] = []\n for table in tables:\n if table.column_names == column_names and table.column_types == column_types:\n rows.extend(table.rows)\n else:\n for row in table.rows:\n data = [row.get(name, None) for name in column_names]\n rows.append(agate.Row(data, column_names))\n # _is_fork to tell agate that we already made things into `Row`s.\n return agate.Table(rows, column_names, column_types, _is_fork=True)\n", "path": "core/dbt/clients/agate_helper.py"}]}
3,170
616
gh_patches_debug_27225
rasdani/github-patches
git_diff
getpelican__pelican-536
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pagination fails when files don't have an extension The pagination process assumes that files will have extensions (i.e. usually ".html"). This fails if the URL scheme for the site uses extension-less names. My settings.py has: ``` ARTICLE_URL = '{slug}' ARTICLE_SAVE_AS = '{slug}' AUTHOR_URL = 'author/{name}.html' CATEGORY_URL = 'category/{name}' CATEGORY_SAVE_AS = 'category/{name}' TAG_URL = 'tag/{name}' TAG_SAVE_AS = 'tag/{name}' ``` This means that, for example, the linux tag is listed at `http://example.com/tag/linux` -- note: no extension. Using commit 1580f7f (current master as of this writing), only the final page of tags is generated, because the same file is rewritten for each page because the name doesn't change. I have a patch for this and will submit a PR. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pelican/writers.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from __future__ import with_statement 3 4 import os 5 import re 6 import locale 7 import logging 8 9 from codecs import open 10 from functools import partial 11 from feedgenerator import Atom1Feed, Rss201rev2Feed 12 from jinja2 import Markup 13 from pelican.paginator import Paginator 14 from pelican.utils import get_relative_path, set_date_tzinfo 15 16 logger = logging.getLogger(__name__) 17 18 19 class Writer(object): 20 21 def __init__(self, output_path, settings=None): 22 self.output_path = output_path 23 self.reminder = dict() 24 self.settings = settings or {} 25 26 def _create_new_feed(self, feed_type, context): 27 feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed 28 sitename = Markup(context['SITENAME']).striptags() 29 feed = feed_class( 30 title=sitename, 31 link=(self.site_url + '/'), 32 feed_url=self.feed_url, 33 description=context.get('SITESUBTITLE', '')) 34 return feed 35 36 def _add_item_to_the_feed(self, feed, item): 37 38 title = Markup(item.title).striptags() 39 feed.add_item( 40 title=title, 41 link='%s/%s' % (self.site_url, item.url), 42 unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''), 43 item.date.date(), item.url), 44 description=item.content, 45 categories=item.tags if hasattr(item, 'tags') else None, 46 author_name=getattr(item, 'author', 'John Doe'), 47 pubdate=set_date_tzinfo(item.date, 48 self.settings.get('TIMEZONE', None))) 49 50 def write_feed(self, elements, context, filename=None, feed_type='atom'): 51 """Generate a feed with the list of articles provided 52 53 Return the feed. If no output_path or filename is specified, just 54 return the feed object. 55 56 :param elements: the articles to put on the feed. 57 :param context: the context to get the feed metadata. 58 :param filename: the filename to output. 
59 :param feed_type: the feed type to use (atom or rss) 60 """ 61 old_locale = locale.setlocale(locale.LC_ALL) 62 locale.setlocale(locale.LC_ALL, 'C') 63 try: 64 self.site_url = context.get('SITEURL', get_relative_path(filename)) 65 self.feed_domain = context.get('FEED_DOMAIN') 66 self.feed_url = '%s/%s' % (self.feed_domain, filename) 67 68 feed = self._create_new_feed(feed_type, context) 69 70 max_items = len(elements) 71 if self.settings['FEED_MAX_ITEMS']: 72 max_items = min(self.settings['FEED_MAX_ITEMS'], max_items) 73 for i in xrange(max_items): 74 self._add_item_to_the_feed(feed, elements[i]) 75 76 if filename: 77 complete_path = os.path.join(self.output_path, filename) 78 try: 79 os.makedirs(os.path.dirname(complete_path)) 80 except Exception: 81 pass 82 fp = open(complete_path, 'w') 83 feed.write(fp, 'utf-8') 84 logger.info('writing %s' % complete_path) 85 86 fp.close() 87 return feed 88 finally: 89 locale.setlocale(locale.LC_ALL, old_locale) 90 91 def write_file(self, name, template, context, relative_urls=True, 92 paginated=None, **kwargs): 93 """Render the template and write the file. 94 95 :param name: name of the file to output 96 :param template: template to use to generate the content 97 :param context: dict to pass to the templates. 98 :param relative_urls: use relative urls or absolutes ones 99 :param paginated: dict of article list to paginate - must have the 100 same length (same list in different orders) 101 :param **kwargs: additional variables to pass to the templates 102 """ 103 104 if name is False: 105 return 106 elif not name: 107 # other stuff, just return for now 108 return 109 110 def _write_file(template, localcontext, output_path, name): 111 """Render the template write the file.""" 112 old_locale = locale.setlocale(locale.LC_ALL) 113 locale.setlocale(locale.LC_ALL, 'C') 114 try: 115 output = template.render(localcontext) 116 finally: 117 locale.setlocale(locale.LC_ALL, old_locale) 118 filename = os.sep.join((output_path, name)) 119 try: 120 os.makedirs(os.path.dirname(filename)) 121 except Exception: 122 pass 123 with open(filename, 'w', encoding='utf-8') as f: 124 f.write(output) 125 logger.info(u'writing %s' % filename) 126 127 localcontext = context.copy() 128 if relative_urls: 129 localcontext['SITEURL'] = get_relative_path(name) 130 131 localcontext.update(kwargs) 132 if relative_urls: 133 self.update_context_contents(name, localcontext) 134 135 # check paginated 136 paginated = paginated or {} 137 if paginated: 138 # pagination needed, init paginators 139 paginators = {} 140 for key in paginated.iterkeys(): 141 object_list = paginated[key] 142 143 if self.settings.get('DEFAULT_PAGINATION'): 144 paginators[key] = Paginator(object_list, 145 self.settings.get('DEFAULT_PAGINATION'), 146 self.settings.get('DEFAULT_ORPHANS')) 147 else: 148 paginators[key] = Paginator(object_list, len(object_list)) 149 150 # generated pages, and write 151 for page_num in range(paginators.values()[0].num_pages): 152 paginated_localcontext = localcontext.copy() 153 paginated_name = name 154 for key in paginators.iterkeys(): 155 paginator = paginators[key] 156 page = paginator.page(page_num + 1) 157 paginated_localcontext.update( 158 {'%s_paginator' % key: paginator, 159 '%s_page' % key: page}) 160 if page_num > 0: 161 ext = '.' 
+ paginated_name.rsplit('.')[-1] 162 paginated_name = paginated_name.replace(ext, 163 '%s%s' % (page_num + 1, ext)) 164 165 _write_file(template, paginated_localcontext, self.output_path, 166 paginated_name) 167 else: 168 # no pagination 169 _write_file(template, localcontext, self.output_path, name) 170 171 def update_context_contents(self, name, context): 172 """Recursively run the context to find elements (articles, pages, etc) 173 whose content getter needs to be modified in order to deal with 174 relative paths. 175 176 :param name: name of the file to output. 177 :param context: dict that will be passed to the templates, which need 178 to be updated. 179 """ 180 def _update_content(name, input): 181 """Change all the relatives paths of the input content to relatives 182 paths suitable fot the ouput content 183 184 :param name: path of the output. 185 :param input: input resource that will be passed to the templates. 186 """ 187 content = input._content 188 189 hrefs = re.compile(r""" 190 (?P<markup><\s*[^\>]* # match tag with src and href attr 191 (?:href|src)\s*=\s* 192 ) 193 (?P<quote>["\']) # require value to be quoted 194 (?![#?]) # don't match fragment or query URLs 195 (?![a-z]+:) # don't match protocol URLS 196 (?P<path>.*?) # the url value 197 \2""", re.X) 198 199 def replacer(m): 200 relative_path = m.group('path') 201 dest_path = os.path.normpath( 202 os.sep.join((get_relative_path(name), "static", 203 relative_path))) 204 205 return m.group('markup') + m.group('quote') + dest_path \ 206 + m.group('quote') 207 208 return hrefs.sub(replacer, content) 209 210 if context is None: 211 return 212 if hasattr(context, 'values'): 213 context = context.values() 214 215 for item in context: 216 # run recursively on iterables 217 if hasattr(item, '__iter__'): 218 self.update_context_contents(name, item) 219 220 # if it is a content, patch it 221 elif hasattr(item, '_content'): 222 relative_path = get_relative_path(name) 223 224 paths = self.reminder.setdefault(item, []) 225 if relative_path not in paths: 226 paths.append(relative_path) 227 setattr(item, "_get_content", 228 partial(_update_content, name, item)) 229 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -148,9 +148,9 @@ paginators[key] = Paginator(object_list, len(object_list)) # generated pages, and write + name_root, ext = os.path.splitext(name) for page_num in range(paginators.values()[0].num_pages): paginated_localcontext = localcontext.copy() - paginated_name = name for key in paginators.iterkeys(): paginator = paginators[key] page = paginator.page(page_num + 1) @@ -158,9 +158,10 @@ {'%s_paginator' % key: paginator, '%s_page' % key: page}) if page_num > 0: - ext = '.' + paginated_name.rsplit('.')[-1] - paginated_name = paginated_name.replace(ext, - '%s%s' % (page_num + 1, ext)) + paginated_name = '%s%s%s' % ( + name_root, page_num + 1, ext) + else: + paginated_name = name _write_file(template, paginated_localcontext, self.output_path, paginated_name)
{"golden_diff": "diff --git a/pelican/writers.py b/pelican/writers.py\n--- a/pelican/writers.py\n+++ b/pelican/writers.py\n@@ -148,9 +148,9 @@\n paginators[key] = Paginator(object_list, len(object_list))\n \n # generated pages, and write\n+ name_root, ext = os.path.splitext(name)\n for page_num in range(paginators.values()[0].num_pages):\n paginated_localcontext = localcontext.copy()\n- paginated_name = name\n for key in paginators.iterkeys():\n paginator = paginators[key]\n page = paginator.page(page_num + 1)\n@@ -158,9 +158,10 @@\n {'%s_paginator' % key: paginator,\n '%s_page' % key: page})\n if page_num > 0:\n- ext = '.' + paginated_name.rsplit('.')[-1]\n- paginated_name = paginated_name.replace(ext,\n- '%s%s' % (page_num + 1, ext))\n+ paginated_name = '%s%s%s' % (\n+ name_root, page_num + 1, ext)\n+ else:\n+ paginated_name = name\n \n _write_file(template, paginated_localcontext, self.output_path,\n paginated_name)\n", "issue": "Pagination fails when files don't have an extension\nThe pagination process assumes that files will have extensions (i.e. usually \".html\"). This fails if the URL scheme for the site uses extension-less names. My settings.py has:\n\n```\nARTICLE_URL = '{slug}'\nARTICLE_SAVE_AS = '{slug}'\nAUTHOR_URL = 'author/{name}.html'\nCATEGORY_URL = 'category/{name}'\nCATEGORY_SAVE_AS = 'category/{name}'\nTAG_URL = 'tag/{name}'\nTAG_SAVE_AS = 'tag/{name}'\n```\n\nThis means that, for example, the linux tag is listed at `http://example.com/tag/linux` -- note: no extension. Using commit 1580f7f (current master as of this writing), only the final page of tags is generated, because the same file is rewritten for each page because the name doesn't change.\n\nI have a patch for this and will submit a PR.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import with_statement\n\nimport os\nimport re\nimport locale\nimport logging\n\nfrom codecs import open\nfrom functools import partial\nfrom feedgenerator import Atom1Feed, Rss201rev2Feed\nfrom jinja2 import Markup\nfrom pelican.paginator import Paginator\nfrom pelican.utils import get_relative_path, set_date_tzinfo\n\nlogger = logging.getLogger(__name__)\n\n\nclass Writer(object):\n\n def __init__(self, output_path, settings=None):\n self.output_path = output_path\n self.reminder = dict()\n self.settings = settings or {}\n\n def _create_new_feed(self, feed_type, context):\n feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed\n sitename = Markup(context['SITENAME']).striptags()\n feed = feed_class(\n title=sitename,\n link=(self.site_url + '/'),\n feed_url=self.feed_url,\n description=context.get('SITESUBTITLE', ''))\n return feed\n\n def _add_item_to_the_feed(self, feed, item):\n\n title = Markup(item.title).striptags()\n feed.add_item(\n title=title,\n link='%s/%s' % (self.site_url, item.url),\n unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),\n item.date.date(), item.url),\n description=item.content,\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 'author', 'John Doe'),\n pubdate=set_date_tzinfo(item.date,\n self.settings.get('TIMEZONE', None)))\n\n def write_feed(self, elements, context, filename=None, feed_type='atom'):\n \"\"\"Generate a feed with the list of articles provided\n\n Return the feed. 
If no output_path or filename is specified, just\n return the feed object.\n\n :param elements: the articles to put on the feed.\n :param context: the context to get the feed metadata.\n :param filename: the filename to output.\n :param feed_type: the feed type to use (atom or rss)\n \"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, 'C')\n try:\n self.site_url = context.get('SITEURL', get_relative_path(filename))\n self.feed_domain = context.get('FEED_DOMAIN')\n self.feed_url = '%s/%s' % (self.feed_domain, filename)\n\n feed = self._create_new_feed(feed_type, context)\n\n max_items = len(elements)\n if self.settings['FEED_MAX_ITEMS']:\n max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)\n for i in xrange(max_items):\n self._add_item_to_the_feed(feed, elements[i])\n\n if filename:\n complete_path = os.path.join(self.output_path, filename)\n try:\n os.makedirs(os.path.dirname(complete_path))\n except Exception:\n pass\n fp = open(complete_path, 'w')\n feed.write(fp, 'utf-8')\n logger.info('writing %s' % complete_path)\n\n fp.close()\n return feed\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n\n def write_file(self, name, template, context, relative_urls=True,\n paginated=None, **kwargs):\n \"\"\"Render the template and write the file.\n\n :param name: name of the file to output\n :param template: template to use to generate the content\n :param context: dict to pass to the templates.\n :param relative_urls: use relative urls or absolutes ones\n :param paginated: dict of article list to paginate - must have the\n same length (same list in different orders)\n :param **kwargs: additional variables to pass to the templates\n \"\"\"\n\n if name is False:\n return\n elif not name:\n # other stuff, just return for now\n return\n\n def _write_file(template, localcontext, output_path, name):\n \"\"\"Render the template write the file.\"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, 'C')\n try:\n output = template.render(localcontext)\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n filename = os.sep.join((output_path, name))\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(output)\n logger.info(u'writing %s' % filename)\n\n localcontext = context.copy()\n if relative_urls:\n localcontext['SITEURL'] = get_relative_path(name)\n\n localcontext.update(kwargs)\n if relative_urls:\n self.update_context_contents(name, localcontext)\n\n # check paginated\n paginated = paginated or {}\n if paginated:\n # pagination needed, init paginators\n paginators = {}\n for key in paginated.iterkeys():\n object_list = paginated[key]\n\n if self.settings.get('DEFAULT_PAGINATION'):\n paginators[key] = Paginator(object_list,\n self.settings.get('DEFAULT_PAGINATION'),\n self.settings.get('DEFAULT_ORPHANS'))\n else:\n paginators[key] = Paginator(object_list, len(object_list))\n\n # generated pages, and write\n for page_num in range(paginators.values()[0].num_pages):\n paginated_localcontext = localcontext.copy()\n paginated_name = name\n for key in paginators.iterkeys():\n paginator = paginators[key]\n page = paginator.page(page_num + 1)\n paginated_localcontext.update(\n {'%s_paginator' % key: paginator,\n '%s_page' % key: page})\n if page_num > 0:\n ext = '.' 
+ paginated_name.rsplit('.')[-1]\n paginated_name = paginated_name.replace(ext,\n '%s%s' % (page_num + 1, ext))\n\n _write_file(template, paginated_localcontext, self.output_path,\n paginated_name)\n else:\n # no pagination\n _write_file(template, localcontext, self.output_path, name)\n\n def update_context_contents(self, name, context):\n \"\"\"Recursively run the context to find elements (articles, pages, etc)\n whose content getter needs to be modified in order to deal with\n relative paths.\n\n :param name: name of the file to output.\n :param context: dict that will be passed to the templates, which need\n to be updated.\n \"\"\"\n def _update_content(name, input):\n \"\"\"Change all the relatives paths of the input content to relatives\n paths suitable fot the ouput content\n\n :param name: path of the output.\n :param input: input resource that will be passed to the templates.\n \"\"\"\n content = input._content\n\n hrefs = re.compile(r\"\"\"\n (?P<markup><\\s*[^\\>]* # match tag with src and href attr\n (?:href|src)\\s*=\\s*\n )\n (?P<quote>[\"\\']) # require value to be quoted\n (?![#?]) # don't match fragment or query URLs\n (?![a-z]+:) # don't match protocol URLS\n (?P<path>.*?) # the url value\n \\2\"\"\", re.X)\n\n def replacer(m):\n relative_path = m.group('path')\n dest_path = os.path.normpath(\n os.sep.join((get_relative_path(name), \"static\",\n relative_path)))\n\n return m.group('markup') + m.group('quote') + dest_path \\\n + m.group('quote')\n\n return hrefs.sub(replacer, content)\n\n if context is None:\n return\n if hasattr(context, 'values'):\n context = context.values()\n\n for item in context:\n # run recursively on iterables\n if hasattr(item, '__iter__'):\n self.update_context_contents(name, item)\n\n # if it is a content, patch it\n elif hasattr(item, '_content'):\n relative_path = get_relative_path(name)\n\n paths = self.reminder.setdefault(item, [])\n if relative_path not in paths:\n paths.append(relative_path)\n setattr(item, \"_get_content\",\n partial(_update_content, name, item))\n", "path": "pelican/writers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import with_statement\n\nimport os\nimport re\nimport locale\nimport logging\n\nfrom codecs import open\nfrom functools import partial\nfrom feedgenerator import Atom1Feed, Rss201rev2Feed\nfrom jinja2 import Markup\nfrom pelican.paginator import Paginator\nfrom pelican.utils import get_relative_path, set_date_tzinfo\n\nlogger = logging.getLogger(__name__)\n\n\nclass Writer(object):\n\n def __init__(self, output_path, settings=None):\n self.output_path = output_path\n self.reminder = dict()\n self.settings = settings or {}\n\n def _create_new_feed(self, feed_type, context):\n feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed\n sitename = Markup(context['SITENAME']).striptags()\n feed = feed_class(\n title=sitename,\n link=(self.site_url + '/'),\n feed_url=self.feed_url,\n description=context.get('SITESUBTITLE', ''))\n return feed\n\n def _add_item_to_the_feed(self, feed, item):\n\n title = Markup(item.title).striptags()\n feed.add_item(\n title=title,\n link='%s/%s' % (self.site_url, item.url),\n unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),\n item.date.date(), item.url),\n description=item.content,\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 'author', 'John Doe'),\n pubdate=set_date_tzinfo(item.date,\n self.settings.get('TIMEZONE', None)))\n\n def write_feed(self, elements, context, filename=None, 
feed_type='atom'):\n \"\"\"Generate a feed with the list of articles provided\n\n Return the feed. If no output_path or filename is specified, just\n return the feed object.\n\n :param elements: the articles to put on the feed.\n :param context: the context to get the feed metadata.\n :param filename: the filename to output.\n :param feed_type: the feed type to use (atom or rss)\n \"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, 'C')\n try:\n self.site_url = context.get('SITEURL', get_relative_path(filename))\n self.feed_domain = context.get('FEED_DOMAIN')\n self.feed_url = '%s/%s' % (self.feed_domain, filename)\n\n feed = self._create_new_feed(feed_type, context)\n\n max_items = len(elements)\n if self.settings['FEED_MAX_ITEMS']:\n max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)\n for i in xrange(max_items):\n self._add_item_to_the_feed(feed, elements[i])\n\n if filename:\n complete_path = os.path.join(self.output_path, filename)\n try:\n os.makedirs(os.path.dirname(complete_path))\n except Exception:\n pass\n fp = open(complete_path, 'w')\n feed.write(fp, 'utf-8')\n logger.info('writing %s' % complete_path)\n\n fp.close()\n return feed\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n\n def write_file(self, name, template, context, relative_urls=True,\n paginated=None, **kwargs):\n \"\"\"Render the template and write the file.\n\n :param name: name of the file to output\n :param template: template to use to generate the content\n :param context: dict to pass to the templates.\n :param relative_urls: use relative urls or absolutes ones\n :param paginated: dict of article list to paginate - must have the\n same length (same list in different orders)\n :param **kwargs: additional variables to pass to the templates\n \"\"\"\n\n if name is False:\n return\n elif not name:\n # other stuff, just return for now\n return\n\n def _write_file(template, localcontext, output_path, name):\n \"\"\"Render the template write the file.\"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, 'C')\n try:\n output = template.render(localcontext)\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n filename = os.sep.join((output_path, name))\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(output)\n logger.info(u'writing %s' % filename)\n\n localcontext = context.copy()\n if relative_urls:\n localcontext['SITEURL'] = get_relative_path(name)\n\n localcontext.update(kwargs)\n if relative_urls:\n self.update_context_contents(name, localcontext)\n\n # check paginated\n paginated = paginated or {}\n if paginated:\n # pagination needed, init paginators\n paginators = {}\n for key in paginated.iterkeys():\n object_list = paginated[key]\n\n if self.settings.get('DEFAULT_PAGINATION'):\n paginators[key] = Paginator(object_list,\n self.settings.get('DEFAULT_PAGINATION'),\n self.settings.get('DEFAULT_ORPHANS'))\n else:\n paginators[key] = Paginator(object_list, len(object_list))\n\n # generated pages, and write\n name_root, ext = os.path.splitext(name)\n for page_num in range(paginators.values()[0].num_pages):\n paginated_localcontext = localcontext.copy()\n for key in paginators.iterkeys():\n paginator = paginators[key]\n page = paginator.page(page_num + 1)\n paginated_localcontext.update(\n {'%s_paginator' % key: paginator,\n '%s_page' % key: page})\n if page_num > 0:\n paginated_name = '%s%s%s' % (\n name_root, page_num + 1, ext)\n 
else:\n paginated_name = name\n\n _write_file(template, paginated_localcontext, self.output_path,\n paginated_name)\n else:\n # no pagination\n _write_file(template, localcontext, self.output_path, name)\n\n def update_context_contents(self, name, context):\n \"\"\"Recursively run the context to find elements (articles, pages, etc)\n whose content getter needs to be modified in order to deal with\n relative paths.\n\n :param name: name of the file to output.\n :param context: dict that will be passed to the templates, which need\n to be updated.\n \"\"\"\n def _update_content(name, input):\n \"\"\"Change all the relatives paths of the input content to relatives\n paths suitable fot the ouput content\n\n :param name: path of the output.\n :param input: input resource that will be passed to the templates.\n \"\"\"\n content = input._content\n\n hrefs = re.compile(r\"\"\"\n (?P<markup><\\s*[^\\>]* # match tag with src and href attr\n (?:href|src)\\s*=\\s*\n )\n (?P<quote>[\"\\']) # require value to be quoted\n (?![#?]) # don't match fragment or query URLs\n (?![a-z]+:) # don't match protocol URLS\n (?P<path>.*?) # the url value\n \\2\"\"\", re.X)\n\n def replacer(m):\n relative_path = m.group('path')\n dest_path = os.path.normpath(\n os.sep.join((get_relative_path(name), \"static\",\n relative_path)))\n\n return m.group('markup') + m.group('quote') + dest_path \\\n + m.group('quote')\n\n return hrefs.sub(replacer, content)\n\n if context is None:\n return\n if hasattr(context, 'values'):\n context = context.values()\n\n for item in context:\n # run recursively on iterables\n if hasattr(item, '__iter__'):\n self.update_context_contents(name, item)\n\n # if it is a content, patch it\n elif hasattr(item, '_content'):\n relative_path = get_relative_path(name)\n\n paths = self.reminder.setdefault(item, [])\n if relative_path not in paths:\n paths.append(relative_path)\n setattr(item, \"_get_content\",\n partial(_update_content, name, item))\n", "path": "pelican/writers.py"}]}
2,870
296
gh_patches_debug_2247
rasdani/github-patches
git_diff
PaddlePaddle__PaddleDetection-8421
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 训练出现长警告 ### 问题确认 Search before asking - [X] 我已经查询[历史issue](https://github.com/PaddlePaddle/PaddleDetection/issues),没有发现相似的bug。I have searched the [issues](https://github.com/PaddlePaddle/PaddleDetection/issues) and found no similar bug report. ### Bug组件 Bug Component _No response_ ### Bug描述 Describe the Bug 训练出现长警告 ``` I0706 13:09:13.075042 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6. I0706 13:09:13.382442 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6. ``` ### 复现环境 Environment PaddleDetection2.6 PaddlePaddle2.5.0 经过排查将`ppdet/utils/stats.py`第77行进行如下修改 `v.update(stats[k].numpy())`→`v.update(float(stats[k]))` ### Bug描述确认 Bug description confirmation - [X] 我确认已经提供了Bug复现步骤、代码改动说明、以及环境信息,确认问题是可以复现的。I confirm that the bug replication steps, code change instructions, and environment information have been provided, and the problem can be reproduced. ### 是否愿意提交PR? Are you willing to submit a PR? - [ ] 我愿意提交PR!I'd like to help by submitting a PR! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ppdet/utils/stats.py` Content: ``` 1 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import collections 16 import numpy as np 17 18 __all__ = ['SmoothedValue', 'TrainingStats'] 19 20 21 class SmoothedValue(object): 22 """Track a series of values and provide access to smoothed values over a 23 window or the global series average. 24 """ 25 26 def __init__(self, window_size=20, fmt=None): 27 if fmt is None: 28 fmt = "{median:.4f} ({avg:.4f})" 29 self.deque = collections.deque(maxlen=window_size) 30 self.fmt = fmt 31 self.total = 0. 
32 self.count = 0 33 34 def update(self, value, n=1): 35 self.deque.append(value) 36 self.count += n 37 self.total += value * n 38 39 @property 40 def median(self): 41 return np.median(self.deque) 42 43 @property 44 def avg(self): 45 return np.mean(self.deque) 46 47 @property 48 def max(self): 49 return np.max(self.deque) 50 51 @property 52 def value(self): 53 return self.deque[-1] 54 55 @property 56 def global_avg(self): 57 return self.total / self.count 58 59 def __str__(self): 60 return self.fmt.format( 61 median=self.median, avg=self.avg, max=self.max, value=self.value) 62 63 64 class TrainingStats(object): 65 def __init__(self, window_size, delimiter=' '): 66 self.meters = None 67 self.window_size = window_size 68 self.delimiter = delimiter 69 70 def update(self, stats): 71 if self.meters is None: 72 self.meters = { 73 k: SmoothedValue(self.window_size) 74 for k in stats.keys() 75 } 76 for k, v in self.meters.items(): 77 v.update(stats[k].numpy()) 78 79 def get(self, extras=None): 80 stats = collections.OrderedDict() 81 if extras: 82 for k, v in extras.items(): 83 stats[k] = v 84 for k, v in self.meters.items(): 85 stats[k] = format(v.median, '.6f') 86 87 return stats 88 89 def log(self, extras=None): 90 d = self.get(extras) 91 strs = [] 92 for k, v in d.items(): 93 strs.append("{}: {}".format(k, str(v))) 94 return self.delimiter.join(strs) 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ppdet/utils/stats.py b/ppdet/utils/stats.py --- a/ppdet/utils/stats.py +++ b/ppdet/utils/stats.py @@ -74,7 +74,7 @@ for k in stats.keys() } for k, v in self.meters.items(): - v.update(stats[k].numpy()) + v.update(float(stats[k])) def get(self, extras=None): stats = collections.OrderedDict()
{"golden_diff": "diff --git a/ppdet/utils/stats.py b/ppdet/utils/stats.py\n--- a/ppdet/utils/stats.py\n+++ b/ppdet/utils/stats.py\n@@ -74,7 +74,7 @@\n for k in stats.keys()\n }\n for k, v in self.meters.items():\n- v.update(stats[k].numpy())\n+ v.update(float(stats[k]))\n \n def get(self, extras=None):\n stats = collections.OrderedDict()\n", "issue": "\u8bad\u7ec3\u51fa\u73b0\u957f\u8b66\u544a\n### \u95ee\u9898\u786e\u8ba4 Search before asking\n\n- [X] \u6211\u5df2\u7ecf\u67e5\u8be2[\u5386\u53f2issue](https://github.com/PaddlePaddle/PaddleDetection/issues)\uff0c\u6ca1\u6709\u53d1\u73b0\u76f8\u4f3c\u7684bug\u3002I have searched the [issues](https://github.com/PaddlePaddle/PaddleDetection/issues) and found no similar bug report.\n\n\n### Bug\u7ec4\u4ef6 Bug Component\n\n_No response_\n\n### Bug\u63cf\u8ff0 Describe the Bug\n\n\u8bad\u7ec3\u51fa\u73b0\u957f\u8b66\u544a\r\n```\r\nI0706 13:09:13.075042 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6.\r\nI0706 13:09:13.382442 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6.\r\n```\n\n### \u590d\u73b0\u73af\u5883 Environment\n\nPaddleDetection2.6\r\nPaddlePaddle2.5.0\r\n\r\n\u7ecf\u8fc7\u6392\u67e5\u5c06`ppdet/utils/stats.py`\u7b2c77\u884c\u8fdb\u884c\u5982\u4e0b\u4fee\u6539\r\n`v.update(stats[k].numpy())`\u2192`v.update(float(stats[k]))`\n\n### Bug\u63cf\u8ff0\u786e\u8ba4 Bug description confirmation\n\n- [X] \u6211\u786e\u8ba4\u5df2\u7ecf\u63d0\u4f9b\u4e86Bug\u590d\u73b0\u6b65\u9aa4\u3001\u4ee3\u7801\u6539\u52a8\u8bf4\u660e\u3001\u4ee5\u53ca\u73af\u5883\u4fe1\u606f\uff0c\u786e\u8ba4\u95ee\u9898\u662f\u53ef\u4ee5\u590d\u73b0\u7684\u3002I confirm that the bug replication steps, code change instructions, and environment information have been provided, and the problem can be reproduced.\n\n\n### \u662f\u5426\u613f\u610f\u63d0\u4ea4PR\uff1f Are you willing to submit a PR?\n\n- [ ] \u6211\u613f\u610f\u63d0\u4ea4PR\uff01I'd like to help by submitting a PR!\n", "before_files": [{"content": "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport numpy as np\n\n__all__ = ['SmoothedValue', 'TrainingStats']\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({avg:.4f})\"\n self.deque = collections.deque(maxlen=window_size)\n self.fmt = fmt\n self.total = 0.\n self.count = 0\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n @property\n def median(self):\n return np.median(self.deque)\n\n @property\n def avg(self):\n return np.mean(self.deque)\n\n @property\n def max(self):\n return np.max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n def __str__(self):\n return self.fmt.format(\n median=self.median, avg=self.avg, max=self.max, value=self.value)\n\n\nclass TrainingStats(object):\n def __init__(self, window_size, delimiter=' '):\n self.meters = None\n self.window_size = window_size\n self.delimiter = delimiter\n\n def update(self, stats):\n if self.meters is None:\n self.meters = {\n k: SmoothedValue(self.window_size)\n for k in stats.keys()\n }\n for k, v in self.meters.items():\n v.update(stats[k].numpy())\n\n def get(self, extras=None):\n stats = collections.OrderedDict()\n if extras:\n for k, v in extras.items():\n stats[k] = v\n for k, v in self.meters.items():\n stats[k] = format(v.median, '.6f')\n\n return stats\n\n def log(self, extras=None):\n d = self.get(extras)\n strs = []\n for k, v in d.items():\n strs.append(\"{}: {}\".format(k, str(v)))\n return self.delimiter.join(strs)\n", "path": "ppdet/utils/stats.py"}], "after_files": [{"content": "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport numpy as np\n\n__all__ = ['SmoothedValue', 'TrainingStats']\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({avg:.4f})\"\n self.deque = collections.deque(maxlen=window_size)\n self.fmt = fmt\n self.total = 0.\n self.count = 0\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n @property\n def median(self):\n return np.median(self.deque)\n\n @property\n def avg(self):\n return np.mean(self.deque)\n\n @property\n def max(self):\n return np.max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n def __str__(self):\n return self.fmt.format(\n median=self.median, avg=self.avg, max=self.max, value=self.value)\n\n\nclass TrainingStats(object):\n def __init__(self, window_size, delimiter=' '):\n self.meters = None\n self.window_size = window_size\n self.delimiter = delimiter\n\n def update(self, stats):\n if self.meters is None:\n self.meters = {\n k: SmoothedValue(self.window_size)\n for k in stats.keys()\n }\n for k, v in self.meters.items():\n v.update(float(stats[k]))\n\n def get(self, extras=None):\n stats = collections.OrderedDict()\n if extras:\n for k, v in extras.items():\n stats[k] = v\n for k, v in self.meters.items():\n stats[k] = format(v.median, '.6f')\n\n return stats\n\n def log(self, extras=None):\n d = self.get(extras)\n strs = []\n for k, v in d.items():\n strs.append(\"{}: {}\".format(k, str(v)))\n return self.delimiter.join(strs)\n", "path": "ppdet/utils/stats.py"}]}
1,600
98
gh_patches_debug_11868
rasdani/github-patches
git_diff
keras-team__keras-19643
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Layernorm not supporting axis [-2, 3] Hi, I wanted to normalise my output on the -2 and -3 axis, (image height and width), however, it seems that the with rms_scaling=true, the self.gamma is not broadcasted to same shape as layer input causing this error, ``` inputs shape: (1, 1920, 1200, 3) inv shape: (1, 1, 1, 3) gamma_cast shape: (1920, 1200) inv shape: (1, 1920, 1200, 3) 2024-04-30 13:50:54.238379: W tensorflow/core/framework/local_rendezvous.cc:404] Local rendezvous is aborting with status: INVALID_ARGUMENT: Incompatible shapes: [1,1920,1200,3] vs. [1920,1200] Traceback (most recent call last): File "C:\Users\88bbh\PycharmProjects\AI\tempt.py", line 10, in <module> layer(np.zeros((1, 1920, 1200, 3))) File "C:\Users\88bbh\PycharmProjects\AI\venv\lib\site-packages\keras\src\utils\traceback_utils.py", line 122, in error_handler raise e.with_traceback(filtered_tb) from None File "C:\Users\88bbh\PycharmProjects\AI\venv\lib\site-packages\tensorflow\python\framework\ops.py", line 5983, in raise_from_not_ok_status raise core._status_to_exception(e) from None # pylint: disable=protected-access tensorflow.python.framework.errors_impl.InvalidArgumentError: Exception encountered when calling LayerNormalization.call(). {{function_node __wrapped__Mul_device_/job:localhost/replica:0/task:0/device:CPU:0}} Incompatible shapes: [1,1920,1200,3] vs. [1920,1200] [Op:Mul] name: Arguments received by LayerNormalization.call(): • inputs=tf.Tensor(shape=(1, 1920, 1200, 3), dtype=float32) ``` code to reproduce ``` layer = keras.layers.LayerNormalization(axis=[-3, -2], rms_scaling=True) layer.build([None, 1920, 1200, 3]) layer(np.zeros((1, 1920, 1200, 3))) ``` the error is in layernorm call method ``` if self.rms_scaling: # Calculate outputs with only variance and gamma if rms scaling # is enabled # Calculate the variance along self.axis (layer activations). variance = ops.var(inputs, axis=self.axis, keepdims=True) inv = ops.rsqrt(variance + self.epsilon) print("inputs shape:", inputs.shape) print("inv shape:", inv.shape) print("gamma_cast shape:", self.gamma.shape) print("inv shape:", (inputs * inv).shape) outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype) ``` the error can be fixed by changing ``` outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype) to outputs = inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype) ``` please fix it in the next update thank you --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `keras/src/layers/normalization/layer_normalization.py` Content: ``` 1 from keras.src import constraints 2 from keras.src import initializers 3 from keras.src import ops 4 from keras.src import regularizers 5 from keras.src.api_export import keras_export 6 from keras.src.layers.layer import Layer 7 8 9 @keras_export("keras.layers.LayerNormalization") 10 class LayerNormalization(Layer): 11 """Layer normalization layer (Ba et al., 2016). 12 13 Normalize the activations of the previous layer for each given example in a 14 batch independently, rather than across a batch like Batch Normalization. 15 i.e. applies a transformation that maintains the mean activation within each 16 example close to 0 and the activation standard deviation close to 1. 
17 18 If `scale` or `center` are enabled, the layer will scale the normalized 19 outputs by broadcasting them with a trainable variable `gamma`, and center 20 the outputs by broadcasting with a trainable variable `beta`. `gamma` will 21 default to a ones tensor and `beta` will default to a zeros tensor, so that 22 centering and scaling are no-ops before training has begun. 23 24 So, with scaling and centering enabled the normalization equations 25 are as follows: 26 27 Let the intermediate activations for a mini-batch to be the `inputs`. 28 29 For each sample `x_i` in `inputs` with `k` features, we compute the mean and 30 variance of the sample: 31 32 ```python 33 mean_i = sum(x_i[j] for j in range(k)) / k 34 var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k 35 ``` 36 37 and then compute a normalized `x_i_normalized`, including a small factor 38 `epsilon` for numerical stability. 39 40 ```python 41 x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon) 42 ``` 43 44 And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`, 45 which are learned parameters: 46 47 ```python 48 output_i = x_i_normalized * gamma + beta 49 ``` 50 51 `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and 52 this part of the inputs' shape must be fully defined. 53 54 For example: 55 56 >>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3]) 57 >>> layer.build([5, 20, 30, 40]) 58 >>> print(layer.beta.shape) 59 (20, 30, 40) 60 >>> print(layer.gamma.shape) 61 (20, 30, 40) 62 63 Note that other implementations of layer normalization may choose to define 64 `gamma` and `beta` over a separate set of axes from the axes being 65 normalized across. For example, Group Normalization 66 ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1 67 corresponds to a Layer Normalization that normalizes across height, width, 68 and channel and has `gamma` and `beta` span only the channel dimension. 69 So, this Layer Normalization implementation will not match a Group 70 Normalization layer with group size set to 1. 71 72 Args: 73 axis: Integer or List/Tuple. The axis or axes to normalize across. 74 Typically, this is the features axis/axes. The left-out axes are 75 typically the batch axis/axes. `-1` is the last dimension in the 76 input. Defaults to `-1`. 77 epsilon: Small float added to variance to avoid dividing by zero. 78 Defaults to 1e-3. 79 center: If True, add offset of `beta` to normalized tensor. If False, 80 `beta` is ignored. Defaults to `True`. 81 scale: If True, multiply by `gamma`. If False, `gamma` is not used. 82 When the next layer is linear (also e.g. `nn.relu`), this can be 83 disabled since the scaling will be done by the next layer. 84 Defaults to `True`. 85 rms_scaling: If True, `center` and `scale` are ignored, and the 86 inputs are scaled by `gamma` and the inverse square root 87 of the square of all inputs. This is an approximate and faster 88 approach that avoids ever computing the mean of the input. 89 beta_initializer: Initializer for the beta weight. Defaults to zeros. 90 gamma_initializer: Initializer for the gamma weight. Defaults to ones. 91 beta_regularizer: Optional regularizer for the beta weight. 92 None by default. 93 gamma_regularizer: Optional regularizer for the gamma weight. 94 None by default. 95 beta_constraint: Optional constraint for the beta weight. 96 None by default. 97 gamma_constraint: Optional constraint for the gamma weight. 98 None by default. 99 **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`). 
100 101 102 Reference: 103 104 - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450). 105 """ 106 107 def __init__( 108 self, 109 axis=-1, 110 epsilon=1e-3, 111 center=True, 112 scale=True, 113 rms_scaling=False, 114 beta_initializer="zeros", 115 gamma_initializer="ones", 116 beta_regularizer=None, 117 gamma_regularizer=None, 118 beta_constraint=None, 119 gamma_constraint=None, 120 **kwargs 121 ): 122 super().__init__(**kwargs) 123 if isinstance(axis, (list, tuple)): 124 self.axis = list(axis) 125 elif isinstance(axis, int): 126 self.axis = axis 127 else: 128 raise TypeError( 129 "Expected an int or a list/tuple of ints for the " 130 "argument 'axis', but received: %r" % axis 131 ) 132 133 self.epsilon = epsilon 134 self.center = center 135 self.scale = scale 136 self.rms_scaling = rms_scaling 137 self.beta_initializer = initializers.get(beta_initializer) 138 self.gamma_initializer = initializers.get(gamma_initializer) 139 self.beta_regularizer = regularizers.get(beta_regularizer) 140 self.gamma_regularizer = regularizers.get(gamma_regularizer) 141 self.beta_constraint = constraints.get(beta_constraint) 142 self.gamma_constraint = constraints.get(gamma_constraint) 143 144 self.supports_masking = True 145 self.autocast = False 146 147 def build(self, input_shape): 148 if isinstance(self.axis, list): 149 shape = tuple([input_shape[dim] for dim in self.axis]) 150 else: 151 shape = (input_shape[self.axis],) 152 self.axis = [self.axis] 153 if self.scale or self.rms_scaling: 154 self.gamma = self.add_weight( 155 name="gamma", 156 shape=shape, 157 initializer=self.gamma_initializer, 158 regularizer=self.gamma_regularizer, 159 constraint=self.gamma_constraint, 160 trainable=True, 161 autocast=False, 162 ) 163 else: 164 self.gamma = None 165 166 if self.center and not self.rms_scaling: 167 self.beta = self.add_weight( 168 name="beta", 169 shape=shape, 170 initializer=self.beta_initializer, 171 regularizer=self.beta_regularizer, 172 constraint=self.beta_constraint, 173 trainable=True, 174 autocast=False, 175 ) 176 else: 177 self.beta = None 178 179 self.built = True 180 181 def call(self, inputs): 182 inputs = ops.cast(inputs, self.compute_dtype) 183 # Compute the axes along which to reduce the mean / variance 184 input_shape = inputs.shape 185 ndims = len(input_shape) 186 187 # Broadcasting only necessary for norm when the axis is not just 188 # the last dimension 189 broadcast_shape = [1] * ndims 190 for dim in self.axis: 191 broadcast_shape[dim] = input_shape[dim] 192 193 def _broadcast(v): 194 if ( 195 v is not None 196 and len(v.shape) != ndims 197 and self.axis != [ndims - 1] 198 ): 199 return ops.reshape(v, broadcast_shape) 200 return v 201 202 input_dtype = inputs.dtype 203 if input_dtype in ("float16", "bfloat16") and self.dtype == "float32": 204 # If mixed precision is used, cast inputs to float32 so that 205 # this is at least as numerically stable as the fused version. 206 inputs = ops.cast(inputs, "float32") 207 208 if self.rms_scaling: 209 # Calculate outputs with only variance and gamma if rms scaling 210 # is enabled 211 # Calculate the variance along self.axis (layer activations). 212 variance = ops.var(inputs, axis=self.axis, keepdims=True) 213 inv = ops.rsqrt(variance + self.epsilon) 214 215 outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype) 216 else: 217 # Calculate the mean & variance along self.axis (layer activations). 
218 mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True) 219 gamma, beta = _broadcast(self.gamma), _broadcast(self.beta) 220 221 inv = ops.rsqrt(variance + self.epsilon) 222 if gamma is not None: 223 gamma = ops.cast(gamma, inputs.dtype) 224 inv = inv * gamma 225 226 res = -mean * inv 227 if beta is not None: 228 beta = ops.cast(beta, inputs.dtype) 229 res = res + beta 230 231 outputs = inputs * inv + res 232 233 return ops.cast(outputs, input_dtype) 234 235 def compute_output_shape(self, input_shape): 236 return input_shape 237 238 def get_config(self): 239 config = { 240 "axis": self.axis, 241 "epsilon": self.epsilon, 242 "center": self.center, 243 "scale": self.scale, 244 "beta_initializer": initializers.serialize(self.beta_initializer), 245 "gamma_initializer": initializers.serialize(self.gamma_initializer), 246 "beta_regularizer": regularizers.serialize(self.beta_regularizer), 247 "gamma_regularizer": regularizers.serialize(self.gamma_regularizer), 248 "beta_constraint": constraints.serialize(self.beta_constraint), 249 "gamma_constraint": constraints.serialize(self.gamma_constraint), 250 } 251 base_config = super().get_config() 252 return {**base_config, **config} 253 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/keras/src/layers/normalization/layer_normalization.py b/keras/src/layers/normalization/layer_normalization.py --- a/keras/src/layers/normalization/layer_normalization.py +++ b/keras/src/layers/normalization/layer_normalization.py @@ -212,7 +212,9 @@ variance = ops.var(inputs, axis=self.axis, keepdims=True) inv = ops.rsqrt(variance + self.epsilon) - outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype) + outputs = ( + inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype) + ) else: # Calculate the mean & variance along self.axis (layer activations). mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True)
{"golden_diff": "diff --git a/keras/src/layers/normalization/layer_normalization.py b/keras/src/layers/normalization/layer_normalization.py\n--- a/keras/src/layers/normalization/layer_normalization.py\n+++ b/keras/src/layers/normalization/layer_normalization.py\n@@ -212,7 +212,9 @@\n variance = ops.var(inputs, axis=self.axis, keepdims=True)\n inv = ops.rsqrt(variance + self.epsilon)\n \n- outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype)\n+ outputs = (\n+ inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype)\n+ )\n else:\n # Calculate the mean & variance along self.axis (layer activations).\n mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True)\n", "issue": "Layernorm not supporting axis [-2, 3]\nHi, \r\nI wanted to normalise my output on the -2 and -3 axis, (image height and width), however, it seems that the with rms_scaling=true, the self.gamma is not broadcasted to same shape as layer input causing this error, \r\n```\r\ninputs shape: (1, 1920, 1200, 3)\r\ninv shape: (1, 1, 1, 3)\r\ngamma_cast shape: (1920, 1200)\r\ninv shape: (1, 1920, 1200, 3)\r\n2024-04-30 13:50:54.238379: W tensorflow/core/framework/local_rendezvous.cc:404] Local rendezvous is aborting with status: INVALID_ARGUMENT: Incompatible shapes: [1,1920,1200,3] vs. [1920,1200]\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\88bbh\\PycharmProjects\\AI\\tempt.py\", line 10, in <module>\r\n layer(np.zeros((1, 1920, 1200, 3)))\r\n File \"C:\\Users\\88bbh\\PycharmProjects\\AI\\venv\\lib\\site-packages\\keras\\src\\utils\\traceback_utils.py\", line 122, in error_handler\r\n raise e.with_traceback(filtered_tb) from None\r\n File \"C:\\Users\\88bbh\\PycharmProjects\\AI\\venv\\lib\\site-packages\\tensorflow\\python\\framework\\ops.py\", line 5983, in raise_from_not_ok_status\r\n raise core._status_to_exception(e) from None # pylint: disable=protected-access\r\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Exception encountered when calling LayerNormalization.call().\r\n\r\n{{function_node __wrapped__Mul_device_/job:localhost/replica:0/task:0/device:CPU:0}} Incompatible shapes: [1,1920,1200,3] vs. 
[1920,1200] [Op:Mul] name: \r\n\r\nArguments received by LayerNormalization.call():\r\n \u2022 inputs=tf.Tensor(shape=(1, 1920, 1200, 3), dtype=float32)\r\n```\r\n\r\ncode to reproduce\r\n```\r\nlayer = keras.layers.LayerNormalization(axis=[-3, -2], rms_scaling=True)\r\nlayer.build([None, 1920, 1200, 3])\r\nlayer(np.zeros((1, 1920, 1200, 3)))\r\n```\r\n\r\nthe error is in layernorm call method\r\n```\r\n if self.rms_scaling:\r\n # Calculate outputs with only variance and gamma if rms scaling\r\n # is enabled\r\n # Calculate the variance along self.axis (layer activations).\r\n variance = ops.var(inputs, axis=self.axis, keepdims=True)\r\n inv = ops.rsqrt(variance + self.epsilon)\r\n print(\"inputs shape:\", inputs.shape)\r\n print(\"inv shape:\", inv.shape)\r\n print(\"gamma_cast shape:\", self.gamma.shape)\r\n print(\"inv shape:\", (inputs * inv).shape)\r\n outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype)\r\n```\r\n\r\nthe error can be fixed by changing\r\n```\r\noutputs = inputs * inv * ops.cast(self.gamma, inputs.dtype)\r\nto\r\noutputs = inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype)\r\n```\r\nplease fix it in the next update\r\nthank you\n", "before_files": [{"content": "from keras.src import constraints\nfrom keras.src import initializers\nfrom keras.src import ops\nfrom keras.src import regularizers\nfrom keras.src.api_export import keras_export\nfrom keras.src.layers.layer import Layer\n\n\n@keras_export(\"keras.layers.LayerNormalization\")\nclass LayerNormalization(Layer):\n \"\"\"Layer normalization layer (Ba et al., 2016).\n\n Normalize the activations of the previous layer for each given example in a\n batch independently, rather than across a batch like Batch Normalization.\n i.e. applies a transformation that maintains the mean activation within each\n example close to 0 and the activation standard deviation close to 1.\n\n If `scale` or `center` are enabled, the layer will scale the normalized\n outputs by broadcasting them with a trainable variable `gamma`, and center\n the outputs by broadcasting with a trainable variable `beta`. `gamma` will\n default to a ones tensor and `beta` will default to a zeros tensor, so that\n centering and scaling are no-ops before training has begun.\n\n So, with scaling and centering enabled the normalization equations\n are as follows:\n\n Let the intermediate activations for a mini-batch to be the `inputs`.\n\n For each sample `x_i` in `inputs` with `k` features, we compute the mean and\n variance of the sample:\n\n ```python\n mean_i = sum(x_i[j] for j in range(k)) / k\n var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k\n ```\n\n and then compute a normalized `x_i_normalized`, including a small factor\n `epsilon` for numerical stability.\n\n ```python\n x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)\n ```\n\n And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,\n which are learned parameters:\n\n ```python\n output_i = x_i_normalized * gamma + beta\n ```\n\n `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and\n this part of the inputs' shape must be fully defined.\n\n For example:\n\n >>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3])\n >>> layer.build([5, 20, 30, 40])\n >>> print(layer.beta.shape)\n (20, 30, 40)\n >>> print(layer.gamma.shape)\n (20, 30, 40)\n\n Note that other implementations of layer normalization may choose to define\n `gamma` and `beta` over a separate set of axes from the axes being\n normalized across. 
For example, Group Normalization\n ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1\n corresponds to a Layer Normalization that normalizes across height, width,\n and channel and has `gamma` and `beta` span only the channel dimension.\n So, this Layer Normalization implementation will not match a Group\n Normalization layer with group size set to 1.\n\n Args:\n axis: Integer or List/Tuple. The axis or axes to normalize across.\n Typically, this is the features axis/axes. The left-out axes are\n typically the batch axis/axes. `-1` is the last dimension in the\n input. Defaults to `-1`.\n epsilon: Small float added to variance to avoid dividing by zero.\n Defaults to 1e-3.\n center: If True, add offset of `beta` to normalized tensor. If False,\n `beta` is ignored. Defaults to `True`.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used.\n When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling will be done by the next layer.\n Defaults to `True`.\n rms_scaling: If True, `center` and `scale` are ignored, and the\n inputs are scaled by `gamma` and the inverse square root\n of the square of all inputs. This is an approximate and faster\n approach that avoids ever computing the mean of the input.\n beta_initializer: Initializer for the beta weight. Defaults to zeros.\n gamma_initializer: Initializer for the gamma weight. Defaults to ones.\n beta_regularizer: Optional regularizer for the beta weight.\n None by default.\n gamma_regularizer: Optional regularizer for the gamma weight.\n None by default.\n beta_constraint: Optional constraint for the beta weight.\n None by default.\n gamma_constraint: Optional constraint for the gamma weight.\n None by default.\n **kwargs: Base layer keyword arguments (e.g. 
`name` and `dtype`).\n\n\n Reference:\n\n - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).\n \"\"\"\n\n def __init__(\n self,\n axis=-1,\n epsilon=1e-3,\n center=True,\n scale=True,\n rms_scaling=False,\n beta_initializer=\"zeros\",\n gamma_initializer=\"ones\",\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n **kwargs\n ):\n super().__init__(**kwargs)\n if isinstance(axis, (list, tuple)):\n self.axis = list(axis)\n elif isinstance(axis, int):\n self.axis = axis\n else:\n raise TypeError(\n \"Expected an int or a list/tuple of ints for the \"\n \"argument 'axis', but received: %r\" % axis\n )\n\n self.epsilon = epsilon\n self.center = center\n self.scale = scale\n self.rms_scaling = rms_scaling\n self.beta_initializer = initializers.get(beta_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.beta_regularizer = regularizers.get(beta_regularizer)\n self.gamma_regularizer = regularizers.get(gamma_regularizer)\n self.beta_constraint = constraints.get(beta_constraint)\n self.gamma_constraint = constraints.get(gamma_constraint)\n\n self.supports_masking = True\n self.autocast = False\n\n def build(self, input_shape):\n if isinstance(self.axis, list):\n shape = tuple([input_shape[dim] for dim in self.axis])\n else:\n shape = (input_shape[self.axis],)\n self.axis = [self.axis]\n if self.scale or self.rms_scaling:\n self.gamma = self.add_weight(\n name=\"gamma\",\n shape=shape,\n initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint,\n trainable=True,\n autocast=False,\n )\n else:\n self.gamma = None\n\n if self.center and not self.rms_scaling:\n self.beta = self.add_weight(\n name=\"beta\",\n shape=shape,\n initializer=self.beta_initializer,\n regularizer=self.beta_regularizer,\n constraint=self.beta_constraint,\n trainable=True,\n autocast=False,\n )\n else:\n self.beta = None\n\n self.built = True\n\n def call(self, inputs):\n inputs = ops.cast(inputs, self.compute_dtype)\n # Compute the axes along which to reduce the mean / variance\n input_shape = inputs.shape\n ndims = len(input_shape)\n\n # Broadcasting only necessary for norm when the axis is not just\n # the last dimension\n broadcast_shape = [1] * ndims\n for dim in self.axis:\n broadcast_shape[dim] = input_shape[dim]\n\n def _broadcast(v):\n if (\n v is not None\n and len(v.shape) != ndims\n and self.axis != [ndims - 1]\n ):\n return ops.reshape(v, broadcast_shape)\n return v\n\n input_dtype = inputs.dtype\n if input_dtype in (\"float16\", \"bfloat16\") and self.dtype == \"float32\":\n # If mixed precision is used, cast inputs to float32 so that\n # this is at least as numerically stable as the fused version.\n inputs = ops.cast(inputs, \"float32\")\n\n if self.rms_scaling:\n # Calculate outputs with only variance and gamma if rms scaling\n # is enabled\n # Calculate the variance along self.axis (layer activations).\n variance = ops.var(inputs, axis=self.axis, keepdims=True)\n inv = ops.rsqrt(variance + self.epsilon)\n\n outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype)\n else:\n # Calculate the mean & variance along self.axis (layer activations).\n mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True)\n gamma, beta = _broadcast(self.gamma), _broadcast(self.beta)\n\n inv = ops.rsqrt(variance + self.epsilon)\n if gamma is not None:\n gamma = ops.cast(gamma, inputs.dtype)\n inv = inv * gamma\n\n res = -mean * inv\n if beta is not None:\n beta = ops.cast(beta, 
inputs.dtype)\n res = res + beta\n\n outputs = inputs * inv + res\n\n return ops.cast(outputs, input_dtype)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n \"axis\": self.axis,\n \"epsilon\": self.epsilon,\n \"center\": self.center,\n \"scale\": self.scale,\n \"beta_initializer\": initializers.serialize(self.beta_initializer),\n \"gamma_initializer\": initializers.serialize(self.gamma_initializer),\n \"beta_regularizer\": regularizers.serialize(self.beta_regularizer),\n \"gamma_regularizer\": regularizers.serialize(self.gamma_regularizer),\n \"beta_constraint\": constraints.serialize(self.beta_constraint),\n \"gamma_constraint\": constraints.serialize(self.gamma_constraint),\n }\n base_config = super().get_config()\n return {**base_config, **config}\n", "path": "keras/src/layers/normalization/layer_normalization.py"}], "after_files": [{"content": "from keras.src import constraints\nfrom keras.src import initializers\nfrom keras.src import ops\nfrom keras.src import regularizers\nfrom keras.src.api_export import keras_export\nfrom keras.src.layers.layer import Layer\n\n\n@keras_export(\"keras.layers.LayerNormalization\")\nclass LayerNormalization(Layer):\n \"\"\"Layer normalization layer (Ba et al., 2016).\n\n Normalize the activations of the previous layer for each given example in a\n batch independently, rather than across a batch like Batch Normalization.\n i.e. applies a transformation that maintains the mean activation within each\n example close to 0 and the activation standard deviation close to 1.\n\n If `scale` or `center` are enabled, the layer will scale the normalized\n outputs by broadcasting them with a trainable variable `gamma`, and center\n the outputs by broadcasting with a trainable variable `beta`. `gamma` will\n default to a ones tensor and `beta` will default to a zeros tensor, so that\n centering and scaling are no-ops before training has begun.\n\n So, with scaling and centering enabled the normalization equations\n are as follows:\n\n Let the intermediate activations for a mini-batch to be the `inputs`.\n\n For each sample `x_i` in `inputs` with `k` features, we compute the mean and\n variance of the sample:\n\n ```python\n mean_i = sum(x_i[j] for j in range(k)) / k\n var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k\n ```\n\n and then compute a normalized `x_i_normalized`, including a small factor\n `epsilon` for numerical stability.\n\n ```python\n x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)\n ```\n\n And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,\n which are learned parameters:\n\n ```python\n output_i = x_i_normalized * gamma + beta\n ```\n\n `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and\n this part of the inputs' shape must be fully defined.\n\n For example:\n\n >>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3])\n >>> layer.build([5, 20, 30, 40])\n >>> print(layer.beta.shape)\n (20, 30, 40)\n >>> print(layer.gamma.shape)\n (20, 30, 40)\n\n Note that other implementations of layer normalization may choose to define\n `gamma` and `beta` over a separate set of axes from the axes being\n normalized across. For example, Group Normalization\n ([Wu et al. 
2018](https://arxiv.org/abs/1803.08494)) with group size of 1\n corresponds to a Layer Normalization that normalizes across height, width,\n and channel and has `gamma` and `beta` span only the channel dimension.\n So, this Layer Normalization implementation will not match a Group\n Normalization layer with group size set to 1.\n\n Args:\n axis: Integer or List/Tuple. The axis or axes to normalize across.\n Typically, this is the features axis/axes. The left-out axes are\n typically the batch axis/axes. `-1` is the last dimension in the\n input. Defaults to `-1`.\n epsilon: Small float added to variance to avoid dividing by zero.\n Defaults to 1e-3.\n center: If True, add offset of `beta` to normalized tensor. If False,\n `beta` is ignored. Defaults to `True`.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used.\n When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling will be done by the next layer.\n Defaults to `True`.\n rms_scaling: If True, `center` and `scale` are ignored, and the\n inputs are scaled by `gamma` and the inverse square root\n of the square of all inputs. This is an approximate and faster\n approach that avoids ever computing the mean of the input.\n beta_initializer: Initializer for the beta weight. Defaults to zeros.\n gamma_initializer: Initializer for the gamma weight. Defaults to ones.\n beta_regularizer: Optional regularizer for the beta weight.\n None by default.\n gamma_regularizer: Optional regularizer for the gamma weight.\n None by default.\n beta_constraint: Optional constraint for the beta weight.\n None by default.\n gamma_constraint: Optional constraint for the gamma weight.\n None by default.\n **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).\n\n\n Reference:\n\n - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).\n \"\"\"\n\n def __init__(\n self,\n axis=-1,\n epsilon=1e-3,\n center=True,\n scale=True,\n rms_scaling=False,\n beta_initializer=\"zeros\",\n gamma_initializer=\"ones\",\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n **kwargs\n ):\n super().__init__(**kwargs)\n if isinstance(axis, (list, tuple)):\n self.axis = list(axis)\n elif isinstance(axis, int):\n self.axis = axis\n else:\n raise TypeError(\n \"Expected an int or a list/tuple of ints for the \"\n \"argument 'axis', but received: %r\" % axis\n )\n\n self.epsilon = epsilon\n self.center = center\n self.scale = scale\n self.rms_scaling = rms_scaling\n self.beta_initializer = initializers.get(beta_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.beta_regularizer = regularizers.get(beta_regularizer)\n self.gamma_regularizer = regularizers.get(gamma_regularizer)\n self.beta_constraint = constraints.get(beta_constraint)\n self.gamma_constraint = constraints.get(gamma_constraint)\n\n self.supports_masking = True\n self.autocast = False\n\n def build(self, input_shape):\n if isinstance(self.axis, list):\n shape = tuple([input_shape[dim] for dim in self.axis])\n else:\n shape = (input_shape[self.axis],)\n self.axis = [self.axis]\n if self.scale or self.rms_scaling:\n self.gamma = self.add_weight(\n name=\"gamma\",\n shape=shape,\n initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint,\n trainable=True,\n autocast=False,\n )\n else:\n self.gamma = None\n\n if self.center and not self.rms_scaling:\n self.beta = self.add_weight(\n name=\"beta\",\n shape=shape,\n 
initializer=self.beta_initializer,\n regularizer=self.beta_regularizer,\n constraint=self.beta_constraint,\n trainable=True,\n autocast=False,\n )\n else:\n self.beta = None\n\n self.built = True\n\n def call(self, inputs):\n inputs = ops.cast(inputs, self.compute_dtype)\n # Compute the axes along which to reduce the mean / variance\n input_shape = inputs.shape\n ndims = len(input_shape)\n\n # Broadcasting only necessary for norm when the axis is not just\n # the last dimension\n broadcast_shape = [1] * ndims\n for dim in self.axis:\n broadcast_shape[dim] = input_shape[dim]\n\n def _broadcast(v):\n if (\n v is not None\n and len(v.shape) != ndims\n and self.axis != [ndims - 1]\n ):\n return ops.reshape(v, broadcast_shape)\n return v\n\n input_dtype = inputs.dtype\n if input_dtype in (\"float16\", \"bfloat16\") and self.dtype == \"float32\":\n # If mixed precision is used, cast inputs to float32 so that\n # this is at least as numerically stable as the fused version.\n inputs = ops.cast(inputs, \"float32\")\n\n if self.rms_scaling:\n # Calculate outputs with only variance and gamma if rms scaling\n # is enabled\n # Calculate the variance along self.axis (layer activations).\n variance = ops.var(inputs, axis=self.axis, keepdims=True)\n inv = ops.rsqrt(variance + self.epsilon)\n\n outputs = (\n inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype)\n )\n else:\n # Calculate the mean & variance along self.axis (layer activations).\n mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True)\n gamma, beta = _broadcast(self.gamma), _broadcast(self.beta)\n\n inv = ops.rsqrt(variance + self.epsilon)\n if gamma is not None:\n gamma = ops.cast(gamma, inputs.dtype)\n inv = inv * gamma\n\n res = -mean * inv\n if beta is not None:\n beta = ops.cast(beta, inputs.dtype)\n res = res + beta\n\n outputs = inputs * inv + res\n\n return ops.cast(outputs, input_dtype)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n \"axis\": self.axis,\n \"epsilon\": self.epsilon,\n \"center\": self.center,\n \"scale\": self.scale,\n \"beta_initializer\": initializers.serialize(self.beta_initializer),\n \"gamma_initializer\": initializers.serialize(self.gamma_initializer),\n \"beta_regularizer\": regularizers.serialize(self.beta_regularizer),\n \"gamma_regularizer\": regularizers.serialize(self.gamma_regularizer),\n \"beta_constraint\": constraints.serialize(self.beta_constraint),\n \"gamma_constraint\": constraints.serialize(self.gamma_constraint),\n }\n base_config = super().get_config()\n return {**base_config, **config}\n", "path": "keras/src/layers/normalization/layer_normalization.py"}]}
3,871
186
gh_patches_debug_28974
rasdani/github-patches
git_diff
prowler-cloud__prowler-2282
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug]: False positives on some checks? ### Steps to Reproduce Hi, it looks like some checks produce false positives (they are tagged as warning because I've allowlisted them): ``` Check ID: ec2_ebs_snapshots_encrypted - ec2 [medium] WARNING eu-central-1: EBS Snapshot snap-112 is unencrypted. WARNING eu-central-1: EBS Snapshot snap-113 is encrypted. <<<< ``` ``` Check ID: iam_policy_allows_privilege_escalation - iam [high] WARNING eu-central-1: Custom Policy arn:aws:iam::112:policy/aws_admin_access does not allow privilege escalation ``` Are you maybe simply overring the status (also "PASS") by WARNING in case of an allowlist match? Another type of issue but more like a question: _sns_topics_not_publicly_accessible_ triggers with ` WARNING eu-central-1: SNS topic cloudwatch-pagerduty-alarms-ec2-state-changes policy with public access but has a Condition` which is (from the User's perspective) a false positive as well because we have a condition, which prowler cannot evaluate? ### Expected behavior none ### Actual Result with Screenshots or Logs none ### How did you install Prowler? Cloning the repository from github.com (git clone) ### Environment Resource locally ### OS used Linux ### Prowler version 3.4.1 ### Pip version none ### Context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py` Content: ``` 1 from prowler.lib.check.models import Check, Check_Report_AWS 2 from prowler.providers.aws.services.sns.sns_client import sns_client 3 4 5 class sns_topics_not_publicly_accessible(Check): 6 def execute(self): 7 findings = [] 8 for topic in sns_client.topics: 9 report = Check_Report_AWS(self.metadata()) 10 report.region = topic.region 11 report.resource_id = topic.name 12 report.resource_arn = topic.arn 13 report.resource_tags = topic.tags 14 report.status = "PASS" 15 report.status_extended = f"SNS topic {topic.name} without public access" 16 if topic.policy: 17 for statement in topic.policy["Statement"]: 18 # Only check allow statements 19 if statement["Effect"] == "Allow": 20 if ( 21 "*" in statement["Principal"] 22 or ( 23 "AWS" in statement["Principal"] 24 and "*" in statement["Principal"]["AWS"] 25 ) 26 or ( 27 "CanonicalUser" in statement["Principal"] 28 and "*" in statement["Principal"]["CanonicalUser"] 29 ) 30 ): 31 if "Condition" not in statement: 32 report.status = "FAIL" 33 report.status_extended = ( 34 f"SNS topic {topic.name} policy with public access" 35 ) 36 else: 37 report.status = "FAIL" 38 report.status_extended = f"SNS topic {topic.name} policy with public access but has a Condition" 39 40 findings.append(report) 41 42 return findings 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py --- a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py +++ b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py @@ -12,7 +12,7 @@ report.resource_arn = topic.arn report.resource_tags = topic.tags report.status = "PASS" - report.status_extended = f"SNS topic {topic.name} without public access" + report.status_extended = f"SNS topic {topic.name} is not publicly accesible" if topic.policy: for statement in topic.policy["Statement"]: # Only check allow statements @@ -31,11 +31,11 @@ if "Condition" not in statement: report.status = "FAIL" report.status_extended = ( - f"SNS topic {topic.name} policy with public access" + f"SNS topic {topic.name} is publicly accesible" ) else: - report.status = "FAIL" - report.status_extended = f"SNS topic {topic.name} policy with public access but has a Condition" + report.status = "PASS" + report.status_extended = f"SNS topic {topic.name} is publicly accesible but has a Condition that could filter it" findings.append(report)
{"golden_diff": "diff --git a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py\n--- a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py\n+++ b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py\n@@ -12,7 +12,7 @@\n report.resource_arn = topic.arn\n report.resource_tags = topic.tags\n report.status = \"PASS\"\n- report.status_extended = f\"SNS topic {topic.name} without public access\"\n+ report.status_extended = f\"SNS topic {topic.name} is not publicly accesible\"\n if topic.policy:\n for statement in topic.policy[\"Statement\"]:\n # Only check allow statements\n@@ -31,11 +31,11 @@\n if \"Condition\" not in statement:\n report.status = \"FAIL\"\n report.status_extended = (\n- f\"SNS topic {topic.name} policy with public access\"\n+ f\"SNS topic {topic.name} is publicly accesible\"\n )\n else:\n- report.status = \"FAIL\"\n- report.status_extended = f\"SNS topic {topic.name} policy with public access but has a Condition\"\n+ report.status = \"PASS\"\n+ report.status_extended = f\"SNS topic {topic.name} is publicly accesible but has a Condition that could filter it\"\n \n findings.append(report)\n", "issue": "[Bug]: False positives on some checks?\n### Steps to Reproduce\n\nHi,\r\n\r\nit looks like some checks produce false positives (they are tagged as warning because I've allowlisted them):\r\n\r\n```\r\nCheck ID: ec2_ebs_snapshots_encrypted - ec2 [medium]\r\n WARNING eu-central-1: EBS Snapshot snap-112 is unencrypted.\r\n WARNING eu-central-1: EBS Snapshot snap-113 is encrypted. 
<<<<\r\n```\r\n\r\n\r\n```\r\nCheck ID: iam_policy_allows_privilege_escalation - iam [high]\r\n WARNING eu-central-1: Custom Policy arn:aws:iam::112:policy/aws_admin_access does not allow privilege escalation\r\n```\r\n\r\nAre you maybe simply overring the status (also \"PASS\") by WARNING in case of an allowlist match?\r\n\r\n\r\nAnother type of issue but more like a question:\r\n\r\n_sns_topics_not_publicly_accessible_ triggers with \r\n` WARNING eu-central-1: SNS topic cloudwatch-pagerduty-alarms-ec2-state-changes policy with public access but has a Condition`\r\nwhich is (from the User's perspective) a false positive as well because we have a condition, which prowler cannot evaluate?\r\n\r\n\r\n\n\n### Expected behavior\n\nnone\n\n### Actual Result with Screenshots or Logs\n\nnone\n\n### How did you install Prowler?\n\nCloning the repository from github.com (git clone)\n\n### Environment Resource\n\nlocally\n\n### OS used\n\nLinux\n\n### Prowler version\n\n3.4.1\n\n### Pip version\n\nnone\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.sns.sns_client import sns_client\n\n\nclass sns_topics_not_publicly_accessible(Check):\n def execute(self):\n findings = []\n for topic in sns_client.topics:\n report = Check_Report_AWS(self.metadata())\n report.region = topic.region\n report.resource_id = topic.name\n report.resource_arn = topic.arn\n report.resource_tags = topic.tags\n report.status = \"PASS\"\n report.status_extended = f\"SNS topic {topic.name} without public access\"\n if topic.policy:\n for statement in topic.policy[\"Statement\"]:\n # Only check allow statements\n if statement[\"Effect\"] == \"Allow\":\n if (\n \"*\" in statement[\"Principal\"]\n or (\n \"AWS\" in statement[\"Principal\"]\n and \"*\" in statement[\"Principal\"][\"AWS\"]\n )\n or (\n \"CanonicalUser\" in statement[\"Principal\"]\n and \"*\" in statement[\"Principal\"][\"CanonicalUser\"]\n )\n ):\n if \"Condition\" not in statement:\n report.status = \"FAIL\"\n report.status_extended = (\n f\"SNS topic {topic.name} policy with public access\"\n )\n else:\n report.status = \"FAIL\"\n report.status_extended = f\"SNS topic {topic.name} policy with public access but has a Condition\"\n\n findings.append(report)\n\n return findings\n", "path": "prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py"}], "after_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.sns.sns_client import sns_client\n\n\nclass sns_topics_not_publicly_accessible(Check):\n def execute(self):\n findings = []\n for topic in sns_client.topics:\n report = Check_Report_AWS(self.metadata())\n report.region = topic.region\n report.resource_id = topic.name\n report.resource_arn = topic.arn\n report.resource_tags = topic.tags\n report.status = \"PASS\"\n report.status_extended = f\"SNS topic {topic.name} is not publicly accesible\"\n if topic.policy:\n for statement in topic.policy[\"Statement\"]:\n # Only check allow statements\n if statement[\"Effect\"] == \"Allow\":\n if (\n \"*\" in statement[\"Principal\"]\n or (\n \"AWS\" in statement[\"Principal\"]\n and \"*\" in statement[\"Principal\"][\"AWS\"]\n )\n or (\n \"CanonicalUser\" in statement[\"Principal\"]\n and \"*\" in statement[\"Principal\"][\"CanonicalUser\"]\n )\n ):\n if \"Condition\" not in statement:\n report.status = \"FAIL\"\n report.status_extended = (\n 
f\"SNS topic {topic.name} is publicly accesible\"\n )\n else:\n report.status = \"PASS\"\n report.status_extended = f\"SNS topic {topic.name} is publicly accesible but has a Condition that could filter it\"\n\n findings.append(report)\n\n return findings\n", "path": "prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py"}]}
988
343
gh_patches_debug_26160
rasdani/github-patches
git_diff
buildbot__buildbot-1614
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix bytes/unicode issue to fix test on Python 3 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `master/buildbot/db/schedulers.py` Content: ``` 1 # This file is part of Buildbot. Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 13 # 14 # Copyright Buildbot Team Members 15 16 import sqlalchemy as sa 17 import sqlalchemy.exc 18 19 from buildbot.db import base 20 21 22 class SchedulersConnectorComponent(base.DBConnectorComponent): 23 # Documentation is in developer/database.rst 24 25 def classifyChanges(self, objectid, classifications): 26 def thd(conn): 27 transaction = conn.begin() 28 tbl = self.db.model.scheduler_changes 29 ins_q = tbl.insert() 30 upd_q = tbl.update( 31 ((tbl.c.objectid == objectid) 32 & (tbl.c.changeid == sa.bindparam('wc_changeid')))) 33 for changeid, important in classifications.items(): 34 # convert the 'important' value into an integer, since that 35 # is the column type 36 imp_int = important and 1 or 0 37 try: 38 conn.execute(ins_q, 39 objectid=objectid, 40 changeid=changeid, 41 important=imp_int) 42 except (sqlalchemy.exc.ProgrammingError, 43 sqlalchemy.exc.IntegrityError): 44 transaction.rollback() 45 transaction = conn.begin() 46 # insert failed, so try an update 47 conn.execute(upd_q, 48 wc_changeid=changeid, 49 important=imp_int) 50 51 transaction.commit() 52 return self.db.pool.do(thd) 53 54 def flushChangeClassifications(self, objectid, less_than=None): 55 def thd(conn): 56 sch_ch_tbl = self.db.model.scheduler_changes 57 wc = (sch_ch_tbl.c.objectid == objectid) 58 if less_than is not None: 59 wc = wc & (sch_ch_tbl.c.changeid < less_than) 60 q = sch_ch_tbl.delete(whereclause=wc) 61 conn.execute(q) 62 return self.db.pool.do(thd) 63 64 class Thunk: 65 pass 66 67 def getChangeClassifications(self, objectid, branch=Thunk, 68 repository=Thunk, project=Thunk, 69 codebase=Thunk): 70 def thd(conn): 71 sch_ch_tbl = self.db.model.scheduler_changes 72 ch_tbl = self.db.model.changes 73 74 wc = (sch_ch_tbl.c.objectid == objectid) 75 76 # may need to filter further based on branch, etc 77 extra_wheres = [] 78 if branch is not self.Thunk: 79 extra_wheres.append(ch_tbl.c.branch == branch) 80 if repository is not self.Thunk: 81 extra_wheres.append(ch_tbl.c.repository == repository) 82 if project is not self.Thunk: 83 extra_wheres.append(ch_tbl.c.project == project) 84 if codebase is not self.Thunk: 85 extra_wheres.append(ch_tbl.c.codebase == codebase) 86 87 # if we need to filter further append those, as well as a join 88 # on changeid (but just once for that one) 89 if extra_wheres: 90 wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid) 91 for w in extra_wheres: 92 wc &= w 93 94 q = sa.select( 95 [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important], 96 whereclause=wc) 97 return 
dict([(r.changeid, [False, True][r.important]) 98 for r in conn.execute(q)]) 99 return self.db.pool.do(thd) 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py --- a/master/buildbot/db/schedulers.py +++ b/master/buildbot/db/schedulers.py @@ -24,13 +24,13 @@ def classifyChanges(self, objectid, classifications): def thd(conn): - transaction = conn.begin() tbl = self.db.model.scheduler_changes ins_q = tbl.insert() upd_q = tbl.update( ((tbl.c.objectid == objectid) & (tbl.c.changeid == sa.bindparam('wc_changeid')))) for changeid, important in classifications.items(): + transaction = conn.begin() # convert the 'important' value into an integer, since that # is the column type imp_int = important and 1 or 0 @@ -48,7 +48,7 @@ wc_changeid=changeid, important=imp_int) - transaction.commit() + transaction.commit() return self.db.pool.do(thd) def flushChangeClassifications(self, objectid, less_than=None):
{"golden_diff": "diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py\n--- a/master/buildbot/db/schedulers.py\n+++ b/master/buildbot/db/schedulers.py\n@@ -24,13 +24,13 @@\n \n def classifyChanges(self, objectid, classifications):\n def thd(conn):\n- transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.objectid == objectid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n+ transaction = conn.begin()\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n@@ -48,7 +48,7 @@\n wc_changeid=changeid,\n important=imp_int)\n \n- transaction.commit()\n+ transaction.commit()\n return self.db.pool.do(thd)\n \n def flushChangeClassifications(self, objectid, less_than=None):\n", "issue": "Fix bytes/unicode issue to fix test on Python 3\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\nimport sqlalchemy.exc\n\nfrom buildbot.db import base\n\n\nclass SchedulersConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/database.rst\n\n def classifyChanges(self, objectid, classifications):\n def thd(conn):\n transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.objectid == objectid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n try:\n conn.execute(ins_q,\n objectid=objectid,\n changeid=changeid,\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n transaction.rollback()\n transaction = conn.begin()\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n important=imp_int)\n\n transaction.commit()\n return self.db.pool.do(thd)\n\n def flushChangeClassifications(self, objectid, less_than=None):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n wc = (sch_ch_tbl.c.objectid == objectid)\n if less_than is not None:\n wc = wc & (sch_ch_tbl.c.changeid < less_than)\n q = sch_ch_tbl.delete(whereclause=wc)\n conn.execute(q)\n return self.db.pool.do(thd)\n\n class Thunk:\n pass\n\n def getChangeClassifications(self, objectid, branch=Thunk,\n repository=Thunk, project=Thunk,\n codebase=Thunk):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n\n wc = (sch_ch_tbl.c.objectid == objectid)\n\n # may need to filter further based on branch, etc\n extra_wheres = []\n if branch is not self.Thunk:\n extra_wheres.append(ch_tbl.c.branch == branch)\n if repository is not self.Thunk:\n 
extra_wheres.append(ch_tbl.c.repository == repository)\n if project is not self.Thunk:\n extra_wheres.append(ch_tbl.c.project == project)\n if codebase is not self.Thunk:\n extra_wheres.append(ch_tbl.c.codebase == codebase)\n\n # if we need to filter further append those, as well as a join\n # on changeid (but just once for that one)\n if extra_wheres:\n wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)\n for w in extra_wheres:\n wc &= w\n\n q = sa.select(\n [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],\n whereclause=wc)\n return dict([(r.changeid, [False, True][r.important])\n for r in conn.execute(q)])\n return self.db.pool.do(thd)\n", "path": "master/buildbot/db/schedulers.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\nimport sqlalchemy.exc\n\nfrom buildbot.db import base\n\n\nclass SchedulersConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/database.rst\n\n def classifyChanges(self, objectid, classifications):\n def thd(conn):\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.objectid == objectid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n transaction = conn.begin()\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n try:\n conn.execute(ins_q,\n objectid=objectid,\n changeid=changeid,\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n transaction.rollback()\n transaction = conn.begin()\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n important=imp_int)\n\n transaction.commit()\n return self.db.pool.do(thd)\n\n def flushChangeClassifications(self, objectid, less_than=None):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n wc = (sch_ch_tbl.c.objectid == objectid)\n if less_than is not None:\n wc = wc & (sch_ch_tbl.c.changeid < less_than)\n q = sch_ch_tbl.delete(whereclause=wc)\n conn.execute(q)\n return self.db.pool.do(thd)\n\n class Thunk:\n pass\n\n def getChangeClassifications(self, objectid, branch=Thunk,\n repository=Thunk, project=Thunk,\n codebase=Thunk):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n\n wc = (sch_ch_tbl.c.objectid == objectid)\n\n # may need to filter further based on branch, etc\n extra_wheres = []\n if branch is not self.Thunk:\n extra_wheres.append(ch_tbl.c.branch == branch)\n if repository is not self.Thunk:\n extra_wheres.append(ch_tbl.c.repository == repository)\n if project is not self.Thunk:\n extra_wheres.append(ch_tbl.c.project == project)\n if codebase is not self.Thunk:\n extra_wheres.append(ch_tbl.c.codebase == codebase)\n\n # if we need to filter further append those, as 
well as a join\n # on changeid (but just once for that one)\n if extra_wheres:\n wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)\n for w in extra_wheres:\n wc &= w\n\n q = sa.select(\n [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],\n whereclause=wc)\n return dict([(r.changeid, [False, True][r.important])\n for r in conn.execute(q)])\n return self.db.pool.do(thd)\n", "path": "master/buildbot/db/schedulers.py"}]}
1,309
242
gh_patches_debug_36820
rasdani/github-patches
git_diff
open-mmlab__mmdetection-7407
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ./tools/analysis_tools/analyze_logs.py plot_curve IndexError: list index out of range `(openmmlab) lbc@prust-System-3:~/mmdetection-master$ python3.8 ./tools/analysis_tools/analyze_logs.py plot_curve ./work_dirs/deformable_detr_twostage_refine_r50_16x2_50e_coco/20211119_170702.log.json --keys bbox_mAP plot curve of ./work_dirs/deformable_detr_twostage_refine_r50_16x2_50e_coco/20211119_170702.log.json, metric is bbox_mAP Traceback (most recent call last): File "./tools/analysis_tools/analyze_logs.py", line 180, in <module> main() File "./tools/analysis_tools/analyze_logs.py", line 176, in main eval(args.task)(log_dicts, args) File "./tools/analysis_tools/analyze_logs.py", line 53, in plot_curve if metric not in log_dict[epochs[0]]: IndexError: list index out of range ` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/analysis_tools/analyze_logs.py` Content: ``` 1 # Copyright (c) OpenMMLab. All rights reserved. 2 import argparse 3 import json 4 from collections import defaultdict 5 6 import matplotlib.pyplot as plt 7 import numpy as np 8 import seaborn as sns 9 10 11 def cal_train_time(log_dicts, args): 12 for i, log_dict in enumerate(log_dicts): 13 print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') 14 all_times = [] 15 for epoch in log_dict.keys(): 16 if args.include_outliers: 17 all_times.append(log_dict[epoch]['time']) 18 else: 19 all_times.append(log_dict[epoch]['time'][1:]) 20 all_times = np.array(all_times) 21 epoch_ave_time = all_times.mean(-1) 22 slowest_epoch = epoch_ave_time.argmax() 23 fastest_epoch = epoch_ave_time.argmin() 24 std_over_epoch = epoch_ave_time.std() 25 print(f'slowest epoch {slowest_epoch + 1}, ' 26 f'average time is {epoch_ave_time[slowest_epoch]:.4f}') 27 print(f'fastest epoch {fastest_epoch + 1}, ' 28 f'average time is {epoch_ave_time[fastest_epoch]:.4f}') 29 print(f'time std over epochs is {std_over_epoch:.4f}') 30 print(f'average iter time: {np.mean(all_times):.4f} s/iter') 31 print() 32 33 34 def plot_curve(log_dicts, args): 35 if args.backend is not None: 36 plt.switch_backend(args.backend) 37 sns.set_style(args.style) 38 # if legend is None, use {filename}_{key} as legend 39 legend = args.legend 40 if legend is None: 41 legend = [] 42 for json_log in args.json_logs: 43 for metric in args.keys: 44 legend.append(f'{json_log}_{metric}') 45 assert len(legend) == (len(args.json_logs) * len(args.keys)) 46 metrics = args.keys 47 48 num_metrics = len(metrics) 49 for i, log_dict in enumerate(log_dicts): 50 epochs = list(log_dict.keys()) 51 for j, metric in enumerate(metrics): 52 print(f'plot curve of {args.json_logs[i]}, metric is {metric}') 53 if metric not in log_dict[epochs[0]]: 54 raise KeyError( 55 f'{args.json_logs[i]} does not contain metric {metric}') 56 57 if 'mAP' in metric: 58 xs = np.arange(1, max(epochs) + 1) 59 ys = [] 60 for epoch in epochs: 61 ys += log_dict[epoch][metric] 62 ax = plt.gca() 63 ax.set_xticks(xs) 64 plt.xlabel('epoch') 65 plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') 66 else: 67 xs = [] 68 ys = [] 69 num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2] 70 for epoch in epochs: 71 iters = log_dict[epoch]['iter'] 72 if log_dict[epoch]['mode'][-1] == 'val': 73 iters = iters[:-1] 74 xs.append( 75 np.array(iters) + (epoch - 1) * num_iters_per_epoch) 76 
ys.append(np.array(log_dict[epoch][metric][:len(iters)])) 77 xs = np.concatenate(xs) 78 ys = np.concatenate(ys) 79 plt.xlabel('iter') 80 plt.plot( 81 xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) 82 plt.legend() 83 if args.title is not None: 84 plt.title(args.title) 85 if args.out is None: 86 plt.show() 87 else: 88 print(f'save curve to: {args.out}') 89 plt.savefig(args.out) 90 plt.cla() 91 92 93 def add_plot_parser(subparsers): 94 parser_plt = subparsers.add_parser( 95 'plot_curve', help='parser for plotting curves') 96 parser_plt.add_argument( 97 'json_logs', 98 type=str, 99 nargs='+', 100 help='path of train log in json format') 101 parser_plt.add_argument( 102 '--keys', 103 type=str, 104 nargs='+', 105 default=['bbox_mAP'], 106 help='the metric that you want to plot') 107 parser_plt.add_argument('--title', type=str, help='title of figure') 108 parser_plt.add_argument( 109 '--legend', 110 type=str, 111 nargs='+', 112 default=None, 113 help='legend of each plot') 114 parser_plt.add_argument( 115 '--backend', type=str, default=None, help='backend of plt') 116 parser_plt.add_argument( 117 '--style', type=str, default='dark', help='style of plt') 118 parser_plt.add_argument('--out', type=str, default=None) 119 120 121 def add_time_parser(subparsers): 122 parser_time = subparsers.add_parser( 123 'cal_train_time', 124 help='parser for computing the average time per training iteration') 125 parser_time.add_argument( 126 'json_logs', 127 type=str, 128 nargs='+', 129 help='path of train log in json format') 130 parser_time.add_argument( 131 '--include-outliers', 132 action='store_true', 133 help='include the first value of every epoch when computing ' 134 'the average time') 135 136 137 def parse_args(): 138 parser = argparse.ArgumentParser(description='Analyze Json Log') 139 # currently only support plot curve and calculate average train time 140 subparsers = parser.add_subparsers(dest='task', help='task parser') 141 add_plot_parser(subparsers) 142 add_time_parser(subparsers) 143 args = parser.parse_args() 144 return args 145 146 147 def load_json_logs(json_logs): 148 # load and convert json_logs to log_dict, key is epoch, value is a sub dict 149 # keys of sub dict is different metrics, e.g. memory, bbox_mAP 150 # value of sub dict is a list of corresponding values of all iterations 151 log_dicts = [dict() for _ in json_logs] 152 for json_log, log_dict in zip(json_logs, log_dicts): 153 with open(json_log, 'r') as log_file: 154 for line in log_file: 155 log = json.loads(line.strip()) 156 # skip lines without `epoch` field 157 if 'epoch' not in log: 158 continue 159 epoch = log.pop('epoch') 160 if epoch not in log_dict: 161 log_dict[epoch] = defaultdict(list) 162 for k, v in log.items(): 163 log_dict[epoch][k].append(v) 164 return log_dicts 165 166 167 def main(): 168 args = parse_args() 169 170 json_logs = args.json_logs 171 for json_log in json_logs: 172 assert json_log.endswith('.json') 173 174 log_dicts = load_json_logs(json_logs) 175 176 eval(args.task)(log_dicts, args) 177 178 179 if __name__ == '__main__': 180 main() 181 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py old mode 100644 new mode 100755 --- a/tools/analysis_tools/analyze_logs.py +++ b/tools/analysis_tools/analyze_logs.py @@ -17,6 +17,10 @@ all_times.append(log_dict[epoch]['time']) else: all_times.append(log_dict[epoch]['time'][1:]) + if not all_times: + raise KeyError( + 'Please reduce the log interval in the config so that' + 'interval is less than iterations of one epoch.') all_times = np.array(all_times) epoch_ave_time = all_times.mean(-1) slowest_epoch = epoch_ave_time.argmax() @@ -50,12 +54,21 @@ epochs = list(log_dict.keys()) for j, metric in enumerate(metrics): print(f'plot curve of {args.json_logs[i]}, metric is {metric}') - if metric not in log_dict[epochs[0]]: + if metric not in log_dict[epochs[int(args.start_epoch) - 1]]: + if 'mAP' in metric: + raise KeyError( + f'{args.json_logs[i]} does not contain metric ' + f'{metric}. Please check if "--no-validate" is ' + 'specified when you trained the model.') raise KeyError( - f'{args.json_logs[i]} does not contain metric {metric}') + f'{args.json_logs[i]} does not contain metric {metric}. ' + 'Please reduce the log interval in the config so that ' + 'interval is less than iterations of one epoch.') if 'mAP' in metric: - xs = np.arange(1, max(epochs) + 1) + xs = np.arange( + int(args.start_epoch), + max(epochs) + 1, int(args.eval_interval)) ys = [] for epoch in epochs: ys += log_dict[epoch][metric] @@ -104,6 +117,16 @@ nargs='+', default=['bbox_mAP'], help='the metric that you want to plot') + parser_plt.add_argument( + '--start-epoch', + type=str, + default='1', + help='the epoch that you want to start') + parser_plt.add_argument( + '--eval-interval', + type=str, + default='1', + help='the eval interval when training') parser_plt.add_argument('--title', type=str, help='title of figure') parser_plt.add_argument( '--legend',
{"golden_diff": "diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py\nold mode 100644\nnew mode 100755\n--- a/tools/analysis_tools/analyze_logs.py\n+++ b/tools/analysis_tools/analyze_logs.py\n@@ -17,6 +17,10 @@\n all_times.append(log_dict[epoch]['time'])\n else:\n all_times.append(log_dict[epoch]['time'][1:])\n+ if not all_times:\n+ raise KeyError(\n+ 'Please reduce the log interval in the config so that'\n+ 'interval is less than iterations of one epoch.')\n all_times = np.array(all_times)\n epoch_ave_time = all_times.mean(-1)\n slowest_epoch = epoch_ave_time.argmax()\n@@ -50,12 +54,21 @@\n epochs = list(log_dict.keys())\n for j, metric in enumerate(metrics):\n print(f'plot curve of {args.json_logs[i]}, metric is {metric}')\n- if metric not in log_dict[epochs[0]]:\n+ if metric not in log_dict[epochs[int(args.start_epoch) - 1]]:\n+ if 'mAP' in metric:\n+ raise KeyError(\n+ f'{args.json_logs[i]} does not contain metric '\n+ f'{metric}. Please check if \"--no-validate\" is '\n+ 'specified when you trained the model.')\n raise KeyError(\n- f'{args.json_logs[i]} does not contain metric {metric}')\n+ f'{args.json_logs[i]} does not contain metric {metric}. '\n+ 'Please reduce the log interval in the config so that '\n+ 'interval is less than iterations of one epoch.')\n \n if 'mAP' in metric:\n- xs = np.arange(1, max(epochs) + 1)\n+ xs = np.arange(\n+ int(args.start_epoch),\n+ max(epochs) + 1, int(args.eval_interval))\n ys = []\n for epoch in epochs:\n ys += log_dict[epoch][metric]\n@@ -104,6 +117,16 @@\n nargs='+',\n default=['bbox_mAP'],\n help='the metric that you want to plot')\n+ parser_plt.add_argument(\n+ '--start-epoch',\n+ type=str,\n+ default='1',\n+ help='the epoch that you want to start')\n+ parser_plt.add_argument(\n+ '--eval-interval',\n+ type=str,\n+ default='1',\n+ help='the eval interval when training')\n parser_plt.add_argument('--title', type=str, help='title of figure')\n parser_plt.add_argument(\n '--legend',\n", "issue": "./tools/analysis_tools/analyze_logs.py plot_curve IndexError: list index out of range\n`(openmmlab) lbc@prust-System-3:~/mmdetection-master$ python3.8 ./tools/analysis_tools/analyze_logs.py plot_curve ./work_dirs/deformable_detr_twostage_refine_r50_16x2_50e_coco/20211119_170702.log.json --keys bbox_mAP \r\nplot curve of ./work_dirs/deformable_detr_twostage_refine_r50_16x2_50e_coco/20211119_170702.log.json, metric is bbox_mAP\r\nTraceback (most recent call last):\r\n File \"./tools/analysis_tools/analyze_logs.py\", line 180, in <module>\r\n main()\r\n File \"./tools/analysis_tools/analyze_logs.py\", line 176, in main\r\n eval(args.task)(log_dicts, args)\r\n File \"./tools/analysis_tools/analyze_logs.py\", line 53, in plot_curve\r\n if metric not in log_dict[epochs[0]]:\r\nIndexError: list index out of range\r\n`\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport argparse\nimport json\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\ndef cal_train_time(log_dicts, args):\n for i, log_dict in enumerate(log_dicts):\n print(f'{\"-\" * 5}Analyze train time of {args.json_logs[i]}{\"-\" * 5}')\n all_times = []\n for epoch in log_dict.keys():\n if args.include_outliers:\n all_times.append(log_dict[epoch]['time'])\n else:\n all_times.append(log_dict[epoch]['time'][1:])\n all_times = np.array(all_times)\n epoch_ave_time = all_times.mean(-1)\n slowest_epoch = epoch_ave_time.argmax()\n fastest_epoch = epoch_ave_time.argmin()\n std_over_epoch = epoch_ave_time.std()\n print(f'slowest epoch {slowest_epoch + 1}, '\n f'average time is {epoch_ave_time[slowest_epoch]:.4f}')\n print(f'fastest epoch {fastest_epoch + 1}, '\n f'average time is {epoch_ave_time[fastest_epoch]:.4f}')\n print(f'time std over epochs is {std_over_epoch:.4f}')\n print(f'average iter time: {np.mean(all_times):.4f} s/iter')\n print()\n\n\ndef plot_curve(log_dicts, args):\n if args.backend is not None:\n plt.switch_backend(args.backend)\n sns.set_style(args.style)\n # if legend is None, use {filename}_{key} as legend\n legend = args.legend\n if legend is None:\n legend = []\n for json_log in args.json_logs:\n for metric in args.keys:\n legend.append(f'{json_log}_{metric}')\n assert len(legend) == (len(args.json_logs) * len(args.keys))\n metrics = args.keys\n\n num_metrics = len(metrics)\n for i, log_dict in enumerate(log_dicts):\n epochs = list(log_dict.keys())\n for j, metric in enumerate(metrics):\n print(f'plot curve of {args.json_logs[i]}, metric is {metric}')\n if metric not in log_dict[epochs[0]]:\n raise KeyError(\n f'{args.json_logs[i]} does not contain metric {metric}')\n\n if 'mAP' in metric:\n xs = np.arange(1, max(epochs) + 1)\n ys = []\n for epoch in epochs:\n ys += log_dict[epoch][metric]\n ax = plt.gca()\n ax.set_xticks(xs)\n plt.xlabel('epoch')\n plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')\n else:\n xs = []\n ys = []\n num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2]\n for epoch in epochs:\n iters = log_dict[epoch]['iter']\n if log_dict[epoch]['mode'][-1] == 'val':\n iters = iters[:-1]\n xs.append(\n np.array(iters) + (epoch - 1) * num_iters_per_epoch)\n ys.append(np.array(log_dict[epoch][metric][:len(iters)]))\n xs = np.concatenate(xs)\n ys = np.concatenate(ys)\n plt.xlabel('iter')\n plt.plot(\n xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)\n plt.legend()\n if args.title is not None:\n plt.title(args.title)\n if args.out is None:\n plt.show()\n else:\n print(f'save curve to: {args.out}')\n plt.savefig(args.out)\n plt.cla()\n\n\ndef add_plot_parser(subparsers):\n parser_plt = subparsers.add_parser(\n 'plot_curve', help='parser for plotting curves')\n parser_plt.add_argument(\n 'json_logs',\n type=str,\n nargs='+',\n help='path of train log in json format')\n parser_plt.add_argument(\n '--keys',\n type=str,\n nargs='+',\n default=['bbox_mAP'],\n help='the metric that you want to plot')\n parser_plt.add_argument('--title', type=str, help='title of figure')\n parser_plt.add_argument(\n '--legend',\n type=str,\n nargs='+',\n default=None,\n help='legend of each plot')\n parser_plt.add_argument(\n '--backend', type=str, default=None, help='backend of plt')\n parser_plt.add_argument(\n '--style', type=str, default='dark', help='style of plt')\n parser_plt.add_argument('--out', type=str, default=None)\n\n\ndef add_time_parser(subparsers):\n parser_time = 
subparsers.add_parser(\n 'cal_train_time',\n help='parser for computing the average time per training iteration')\n parser_time.add_argument(\n 'json_logs',\n type=str,\n nargs='+',\n help='path of train log in json format')\n parser_time.add_argument(\n '--include-outliers',\n action='store_true',\n help='include the first value of every epoch when computing '\n 'the average time')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Analyze Json Log')\n # currently only support plot curve and calculate average train time\n subparsers = parser.add_subparsers(dest='task', help='task parser')\n add_plot_parser(subparsers)\n add_time_parser(subparsers)\n args = parser.parse_args()\n return args\n\n\ndef load_json_logs(json_logs):\n # load and convert json_logs to log_dict, key is epoch, value is a sub dict\n # keys of sub dict is different metrics, e.g. memory, bbox_mAP\n # value of sub dict is a list of corresponding values of all iterations\n log_dicts = [dict() for _ in json_logs]\n for json_log, log_dict in zip(json_logs, log_dicts):\n with open(json_log, 'r') as log_file:\n for line in log_file:\n log = json.loads(line.strip())\n # skip lines without `epoch` field\n if 'epoch' not in log:\n continue\n epoch = log.pop('epoch')\n if epoch not in log_dict:\n log_dict[epoch] = defaultdict(list)\n for k, v in log.items():\n log_dict[epoch][k].append(v)\n return log_dicts\n\n\ndef main():\n args = parse_args()\n\n json_logs = args.json_logs\n for json_log in json_logs:\n assert json_log.endswith('.json')\n\n log_dicts = load_json_logs(json_logs)\n\n eval(args.task)(log_dicts, args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/analysis_tools/analyze_logs.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport json\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\ndef cal_train_time(log_dicts, args):\n for i, log_dict in enumerate(log_dicts):\n print(f'{\"-\" * 5}Analyze train time of {args.json_logs[i]}{\"-\" * 5}')\n all_times = []\n for epoch in log_dict.keys():\n if args.include_outliers:\n all_times.append(log_dict[epoch]['time'])\n else:\n all_times.append(log_dict[epoch]['time'][1:])\n if not all_times:\n raise KeyError(\n 'Please reduce the log interval in the config so that'\n 'interval is less than iterations of one epoch.')\n all_times = np.array(all_times)\n epoch_ave_time = all_times.mean(-1)\n slowest_epoch = epoch_ave_time.argmax()\n fastest_epoch = epoch_ave_time.argmin()\n std_over_epoch = epoch_ave_time.std()\n print(f'slowest epoch {slowest_epoch + 1}, '\n f'average time is {epoch_ave_time[slowest_epoch]:.4f}')\n print(f'fastest epoch {fastest_epoch + 1}, '\n f'average time is {epoch_ave_time[fastest_epoch]:.4f}')\n print(f'time std over epochs is {std_over_epoch:.4f}')\n print(f'average iter time: {np.mean(all_times):.4f} s/iter')\n print()\n\n\ndef plot_curve(log_dicts, args):\n if args.backend is not None:\n plt.switch_backend(args.backend)\n sns.set_style(args.style)\n # if legend is None, use {filename}_{key} as legend\n legend = args.legend\n if legend is None:\n legend = []\n for json_log in args.json_logs:\n for metric in args.keys:\n legend.append(f'{json_log}_{metric}')\n assert len(legend) == (len(args.json_logs) * len(args.keys))\n metrics = args.keys\n\n num_metrics = len(metrics)\n for i, log_dict in enumerate(log_dicts):\n epochs = list(log_dict.keys())\n for j, metric in enumerate(metrics):\n print(f'plot 
curve of {args.json_logs[i]}, metric is {metric}')\n if metric not in log_dict[epochs[int(args.start_epoch) - 1]]:\n if 'mAP' in metric:\n raise KeyError(\n f'{args.json_logs[i]} does not contain metric '\n f'{metric}. Please check if \"--no-validate\" is '\n 'specified when you trained the model.')\n raise KeyError(\n f'{args.json_logs[i]} does not contain metric {metric}. '\n 'Please reduce the log interval in the config so that '\n 'interval is less than iterations of one epoch.')\n\n if 'mAP' in metric:\n xs = np.arange(\n int(args.start_epoch),\n max(epochs) + 1, int(args.eval_interval))\n ys = []\n for epoch in epochs:\n ys += log_dict[epoch][metric]\n ax = plt.gca()\n ax.set_xticks(xs)\n plt.xlabel('epoch')\n plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')\n else:\n xs = []\n ys = []\n num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2]\n for epoch in epochs:\n iters = log_dict[epoch]['iter']\n if log_dict[epoch]['mode'][-1] == 'val':\n iters = iters[:-1]\n xs.append(\n np.array(iters) + (epoch - 1) * num_iters_per_epoch)\n ys.append(np.array(log_dict[epoch][metric][:len(iters)]))\n xs = np.concatenate(xs)\n ys = np.concatenate(ys)\n plt.xlabel('iter')\n plt.plot(\n xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)\n plt.legend()\n if args.title is not None:\n plt.title(args.title)\n if args.out is None:\n plt.show()\n else:\n print(f'save curve to: {args.out}')\n plt.savefig(args.out)\n plt.cla()\n\n\ndef add_plot_parser(subparsers):\n parser_plt = subparsers.add_parser(\n 'plot_curve', help='parser for plotting curves')\n parser_plt.add_argument(\n 'json_logs',\n type=str,\n nargs='+',\n help='path of train log in json format')\n parser_plt.add_argument(\n '--keys',\n type=str,\n nargs='+',\n default=['bbox_mAP'],\n help='the metric that you want to plot')\n parser_plt.add_argument(\n '--start-epoch',\n type=str,\n default='1',\n help='the epoch that you want to start')\n parser_plt.add_argument(\n '--eval-interval',\n type=str,\n default='1',\n help='the eval interval when training')\n parser_plt.add_argument('--title', type=str, help='title of figure')\n parser_plt.add_argument(\n '--legend',\n type=str,\n nargs='+',\n default=None,\n help='legend of each plot')\n parser_plt.add_argument(\n '--backend', type=str, default=None, help='backend of plt')\n parser_plt.add_argument(\n '--style', type=str, default='dark', help='style of plt')\n parser_plt.add_argument('--out', type=str, default=None)\n\n\ndef add_time_parser(subparsers):\n parser_time = subparsers.add_parser(\n 'cal_train_time',\n help='parser for computing the average time per training iteration')\n parser_time.add_argument(\n 'json_logs',\n type=str,\n nargs='+',\n help='path of train log in json format')\n parser_time.add_argument(\n '--include-outliers',\n action='store_true',\n help='include the first value of every epoch when computing '\n 'the average time')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Analyze Json Log')\n # currently only support plot curve and calculate average train time\n subparsers = parser.add_subparsers(dest='task', help='task parser')\n add_plot_parser(subparsers)\n add_time_parser(subparsers)\n args = parser.parse_args()\n return args\n\n\ndef load_json_logs(json_logs):\n # load and convert json_logs to log_dict, key is epoch, value is a sub dict\n # keys of sub dict is different metrics, e.g. 
memory, bbox_mAP\n # value of sub dict is a list of corresponding values of all iterations\n log_dicts = [dict() for _ in json_logs]\n for json_log, log_dict in zip(json_logs, log_dicts):\n with open(json_log, 'r') as log_file:\n for line in log_file:\n log = json.loads(line.strip())\n # skip lines without `epoch` field\n if 'epoch' not in log:\n continue\n epoch = log.pop('epoch')\n if epoch not in log_dict:\n log_dict[epoch] = defaultdict(list)\n for k, v in log.items():\n log_dict[epoch][k].append(v)\n return log_dicts\n\n\ndef main():\n args = parse_args()\n\n json_logs = args.json_logs\n for json_log in json_logs:\n assert json_log.endswith('.json')\n\n log_dicts = load_json_logs(json_logs)\n\n eval(args.task)(log_dicts, args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/analysis_tools/analyze_logs.py"}]}
2,423
588
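For the record above, a standalone sketch may help connect the traceback to the patch: the `IndexError` at `epochs[0]` appears when `load_json_logs` finds no line carrying an `epoch` field, the situation the patched error message attributes to a log interval larger than one epoch's iteration count. The snippet below uses a hypothetical log line, not an actual mmdetection run.

```python
# Hypothetical .log.json content -- every line lacks an "epoch" field.
import json
from collections import defaultdict

log_lines = ['{"mode": "train", "iter": 50, "lr": 0.02, "loss": 0.91}']

log_dict = {}
for line in log_lines:
    log = json.loads(line.strip())
    if 'epoch' not in log:      # same skip as load_json_logs()
        continue
    epoch = log.pop('epoch')
    if epoch not in log_dict:
        log_dict[epoch] = defaultdict(list)
    for k, v in log.items():
        log_dict[epoch][k].append(v)

epochs = list(log_dict.keys())
print(epochs)   # [] -- so log_dict[epochs[0]] in plot_curve raises IndexError
```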
gh_patches_debug_20333
rasdani/github-patches
git_diff
scipy__scipy-10309
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- lsoda fails to detect stiff problem when called from solve_ivp <!-- Thank you for taking the time to report a SciPy issue. Please describe the issue in detail, and for bug reports fill in the fields below. You can delete the sections that don't apply to your issue. You can view the final output by clicking the preview button above. --> Both the `ode` and the `solve_ivp` solver interfaces wrap the venerable lsoda solver. As far as I can see, `solve_ivp` even internally calls the `ode` wrapper. Strangely enough, when tested on a stiff problem, lsoda when called through `ode` handles stiffness just fine. However, when called through `solve_ivp`, it behaves as if using the non-stiff solver. The only parameter difference I can spot is the reduced default `atol` and `rtol` parameter, however, even setting them to the `ode` defaults will not change the behavior of `solve_ivp`. (Off-topic comment: I find the default tolerances for `solve_ivp` unnecessarily poor, especially given that any other blackbox solver I know, including the two older Scipy APIs, have stricter defaults. I would strongly recommend using common defaults across Scipy, otherwise an already confusing situation is made more confusing and less robust.) ### Reproducing code example: <!-- If you place your code between the triple backticks below, it will be marked as a code block automatically --> ```python #!/usr/bin/python3 from pylab import * from scipy.integrate import solve_ivp from scipy.integrate import ode import time mu = 1000 y0 = r_[0.5, 0.5] T = 500 tt = linspace(T/200,T,200) def van_der_pol(t, y): return r_[y[1], mu*(1.0-y[0]**2)*y[1]-y[0]] c1 = time.process_time() sol1 = solve_ivp(van_der_pol, [0,T], y0, t_eval=tt, method = 'LSODA', rtol=1e-6, atol=1e-12) c2 = time.process_time() r2 = ode(van_der_pol).set_integrator('lsoda') r2.set_initial_value(y0) sol2 = array([r2.integrate(T) for T in tt]) c3 = time.process_time() print ("Time for LSODA (solve_ivp): " + str(c2-c1)) print ("Time for lsoda (ode): " + str(c3-c2)) figure() plot(sol1.t, sol1.y[0,:], label='LSODA (solve_ivp)') plot(tt, sol2[:,0], label='lsoda (ode)') legend() show() ``` This yields the following output (graphical output is qualitatively OK with both cases being very close): <!-- If any, paste the *full* error message inside a code block as above (starting from line Traceback) --> ``` Time for LSODA (solve_ivp): 35.811249636000014 Time for lsoda (ode): 0.024488598000004913 ``` So there is more than a factor 1000 performance difference, these numbers indicates that in the ill-performing case, `lsoda` has not switched to using a BDF method. I tried to find any substantial differences in the code paths of the two wrappers, without success. ### Scipy/Numpy/Python version information: <!-- You can simply run the following and paste the result in a code block ``` import sys, scipy, numpy; print(scipy.__version__, numpy.__version__, sys.version_info) ``` --> 1.1.0 1.15.1 sys.version_info(major=3, minor=7, micro=2, releaselevel='final', serial=0) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `scipy/integrate/_ivp/lsoda.py` Content: ``` 1 import numpy as np 2 from scipy.integrate import ode 3 from .common import validate_tol, validate_first_step, warn_extraneous 4 from .base import OdeSolver, DenseOutput 5 6 7 class LSODA(OdeSolver): 8 """Adams/BDF method with automatic stiffness detection and switching. 9 10 This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches 11 automatically between the nonstiff Adams method and the stiff BDF method. 12 The method was originally detailed in [2]_. 13 14 Parameters 15 ---------- 16 fun : callable 17 Right-hand side of the system. The calling signature is ``fun(t, y)``. 18 Here ``t`` is a scalar, and there are two options for the ndarray ``y``: 19 It can either have shape (n,); then ``fun`` must return array_like with 20 shape (n,). Alternatively it can have shape (n, k); then ``fun`` 21 must return an array_like with shape (n, k), i.e. each column 22 corresponds to a single column in ``y``. The choice between the two 23 options is determined by `vectorized` argument (see below). The 24 vectorized implementation allows a faster approximation of the Jacobian 25 by finite differences (required for this solver). 26 t0 : float 27 Initial time. 28 y0 : array_like, shape (n,) 29 Initial state. 30 t_bound : float 31 Boundary time - the integration won't continue beyond it. It also 32 determines the direction of the integration. 33 first_step : float or None, optional 34 Initial step size. Default is ``None`` which means that the algorithm 35 should choose. 36 min_step : float, optional 37 Minimum allowed step size. Default is 0.0, i.e. the step size is not 38 bounded and determined solely by the solver. 39 max_step : float, optional 40 Maximum allowed step size. Default is np.inf, i.e. the step size is not 41 bounded and determined solely by the solver. 42 rtol, atol : float and array_like, optional 43 Relative and absolute tolerances. The solver keeps the local error 44 estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a 45 relative accuracy (number of correct digits). But if a component of `y` 46 is approximately below `atol`, the error only needs to fall within 47 the same `atol` threshold, and the number of correct digits is not 48 guaranteed. If components of y have different scales, it might be 49 beneficial to set different `atol` values for different components by 50 passing array_like with shape (n,) for `atol`. Default values are 51 1e-3 for `rtol` and 1e-6 for `atol`. 52 jac : None or callable, optional 53 Jacobian matrix of the right-hand side of the system with respect to 54 ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is 55 equal to ``d f_i / d y_j``. The function will be called as 56 ``jac(t, y)``. If None (default), the Jacobian will be 57 approximated by finite differences. It is generally recommended to 58 provide the Jacobian rather than relying on a finite-difference 59 approximation. 60 lband, uband : int or None 61 Parameters defining the bandwidth of the Jacobian, 62 i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting 63 these requires your jac routine to return the Jacobian in the packed format: 64 the returned array must have ``n`` columns and ``uband + lband + 1`` 65 rows in which Jacobian diagonals are written. Specifically 66 ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used 67 in `scipy.linalg.solve_banded` (check for an illustration). 
68 These parameters can be also used with ``jac=None`` to reduce the 69 number of Jacobian elements estimated by finite differences. 70 vectorized : bool, optional 71 Whether `fun` is implemented in a vectorized fashion. A vectorized 72 implementation offers no advantages for this solver. Default is False. 73 74 Attributes 75 ---------- 76 n : int 77 Number of equations. 78 status : string 79 Current status of the solver: 'running', 'finished' or 'failed'. 80 t_bound : float 81 Boundary time. 82 direction : float 83 Integration direction: +1 or -1. 84 t : float 85 Current time. 86 y : ndarray 87 Current state. 88 t_old : float 89 Previous time. None if no steps were made yet. 90 nfev : int 91 Number of evaluations of the right-hand side. 92 njev : int 93 Number of evaluations of the Jacobian. 94 95 References 96 ---------- 97 .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE 98 Solvers," IMACS Transactions on Scientific Computation, Vol 1., 99 pp. 55-64, 1983. 100 .. [2] L. Petzold, "Automatic selection of methods for solving stiff and 101 nonstiff systems of ordinary differential equations", SIAM Journal 102 on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148, 103 1983. 104 """ 105 def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0, 106 max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None, 107 uband=None, vectorized=False, **extraneous): 108 warn_extraneous(extraneous) 109 super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized) 110 111 if first_step is None: 112 first_step = 0 # LSODA value for automatic selection. 113 else: 114 first_step = validate_first_step(first_step, t0, t_bound) 115 116 first_step *= self.direction 117 118 if max_step == np.inf: 119 max_step = 0 # LSODA value for infinity. 120 elif max_step <= 0: 121 raise ValueError("`max_step` must be positive.") 122 123 if min_step < 0: 124 raise ValueError("`min_step` must be nonnegative.") 125 126 rtol, atol = validate_tol(rtol, atol, self.n) 127 128 if jac is None: # No lambda as PEP8 insists. 129 def jac(): 130 return None 131 132 solver = ode(self.fun, jac) 133 solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step, 134 min_step=min_step, first_step=first_step, 135 lband=lband, uband=uband) 136 solver.set_initial_value(y0, t0) 137 138 # Inject t_bound into rwork array as needed for itask=5. 139 solver._integrator.rwork[0] = self.t_bound 140 solver._integrator.call_args[4] = solver._integrator.rwork 141 142 self._lsoda_solver = solver 143 144 def _step_impl(self): 145 solver = self._lsoda_solver 146 integrator = solver._integrator 147 148 # From lsoda.step and lsoda.integrate itask=5 means take a single 149 # step and do not go past t_bound. 150 itask = integrator.call_args[2] 151 integrator.call_args[2] = 5 152 solver._y, solver.t = integrator.run( 153 solver.f, solver.jac, solver._y, solver.t, 154 self.t_bound, solver.f_params, solver.jac_params) 155 integrator.call_args[2] = itask 156 157 if solver.successful(): 158 self.t = solver.t 159 self.y = solver._y 160 # From LSODA Fortran source njev is equal to nlu. 161 self.njev = integrator.iwork[12] 162 self.nlu = integrator.iwork[12] 163 return True, None 164 else: 165 return False, 'Unexpected istate in LSODA.' 
166 167 def _dense_output_impl(self): 168 iwork = self._lsoda_solver._integrator.iwork 169 rwork = self._lsoda_solver._integrator.rwork 170 171 order = iwork[14] 172 h = rwork[11] 173 yh = np.reshape(rwork[20:20 + (order + 1) * self.n], 174 (self.n, order + 1), order='F').copy() 175 176 return LsodaDenseOutput(self.t_old, self.t, h, order, yh) 177 178 179 class LsodaDenseOutput(DenseOutput): 180 def __init__(self, t_old, t, h, order, yh): 181 super(LsodaDenseOutput, self).__init__(t_old, t) 182 self.h = h 183 self.yh = yh 184 self.p = np.arange(order + 1) 185 186 def _call_impl(self, t): 187 if t.ndim == 0: 188 x = ((t - self.t) / self.h) ** self.p 189 else: 190 x = ((t - self.t) / self.h) ** self.p[:, None] 191 192 return np.dot(self.yh, x) 193 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scipy/integrate/_ivp/lsoda.py b/scipy/integrate/_ivp/lsoda.py --- a/scipy/integrate/_ivp/lsoda.py +++ b/scipy/integrate/_ivp/lsoda.py @@ -125,10 +125,6 @@ rtol, atol = validate_tol(rtol, atol, self.n) - if jac is None: # No lambda as PEP8 insists. - def jac(): - return None - solver = ode(self.fun, jac) solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step, min_step=min_step, first_step=first_step, @@ -150,7 +146,7 @@ itask = integrator.call_args[2] integrator.call_args[2] = 5 solver._y, solver.t = integrator.run( - solver.f, solver.jac, solver._y, solver.t, + solver.f, solver.jac or (lambda: None), solver._y, solver.t, self.t_bound, solver.f_params, solver.jac_params) integrator.call_args[2] = itask
{"golden_diff": "diff --git a/scipy/integrate/_ivp/lsoda.py b/scipy/integrate/_ivp/lsoda.py\n--- a/scipy/integrate/_ivp/lsoda.py\n+++ b/scipy/integrate/_ivp/lsoda.py\n@@ -125,10 +125,6 @@\n \n rtol, atol = validate_tol(rtol, atol, self.n)\n \n- if jac is None: # No lambda as PEP8 insists.\n- def jac():\n- return None\n-\n solver = ode(self.fun, jac)\n solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,\n min_step=min_step, first_step=first_step,\n@@ -150,7 +146,7 @@\n itask = integrator.call_args[2]\n integrator.call_args[2] = 5\n solver._y, solver.t = integrator.run(\n- solver.f, solver.jac, solver._y, solver.t,\n+ solver.f, solver.jac or (lambda: None), solver._y, solver.t,\n self.t_bound, solver.f_params, solver.jac_params)\n integrator.call_args[2] = itask\n", "issue": "lsoda fails to detect stiff problem when called from solve_ivp\n<!-- \r\nThank you for taking the time to report a SciPy issue.\r\nPlease describe the issue in detail, and for bug reports\r\nfill in the fields below. You can delete the sections that \r\ndon't apply to your issue. You can view the final output\r\nby clicking the preview button above.\r\n-->\r\n\r\nBoth the `ode` and the `solve_ivp` solver interfaces wrap the venerable lsoda solver. As far as I can see, `solve_ivp` even internally calls the `ode` wrapper. Strangely enough, when tested on a stiff problem, lsoda when called through `ode` handles stiffness just fine. However, when called through `solve_ivp`, it behaves as if using the non-stiff solver. The only parameter difference I can spot is the reduced default `atol` and `rtol` parameter, however, even setting them to the `ode` defaults will not change the behavior of `solve_ivp`. (Off-topic comment: I find the default tolerances for `solve_ivp` unnecessarily poor, especially given that any other blackbox solver I know, including the two older Scipy APIs, have stricter defaults. 
I would strongly recommend using common defaults across Scipy, otherwise an already confusing situation is made more confusing and less robust.)\r\n\r\n### Reproducing code example:\r\n<!-- \r\nIf you place your code between the triple backticks below, \r\nit will be marked as a code block automatically \r\n-->\r\n\r\n\r\n```python\r\n#!/usr/bin/python3\r\n\r\nfrom pylab import *\r\nfrom scipy.integrate import solve_ivp\r\nfrom scipy.integrate import ode\r\nimport time\r\n \r\nmu = 1000\r\ny0 = r_[0.5, 0.5]\r\nT = 500\r\ntt = linspace(T/200,T,200)\r\n\r\ndef van_der_pol(t, y):\r\n return r_[y[1], mu*(1.0-y[0]**2)*y[1]-y[0]]\r\n\r\nc1 = time.process_time()\r\nsol1 = solve_ivp(van_der_pol, [0,T], y0, t_eval=tt,\r\n method = 'LSODA', rtol=1e-6, atol=1e-12)\r\n\r\nc2 = time.process_time()\r\nr2 = ode(van_der_pol).set_integrator('lsoda')\r\nr2.set_initial_value(y0)\r\nsol2 = array([r2.integrate(T) for T in tt])\r\n\r\nc3 = time.process_time() \r\nprint (\"Time for LSODA (solve_ivp): \" + str(c2-c1))\r\nprint (\"Time for lsoda (ode): \" + str(c3-c2))\r\n\r\nfigure()\r\nplot(sol1.t, sol1.y[0,:], label='LSODA (solve_ivp)')\r\nplot(tt, sol2[:,0], label='lsoda (ode)')\r\nlegend()\r\nshow()\r\n```\r\nThis yields the following output (graphical output is qualitatively OK with both cases being very close):\r\n<!-- If any, paste the *full* error message inside a code block\r\nas above (starting from line Traceback)\r\n-->\r\n\r\n```\r\nTime for LSODA (solve_ivp): 35.811249636000014\r\nTime for lsoda (ode): 0.024488598000004913\r\n```\r\nSo there is more than a factor 1000 performance difference, these numbers indicates that in the ill-performing case, `lsoda` has not switched to using a BDF method.\r\n\r\nI tried to find any substantial differences in the code paths of the two wrappers, without success. \r\n\r\n### Scipy/Numpy/Python version information:\r\n<!-- You can simply run the following and paste the result in a code block\r\n```\r\nimport sys, scipy, numpy; print(scipy.__version__, numpy.__version__, sys.version_info)\r\n```\r\n-->\r\n1.1.0 1.15.1 sys.version_info(major=3, minor=7, micro=2, releaselevel='final', serial=0)\r\n\n", "before_files": [{"content": "import numpy as np\nfrom scipy.integrate import ode\nfrom .common import validate_tol, validate_first_step, warn_extraneous\nfrom .base import OdeSolver, DenseOutput\n\n\nclass LSODA(OdeSolver):\n \"\"\"Adams/BDF method with automatic stiffness detection and switching.\n\n This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches\n automatically between the nonstiff Adams method and the stiff BDF method.\n The method was originally detailed in [2]_.\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system. The calling signature is ``fun(t, y)``.\n Here ``t`` is a scalar, and there are two options for the ndarray ``y``:\n It can either have shape (n,); then ``fun`` must return array_like with\n shape (n,). Alternatively it can have shape (n, k); then ``fun``\n must return an array_like with shape (n, k), i.e. each column\n corresponds to a single column in ``y``. The choice between the two\n options is determined by `vectorized` argument (see below). The\n vectorized implementation allows a faster approximation of the Jacobian\n by finite differences (required for this solver).\n t0 : float\n Initial time.\n y0 : array_like, shape (n,)\n Initial state.\n t_bound : float\n Boundary time - the integration won't continue beyond it. 
It also\n determines the direction of the integration.\n first_step : float or None, optional\n Initial step size. Default is ``None`` which means that the algorithm\n should choose.\n min_step : float, optional\n Minimum allowed step size. Default is 0.0, i.e. the step size is not\n bounded and determined solely by the solver.\n max_step : float, optional\n Maximum allowed step size. Default is np.inf, i.e. the step size is not\n bounded and determined solely by the solver.\n rtol, atol : float and array_like, optional\n Relative and absolute tolerances. The solver keeps the local error\n estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a\n relative accuracy (number of correct digits). But if a component of `y`\n is approximately below `atol`, the error only needs to fall within\n the same `atol` threshold, and the number of correct digits is not\n guaranteed. If components of y have different scales, it might be\n beneficial to set different `atol` values for different components by\n passing array_like with shape (n,) for `atol`. Default values are\n 1e-3 for `rtol` and 1e-6 for `atol`.\n jac : None or callable, optional\n Jacobian matrix of the right-hand side of the system with respect to\n ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is\n equal to ``d f_i / d y_j``. The function will be called as\n ``jac(t, y)``. If None (default), the Jacobian will be\n approximated by finite differences. It is generally recommended to\n provide the Jacobian rather than relying on a finite-difference\n approximation.\n lband, uband : int or None\n Parameters defining the bandwidth of the Jacobian,\n i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting\n these requires your jac routine to return the Jacobian in the packed format:\n the returned array must have ``n`` columns and ``uband + lband + 1``\n rows in which Jacobian diagonals are written. Specifically\n ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used\n in `scipy.linalg.solve_banded` (check for an illustration).\n These parameters can be also used with ``jac=None`` to reduce the\n number of Jacobian elements estimated by finite differences.\n vectorized : bool, optional\n Whether `fun` is implemented in a vectorized fashion. A vectorized\n implementation offers no advantages for this solver. Default is False.\n\n Attributes\n ----------\n n : int\n Number of equations.\n status : string\n Current status of the solver: 'running', 'finished' or 'failed'.\n t_bound : float\n Boundary time.\n direction : float\n Integration direction: +1 or -1.\n t : float\n Current time.\n y : ndarray\n Current state.\n t_old : float\n Previous time. None if no steps were made yet.\n nfev : int\n Number of evaluations of the right-hand side.\n njev : int\n Number of evaluations of the Jacobian.\n\n References\n ----------\n .. [1] A. C. Hindmarsh, \"ODEPACK, A Systematized Collection of ODE\n Solvers,\" IMACS Transactions on Scientific Computation, Vol 1.,\n pp. 55-64, 1983.\n .. [2] L. Petzold, \"Automatic selection of methods for solving stiff and\n nonstiff systems of ordinary differential equations\", SIAM Journal\n on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 
136-148,\n 1983.\n \"\"\"\n def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,\n max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,\n uband=None, vectorized=False, **extraneous):\n warn_extraneous(extraneous)\n super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized)\n\n if first_step is None:\n first_step = 0 # LSODA value for automatic selection.\n else:\n first_step = validate_first_step(first_step, t0, t_bound)\n\n first_step *= self.direction\n\n if max_step == np.inf:\n max_step = 0 # LSODA value for infinity.\n elif max_step <= 0:\n raise ValueError(\"`max_step` must be positive.\")\n\n if min_step < 0:\n raise ValueError(\"`min_step` must be nonnegative.\")\n\n rtol, atol = validate_tol(rtol, atol, self.n)\n\n if jac is None: # No lambda as PEP8 insists.\n def jac():\n return None\n\n solver = ode(self.fun, jac)\n solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,\n min_step=min_step, first_step=first_step,\n lband=lband, uband=uband)\n solver.set_initial_value(y0, t0)\n\n # Inject t_bound into rwork array as needed for itask=5.\n solver._integrator.rwork[0] = self.t_bound\n solver._integrator.call_args[4] = solver._integrator.rwork\n\n self._lsoda_solver = solver\n\n def _step_impl(self):\n solver = self._lsoda_solver\n integrator = solver._integrator\n\n # From lsoda.step and lsoda.integrate itask=5 means take a single\n # step and do not go past t_bound.\n itask = integrator.call_args[2]\n integrator.call_args[2] = 5\n solver._y, solver.t = integrator.run(\n solver.f, solver.jac, solver._y, solver.t,\n self.t_bound, solver.f_params, solver.jac_params)\n integrator.call_args[2] = itask\n\n if solver.successful():\n self.t = solver.t\n self.y = solver._y\n # From LSODA Fortran source njev is equal to nlu.\n self.njev = integrator.iwork[12]\n self.nlu = integrator.iwork[12]\n return True, None\n else:\n return False, 'Unexpected istate in LSODA.'\n\n def _dense_output_impl(self):\n iwork = self._lsoda_solver._integrator.iwork\n rwork = self._lsoda_solver._integrator.rwork\n\n order = iwork[14]\n h = rwork[11]\n yh = np.reshape(rwork[20:20 + (order + 1) * self.n],\n (self.n, order + 1), order='F').copy()\n\n return LsodaDenseOutput(self.t_old, self.t, h, order, yh)\n\n\nclass LsodaDenseOutput(DenseOutput):\n def __init__(self, t_old, t, h, order, yh):\n super(LsodaDenseOutput, self).__init__(t_old, t)\n self.h = h\n self.yh = yh\n self.p = np.arange(order + 1)\n\n def _call_impl(self, t):\n if t.ndim == 0:\n x = ((t - self.t) / self.h) ** self.p\n else:\n x = ((t - self.t) / self.h) ** self.p[:, None]\n\n return np.dot(self.yh, x)\n", "path": "scipy/integrate/_ivp/lsoda.py"}], "after_files": [{"content": "import numpy as np\nfrom scipy.integrate import ode\nfrom .common import validate_tol, validate_first_step, warn_extraneous\nfrom .base import OdeSolver, DenseOutput\n\n\nclass LSODA(OdeSolver):\n \"\"\"Adams/BDF method with automatic stiffness detection and switching.\n\n This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches\n automatically between the nonstiff Adams method and the stiff BDF method.\n The method was originally detailed in [2]_.\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system. The calling signature is ``fun(t, y)``.\n Here ``t`` is a scalar, and there are two options for the ndarray ``y``:\n It can either have shape (n,); then ``fun`` must return array_like with\n shape (n,). 
Alternatively it can have shape (n, k); then ``fun``\n must return an array_like with shape (n, k), i.e. each column\n corresponds to a single column in ``y``. The choice between the two\n options is determined by `vectorized` argument (see below). The\n vectorized implementation allows a faster approximation of the Jacobian\n by finite differences (required for this solver).\n t0 : float\n Initial time.\n y0 : array_like, shape (n,)\n Initial state.\n t_bound : float\n Boundary time - the integration won't continue beyond it. It also\n determines the direction of the integration.\n first_step : float or None, optional\n Initial step size. Default is ``None`` which means that the algorithm\n should choose.\n min_step : float, optional\n Minimum allowed step size. Default is 0.0, i.e. the step size is not\n bounded and determined solely by the solver.\n max_step : float, optional\n Maximum allowed step size. Default is np.inf, i.e. the step size is not\n bounded and determined solely by the solver.\n rtol, atol : float and array_like, optional\n Relative and absolute tolerances. The solver keeps the local error\n estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a\n relative accuracy (number of correct digits). But if a component of `y`\n is approximately below `atol`, the error only needs to fall within\n the same `atol` threshold, and the number of correct digits is not\n guaranteed. If components of y have different scales, it might be\n beneficial to set different `atol` values for different components by\n passing array_like with shape (n,) for `atol`. Default values are\n 1e-3 for `rtol` and 1e-6 for `atol`.\n jac : None or callable, optional\n Jacobian matrix of the right-hand side of the system with respect to\n ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is\n equal to ``d f_i / d y_j``. The function will be called as\n ``jac(t, y)``. If None (default), the Jacobian will be\n approximated by finite differences. It is generally recommended to\n provide the Jacobian rather than relying on a finite-difference\n approximation.\n lband, uband : int or None\n Parameters defining the bandwidth of the Jacobian,\n i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting\n these requires your jac routine to return the Jacobian in the packed format:\n the returned array must have ``n`` columns and ``uband + lband + 1``\n rows in which Jacobian diagonals are written. Specifically\n ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used\n in `scipy.linalg.solve_banded` (check for an illustration).\n These parameters can be also used with ``jac=None`` to reduce the\n number of Jacobian elements estimated by finite differences.\n vectorized : bool, optional\n Whether `fun` is implemented in a vectorized fashion. A vectorized\n implementation offers no advantages for this solver. Default is False.\n\n Attributes\n ----------\n n : int\n Number of equations.\n status : string\n Current status of the solver: 'running', 'finished' or 'failed'.\n t_bound : float\n Boundary time.\n direction : float\n Integration direction: +1 or -1.\n t : float\n Current time.\n y : ndarray\n Current state.\n t_old : float\n Previous time. None if no steps were made yet.\n nfev : int\n Number of evaluations of the right-hand side.\n njev : int\n Number of evaluations of the Jacobian.\n\n References\n ----------\n .. [1] A. C. 
Hindmarsh, \"ODEPACK, A Systematized Collection of ODE\n Solvers,\" IMACS Transactions on Scientific Computation, Vol 1.,\n pp. 55-64, 1983.\n .. [2] L. Petzold, \"Automatic selection of methods for solving stiff and\n nonstiff systems of ordinary differential equations\", SIAM Journal\n on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,\n 1983.\n \"\"\"\n def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,\n max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,\n uband=None, vectorized=False, **extraneous):\n warn_extraneous(extraneous)\n super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized)\n\n if first_step is None:\n first_step = 0 # LSODA value for automatic selection.\n else:\n first_step = validate_first_step(first_step, t0, t_bound)\n\n first_step *= self.direction\n\n if max_step == np.inf:\n max_step = 0 # LSODA value for infinity.\n elif max_step <= 0:\n raise ValueError(\"`max_step` must be positive.\")\n\n if min_step < 0:\n raise ValueError(\"`min_step` must be nonnegative.\")\n\n rtol, atol = validate_tol(rtol, atol, self.n)\n\n solver = ode(self.fun, jac)\n solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,\n min_step=min_step, first_step=first_step,\n lband=lband, uband=uband)\n solver.set_initial_value(y0, t0)\n\n # Inject t_bound into rwork array as needed for itask=5.\n solver._integrator.rwork[0] = self.t_bound\n solver._integrator.call_args[4] = solver._integrator.rwork\n\n self._lsoda_solver = solver\n\n def _step_impl(self):\n solver = self._lsoda_solver\n integrator = solver._integrator\n\n # From lsoda.step and lsoda.integrate itask=5 means take a single\n # step and do not go past t_bound.\n itask = integrator.call_args[2]\n integrator.call_args[2] = 5\n solver._y, solver.t = integrator.run(\n solver.f, solver.jac or (lambda: None), solver._y, solver.t,\n self.t_bound, solver.f_params, solver.jac_params)\n integrator.call_args[2] = itask\n\n if solver.successful():\n self.t = solver.t\n self.y = solver._y\n # From LSODA Fortran source njev is equal to nlu.\n self.njev = integrator.iwork[12]\n self.nlu = integrator.iwork[12]\n return True, None\n else:\n return False, 'Unexpected istate in LSODA.'\n\n def _dense_output_impl(self):\n iwork = self._lsoda_solver._integrator.iwork\n rwork = self._lsoda_solver._integrator.rwork\n\n order = iwork[14]\n h = rwork[11]\n yh = np.reshape(rwork[20:20 + (order + 1) * self.n],\n (self.n, order + 1), order='F').copy()\n\n return LsodaDenseOutput(self.t_old, self.t, h, order, yh)\n\n\nclass LsodaDenseOutput(DenseOutput):\n def __init__(self, t_old, t, h, order, yh):\n super(LsodaDenseOutput, self).__init__(t_old, t)\n self.h = h\n self.yh = yh\n self.p = np.arange(order + 1)\n\n def _call_impl(self, t):\n if t.ndim == 0:\n x = ((t - self.t) / self.h) ** self.p\n else:\n x = ((t - self.t) / self.h) ** self.p[:, None]\n\n return np.dot(self.yh, x)\n", "path": "scipy/integrate/_ivp/lsoda.py"}]}
3,711
275
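For the record above, a minimal usage sketch (problem setup copied from the issue's Van der Pol example; an illustration under those assumptions, not scipy's own test): the golden diff stops substituting a zero-argument placeholder when no Jacobian is given, so after the fix the solver can be driven with `jac=None` and rely on finite differences plus LSODA's automatic stiff/non-stiff switching.

```python
from scipy.integrate import ode

mu = 1000.0          # stiffness parameter from the issue

def van_der_pol(t, y):
    return [y[1], mu * (1.0 - y[0] ** 2) * y[1] - y[0]]

solver = ode(van_der_pol, jac=None)            # no analytic Jacobian supplied
solver.set_integrator('lsoda', rtol=1e-6, atol=1e-12)
solver.set_initial_value([0.5, 0.5], 0.0)
y_final = solver.integrate(500.0)              # fast once stiffness switching works
print(y_final)
```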
gh_patches_debug_27985
rasdani/github-patches
git_diff
zulip__zulip-17870
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Clean up code for hacky OpenAPI curl test After testing `deactivate_own_account` endpoint, we need to reactivate the client so that other tests are not affected by the deactivated client. In `test_curl_examples`, this has been hackily implemented and should be replaced by cleaner code. More details at https://github.com/zulip/zulip/pull/17014#discussion_r601173277 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zerver/openapi/curl_param_value_generators.py` Content: ``` 1 # Zulip's OpenAPI-based API documentation system is documented at 2 # https://zulip.readthedocs.io/en/latest/documentation/api.html 3 # 4 # This file contains helper functions for generating cURL examples 5 # based on Zulip's OpenAPI definitions, as well as test setup and 6 # fetching of appropriate parameter values to use when running the 7 # cURL examples as part of the tools/test-api test suite. 8 from functools import wraps 9 from typing import Any, Callable, Dict, List, Optional, Set, Tuple 10 11 from django.utils.timezone import now as timezone_now 12 13 from zerver.lib.actions import ( 14 do_add_linkifier, 15 do_add_reaction, 16 do_add_realm_playground, 17 do_create_user, 18 update_user_presence, 19 ) 20 from zerver.lib.events import do_events_register 21 from zerver.lib.initial_password import initial_password 22 from zerver.lib.test_classes import ZulipTestCase 23 from zerver.models import Client, Message, UserGroup, UserPresence, get_realm 24 25 GENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {} 26 REGISTERED_GENERATOR_FUNCTIONS: Set[str] = set() 27 CALLED_GENERATOR_FUNCTIONS: Set[str] = set() 28 29 helpers = ZulipTestCase() 30 31 32 def openapi_param_value_generator( 33 endpoints: List[str], 34 ) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]: 35 """This decorator is used to register OpenAPI param value genarator functions 36 with endpoints. Example usage: 37 38 @openapi_param_value_generator(["/messages/render:post"]) 39 def ... 
40 """ 41 42 def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]: 43 @wraps(generator_func) 44 def _record_calls_wrapper() -> Dict[str, object]: 45 CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__) 46 return generator_func() 47 48 REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__) 49 for endpoint in endpoints: 50 GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper 51 52 return _record_calls_wrapper 53 54 return wrapper 55 56 57 def assert_all_helper_functions_called() -> None: 58 """Throws an exception if any registered helpers were not called by tests""" 59 if REGISTERED_GENERATOR_FUNCTIONS == CALLED_GENERATOR_FUNCTIONS: 60 return 61 62 uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS) 63 64 raise Exception(f"Registered curl API generators were not called: {uncalled_functions}") 65 66 67 def patch_openapi_example_values( 68 entry: str, 69 params: List[Dict[str, Any]], 70 request_body: Optional[Dict[str, Any]] = None, 71 ) -> Tuple[List[Dict[str, object]], Optional[Dict[str, object]]]: 72 if entry not in GENERATOR_FUNCTIONS: 73 return params, request_body 74 func = GENERATOR_FUNCTIONS[entry] 75 realm_example_values: Dict[str, object] = func() 76 77 for param in params: 78 param_name = param["name"] 79 if param_name in realm_example_values: 80 if "content" in param: 81 param["content"]["application/json"]["example"] = realm_example_values[param_name] 82 else: 83 param["example"] = realm_example_values[param_name] 84 85 if request_body is not None: 86 properties = request_body["content"]["multipart/form-data"]["schema"]["properties"] 87 for key, property in properties.items(): 88 if key in realm_example_values: 89 property["example"] = realm_example_values[key] 90 return params, request_body 91 92 93 @openapi_param_value_generator(["/fetch_api_key:post"]) 94 def fetch_api_key() -> Dict[str, object]: 95 email = helpers.example_email("iago") 96 password = initial_password(email) 97 98 return { 99 "username": email, 100 "password": password, 101 } 102 103 104 @openapi_param_value_generator( 105 [ 106 "/messages/{message_id}:get", 107 "/messages/{message_id}/history:get", 108 "/messages/{message_id}:patch", 109 "/messages/{message_id}:delete", 110 ] 111 ) 112 def iago_message_id() -> Dict[str, object]: 113 return { 114 "message_id": helpers.send_stream_message(helpers.example_user("iago"), "Denmark"), 115 } 116 117 118 @openapi_param_value_generator(["/messages/{message_id}/reactions:delete"]) 119 def add_emoji_to_message() -> Dict[str, object]: 120 user_profile = helpers.example_user("iago") 121 122 # from OpenAPI format data in zulip.yaml 123 message_id = 41 124 emoji_name = "octopus" 125 emoji_code = "1f419" 126 reaction_type = "unicode_emoji" 127 128 message = Message.objects.select_related().get(id=message_id) 129 do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type) 130 131 return {} 132 133 134 @openapi_param_value_generator(["/messages/flags:post"]) 135 def update_flags_message_ids() -> Dict[str, object]: 136 stream_name = "Venice" 137 helpers.subscribe(helpers.example_user("iago"), stream_name) 138 139 messages = [] 140 for _ in range(3): 141 messages.append(helpers.send_stream_message(helpers.example_user("iago"), stream_name)) 142 return { 143 "messages": messages, 144 } 145 146 147 @openapi_param_value_generator(["/mark_stream_as_read:post", "/users/me/{stream_id}/topics:get"]) 148 def get_venice_stream_id() -> Dict[str, object]: 149 return { 150 "stream_id": 
helpers.get_stream_id("Venice"), 151 } 152 153 154 @openapi_param_value_generator(["/streams/{stream_id}:patch"]) 155 def update_stream() -> Dict[str, object]: 156 stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 1") 157 return { 158 "stream_id": stream.id, 159 } 160 161 162 @openapi_param_value_generator(["/streams/{stream_id}:delete"]) 163 def create_temp_stream_and_get_id() -> Dict[str, object]: 164 stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 2") 165 return { 166 "stream_id": stream.id, 167 } 168 169 170 @openapi_param_value_generator(["/mark_topic_as_read:post"]) 171 def get_denmark_stream_id_and_topic() -> Dict[str, object]: 172 stream_name = "Denmark" 173 topic_name = "Tivoli Gardens" 174 175 helpers.subscribe(helpers.example_user("iago"), stream_name) 176 helpers.send_stream_message(helpers.example_user("hamlet"), stream_name, topic_name=topic_name) 177 178 return { 179 "stream_id": helpers.get_stream_id(stream_name), 180 "topic_name": topic_name, 181 } 182 183 184 @openapi_param_value_generator(["/users/me/subscriptions/properties:post"]) 185 def update_subscription_data() -> Dict[str, object]: 186 profile = helpers.example_user("iago") 187 helpers.subscribe(profile, "Verona") 188 helpers.subscribe(profile, "social") 189 return { 190 "subscription_data": [ 191 {"stream_id": helpers.get_stream_id("Verona"), "property": "pin_to_top", "value": True}, 192 {"stream_id": helpers.get_stream_id("social"), "property": "color", "value": "#f00f00"}, 193 ], 194 } 195 196 197 @openapi_param_value_generator(["/users/me/subscriptions:delete"]) 198 def delete_subscription_data() -> Dict[str, object]: 199 iago = helpers.example_user("iago") 200 zoe = helpers.example_user("ZOE") 201 helpers.subscribe(iago, "Verona") 202 helpers.subscribe(iago, "social") 203 helpers.subscribe(zoe, "Verona") 204 helpers.subscribe(zoe, "social") 205 return {} 206 207 208 @openapi_param_value_generator(["/events:get"]) 209 def get_events() -> Dict[str, object]: 210 profile = helpers.example_user("iago") 211 helpers.subscribe(profile, "Verona") 212 client = Client.objects.create(name="curl-test-client-1") 213 response = do_events_register(profile, client, event_types=["message", "realm_emoji"]) 214 helpers.send_stream_message(helpers.example_user("hamlet"), "Verona") 215 return { 216 "queue_id": response["queue_id"], 217 "last_event_id": response["last_event_id"], 218 } 219 220 221 @openapi_param_value_generator(["/events:delete"]) 222 def delete_event_queue() -> Dict[str, object]: 223 profile = helpers.example_user("iago") 224 client = Client.objects.create(name="curl-test-client-2") 225 response = do_events_register(profile, client, event_types=["message"]) 226 return { 227 "queue_id": response["queue_id"], 228 "last_event_id": response["last_event_id"], 229 } 230 231 232 @openapi_param_value_generator(["/users/{user_id_or_email}/presence:get"]) 233 def get_user_presence() -> Dict[str, object]: 234 iago = helpers.example_user("iago") 235 client = Client.objects.create(name="curl-test-client-3") 236 update_user_presence(iago, client, timezone_now(), UserPresence.ACTIVE, False) 237 return {} 238 239 240 @openapi_param_value_generator(["/users:post"]) 241 def create_user() -> Dict[str, object]: 242 return { 243 "email": helpers.nonreg_email("test"), 244 } 245 246 247 @openapi_param_value_generator(["/user_groups/create:post"]) 248 def create_user_group_data() -> Dict[str, object]: 249 return { 250 "members": [helpers.example_user("hamlet").id, 
helpers.example_user("othello").id], 251 } 252 253 254 @openapi_param_value_generator( 255 ["/user_groups/{user_group_id}:patch", "/user_groups/{user_group_id}:delete"] 256 ) 257 def get_temp_user_group_id() -> Dict[str, object]: 258 user_group, _ = UserGroup.objects.get_or_create(name="temp", realm=get_realm("zulip")) 259 return { 260 "user_group_id": user_group.id, 261 } 262 263 264 @openapi_param_value_generator(["/realm/filters/{filter_id}:delete"]) 265 def remove_realm_filters() -> Dict[str, object]: 266 filter_id = do_add_linkifier( 267 get_realm("zulip"), "#(?P<id>[0-9]{2,8})", "https://github.com/zulip/zulip/pull/%(id)s" 268 ) 269 return { 270 "filter_id": filter_id, 271 } 272 273 274 @openapi_param_value_generator(["/realm/emoji/{emoji_name}:post", "/user_uploads:post"]) 275 def upload_custom_emoji() -> Dict[str, object]: 276 return { 277 "filename": "zerver/tests/images/animated_img.gif", 278 } 279 280 281 @openapi_param_value_generator(["/realm/playgrounds:post"]) 282 def add_realm_playground() -> Dict[str, object]: 283 return { 284 "name": "Python2 playground", 285 "pygments_language": "Python2", 286 "url_prefix": "https://python2.example.com", 287 } 288 289 290 @openapi_param_value_generator(["/realm/playgrounds/{playground_id}:delete"]) 291 def remove_realm_playground() -> Dict[str, object]: 292 playground_info = dict( 293 name="Python playground", 294 pygments_language="Python", 295 url_prefix="https://python.example.com", 296 ) 297 playground_id = do_add_realm_playground(get_realm("zulip"), **playground_info) 298 return { 299 "playground_id": playground_id, 300 } 301 302 303 @openapi_param_value_generator(["/users/{user_id}:delete"]) 304 def deactivate_user() -> Dict[str, object]: 305 user_profile = do_create_user( 306 email="[email protected]", 307 password=None, 308 full_name="test_user", 309 realm=get_realm("zulip"), 310 acting_user=None, 311 ) 312 return {"user_id": user_profile.id} 313 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zerver/openapi/curl_param_value_generators.py b/zerver/openapi/curl_param_value_generators.py --- a/zerver/openapi/curl_param_value_generators.py +++ b/zerver/openapi/curl_param_value_generators.py @@ -20,11 +20,15 @@ from zerver.lib.events import do_events_register from zerver.lib.initial_password import initial_password from zerver.lib.test_classes import ZulipTestCase -from zerver.models import Client, Message, UserGroup, UserPresence, get_realm +from zerver.lib.users import get_api_key +from zerver.models import Client, Message, UserGroup, UserPresence, get_realm, get_user GENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {} REGISTERED_GENERATOR_FUNCTIONS: Set[str] = set() CALLED_GENERATOR_FUNCTIONS: Set[str] = set() +# This is a List rather than just a string in order to make it easier +# to write to it from another module. +AUTHENTICATION_LINE: List[str] = [""] helpers = ZulipTestCase() @@ -310,3 +314,22 @@ acting_user=None, ) return {"user_id": user_profile.id} + + +@openapi_param_value_generator(["/users/me:delete"]) +def deactivate_own_user() -> Dict[str, object]: + test_user_email = "[email protected]" + deactivate_test_user = do_create_user( + test_user_email, + "secret", + get_realm("zulip"), + "Mr. Delete", + role=200, + acting_user=None, + ) + realm = get_realm("zulip") + test_user = get_user(test_user_email, realm) + test_user_api_key = get_api_key(test_user) + # change authentication line to allow test_client to delete itself. + AUTHENTICATION_LINE[0] = f"{deactivate_test_user.email}:{test_user_api_key}" + return {}
{"golden_diff": "diff --git a/zerver/openapi/curl_param_value_generators.py b/zerver/openapi/curl_param_value_generators.py\n--- a/zerver/openapi/curl_param_value_generators.py\n+++ b/zerver/openapi/curl_param_value_generators.py\n@@ -20,11 +20,15 @@\n from zerver.lib.events import do_events_register\n from zerver.lib.initial_password import initial_password\n from zerver.lib.test_classes import ZulipTestCase\n-from zerver.models import Client, Message, UserGroup, UserPresence, get_realm\n+from zerver.lib.users import get_api_key\n+from zerver.models import Client, Message, UserGroup, UserPresence, get_realm, get_user\n \n GENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {}\n REGISTERED_GENERATOR_FUNCTIONS: Set[str] = set()\n CALLED_GENERATOR_FUNCTIONS: Set[str] = set()\n+# This is a List rather than just a string in order to make it easier\n+# to write to it from another module.\n+AUTHENTICATION_LINE: List[str] = [\"\"]\n \n helpers = ZulipTestCase()\n \n@@ -310,3 +314,22 @@\n acting_user=None,\n )\n return {\"user_id\": user_profile.id}\n+\n+\n+@openapi_param_value_generator([\"/users/me:delete\"])\n+def deactivate_own_user() -> Dict[str, object]:\n+ test_user_email = \"[email protected]\"\n+ deactivate_test_user = do_create_user(\n+ test_user_email,\n+ \"secret\",\n+ get_realm(\"zulip\"),\n+ \"Mr. Delete\",\n+ role=200,\n+ acting_user=None,\n+ )\n+ realm = get_realm(\"zulip\")\n+ test_user = get_user(test_user_email, realm)\n+ test_user_api_key = get_api_key(test_user)\n+ # change authentication line to allow test_client to delete itself.\n+ AUTHENTICATION_LINE[0] = f\"{deactivate_test_user.email}:{test_user_api_key}\"\n+ return {}\n", "issue": "Clean up code for hacky OpenAPI curl test\nAfter testing `deactivate_own_account` endpoint, we need to reactivate the client so that other tests are not affected by the deactivated client. In `test_curl_examples`, this has been hackily implemented and should be replaced by cleaner code. More details at https://github.com/zulip/zulip/pull/17014#discussion_r601173277\n", "before_files": [{"content": "# Zulip's OpenAPI-based API documentation system is documented at\n# https://zulip.readthedocs.io/en/latest/documentation/api.html\n#\n# This file contains helper functions for generating cURL examples\n# based on Zulip's OpenAPI definitions, as well as test setup and\n# fetching of appropriate parameter values to use when running the\n# cURL examples as part of the tools/test-api test suite.\nfrom functools import wraps\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple\n\nfrom django.utils.timezone import now as timezone_now\n\nfrom zerver.lib.actions import (\n do_add_linkifier,\n do_add_reaction,\n do_add_realm_playground,\n do_create_user,\n update_user_presence,\n)\nfrom zerver.lib.events import do_events_register\nfrom zerver.lib.initial_password import initial_password\nfrom zerver.lib.test_classes import ZulipTestCase\nfrom zerver.models import Client, Message, UserGroup, UserPresence, get_realm\n\nGENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {}\nREGISTERED_GENERATOR_FUNCTIONS: Set[str] = set()\nCALLED_GENERATOR_FUNCTIONS: Set[str] = set()\n\nhelpers = ZulipTestCase()\n\n\ndef openapi_param_value_generator(\n endpoints: List[str],\n) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]:\n \"\"\"This decorator is used to register OpenAPI param value genarator functions\n with endpoints. 
Example usage:\n\n @openapi_param_value_generator([\"/messages/render:post\"])\n def ...\n \"\"\"\n\n def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]:\n @wraps(generator_func)\n def _record_calls_wrapper() -> Dict[str, object]:\n CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__)\n return generator_func()\n\n REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__)\n for endpoint in endpoints:\n GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper\n\n return _record_calls_wrapper\n\n return wrapper\n\n\ndef assert_all_helper_functions_called() -> None:\n \"\"\"Throws an exception if any registered helpers were not called by tests\"\"\"\n if REGISTERED_GENERATOR_FUNCTIONS == CALLED_GENERATOR_FUNCTIONS:\n return\n\n uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS)\n\n raise Exception(f\"Registered curl API generators were not called: {uncalled_functions}\")\n\n\ndef patch_openapi_example_values(\n entry: str,\n params: List[Dict[str, Any]],\n request_body: Optional[Dict[str, Any]] = None,\n) -> Tuple[List[Dict[str, object]], Optional[Dict[str, object]]]:\n if entry not in GENERATOR_FUNCTIONS:\n return params, request_body\n func = GENERATOR_FUNCTIONS[entry]\n realm_example_values: Dict[str, object] = func()\n\n for param in params:\n param_name = param[\"name\"]\n if param_name in realm_example_values:\n if \"content\" in param:\n param[\"content\"][\"application/json\"][\"example\"] = realm_example_values[param_name]\n else:\n param[\"example\"] = realm_example_values[param_name]\n\n if request_body is not None:\n properties = request_body[\"content\"][\"multipart/form-data\"][\"schema\"][\"properties\"]\n for key, property in properties.items():\n if key in realm_example_values:\n property[\"example\"] = realm_example_values[key]\n return params, request_body\n\n\n@openapi_param_value_generator([\"/fetch_api_key:post\"])\ndef fetch_api_key() -> Dict[str, object]:\n email = helpers.example_email(\"iago\")\n password = initial_password(email)\n\n return {\n \"username\": email,\n \"password\": password,\n }\n\n\n@openapi_param_value_generator(\n [\n \"/messages/{message_id}:get\",\n \"/messages/{message_id}/history:get\",\n \"/messages/{message_id}:patch\",\n \"/messages/{message_id}:delete\",\n ]\n)\ndef iago_message_id() -> Dict[str, object]:\n return {\n \"message_id\": helpers.send_stream_message(helpers.example_user(\"iago\"), \"Denmark\"),\n }\n\n\n@openapi_param_value_generator([\"/messages/{message_id}/reactions:delete\"])\ndef add_emoji_to_message() -> Dict[str, object]:\n user_profile = helpers.example_user(\"iago\")\n\n # from OpenAPI format data in zulip.yaml\n message_id = 41\n emoji_name = \"octopus\"\n emoji_code = \"1f419\"\n reaction_type = \"unicode_emoji\"\n\n message = Message.objects.select_related().get(id=message_id)\n do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)\n\n return {}\n\n\n@openapi_param_value_generator([\"/messages/flags:post\"])\ndef update_flags_message_ids() -> Dict[str, object]:\n stream_name = \"Venice\"\n helpers.subscribe(helpers.example_user(\"iago\"), stream_name)\n\n messages = []\n for _ in range(3):\n messages.append(helpers.send_stream_message(helpers.example_user(\"iago\"), stream_name))\n return {\n \"messages\": messages,\n }\n\n\n@openapi_param_value_generator([\"/mark_stream_as_read:post\", \"/users/me/{stream_id}/topics:get\"])\ndef get_venice_stream_id() -> Dict[str, object]:\n return {\n \"stream_id\": 
helpers.get_stream_id(\"Venice\"),\n }\n\n\n@openapi_param_value_generator([\"/streams/{stream_id}:patch\"])\ndef update_stream() -> Dict[str, object]:\n stream = helpers.subscribe(helpers.example_user(\"iago\"), \"temp_stream 1\")\n return {\n \"stream_id\": stream.id,\n }\n\n\n@openapi_param_value_generator([\"/streams/{stream_id}:delete\"])\ndef create_temp_stream_and_get_id() -> Dict[str, object]:\n stream = helpers.subscribe(helpers.example_user(\"iago\"), \"temp_stream 2\")\n return {\n \"stream_id\": stream.id,\n }\n\n\n@openapi_param_value_generator([\"/mark_topic_as_read:post\"])\ndef get_denmark_stream_id_and_topic() -> Dict[str, object]:\n stream_name = \"Denmark\"\n topic_name = \"Tivoli Gardens\"\n\n helpers.subscribe(helpers.example_user(\"iago\"), stream_name)\n helpers.send_stream_message(helpers.example_user(\"hamlet\"), stream_name, topic_name=topic_name)\n\n return {\n \"stream_id\": helpers.get_stream_id(stream_name),\n \"topic_name\": topic_name,\n }\n\n\n@openapi_param_value_generator([\"/users/me/subscriptions/properties:post\"])\ndef update_subscription_data() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n helpers.subscribe(profile, \"Verona\")\n helpers.subscribe(profile, \"social\")\n return {\n \"subscription_data\": [\n {\"stream_id\": helpers.get_stream_id(\"Verona\"), \"property\": \"pin_to_top\", \"value\": True},\n {\"stream_id\": helpers.get_stream_id(\"social\"), \"property\": \"color\", \"value\": \"#f00f00\"},\n ],\n }\n\n\n@openapi_param_value_generator([\"/users/me/subscriptions:delete\"])\ndef delete_subscription_data() -> Dict[str, object]:\n iago = helpers.example_user(\"iago\")\n zoe = helpers.example_user(\"ZOE\")\n helpers.subscribe(iago, \"Verona\")\n helpers.subscribe(iago, \"social\")\n helpers.subscribe(zoe, \"Verona\")\n helpers.subscribe(zoe, \"social\")\n return {}\n\n\n@openapi_param_value_generator([\"/events:get\"])\ndef get_events() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n helpers.subscribe(profile, \"Verona\")\n client = Client.objects.create(name=\"curl-test-client-1\")\n response = do_events_register(profile, client, event_types=[\"message\", \"realm_emoji\"])\n helpers.send_stream_message(helpers.example_user(\"hamlet\"), \"Verona\")\n return {\n \"queue_id\": response[\"queue_id\"],\n \"last_event_id\": response[\"last_event_id\"],\n }\n\n\n@openapi_param_value_generator([\"/events:delete\"])\ndef delete_event_queue() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n client = Client.objects.create(name=\"curl-test-client-2\")\n response = do_events_register(profile, client, event_types=[\"message\"])\n return {\n \"queue_id\": response[\"queue_id\"],\n \"last_event_id\": response[\"last_event_id\"],\n }\n\n\n@openapi_param_value_generator([\"/users/{user_id_or_email}/presence:get\"])\ndef get_user_presence() -> Dict[str, object]:\n iago = helpers.example_user(\"iago\")\n client = Client.objects.create(name=\"curl-test-client-3\")\n update_user_presence(iago, client, timezone_now(), UserPresence.ACTIVE, False)\n return {}\n\n\n@openapi_param_value_generator([\"/users:post\"])\ndef create_user() -> Dict[str, object]:\n return {\n \"email\": helpers.nonreg_email(\"test\"),\n }\n\n\n@openapi_param_value_generator([\"/user_groups/create:post\"])\ndef create_user_group_data() -> Dict[str, object]:\n return {\n \"members\": [helpers.example_user(\"hamlet\").id, helpers.example_user(\"othello\").id],\n }\n\n\n@openapi_param_value_generator(\n 
[\"/user_groups/{user_group_id}:patch\", \"/user_groups/{user_group_id}:delete\"]\n)\ndef get_temp_user_group_id() -> Dict[str, object]:\n user_group, _ = UserGroup.objects.get_or_create(name=\"temp\", realm=get_realm(\"zulip\"))\n return {\n \"user_group_id\": user_group.id,\n }\n\n\n@openapi_param_value_generator([\"/realm/filters/{filter_id}:delete\"])\ndef remove_realm_filters() -> Dict[str, object]:\n filter_id = do_add_linkifier(\n get_realm(\"zulip\"), \"#(?P<id>[0-9]{2,8})\", \"https://github.com/zulip/zulip/pull/%(id)s\"\n )\n return {\n \"filter_id\": filter_id,\n }\n\n\n@openapi_param_value_generator([\"/realm/emoji/{emoji_name}:post\", \"/user_uploads:post\"])\ndef upload_custom_emoji() -> Dict[str, object]:\n return {\n \"filename\": \"zerver/tests/images/animated_img.gif\",\n }\n\n\n@openapi_param_value_generator([\"/realm/playgrounds:post\"])\ndef add_realm_playground() -> Dict[str, object]:\n return {\n \"name\": \"Python2 playground\",\n \"pygments_language\": \"Python2\",\n \"url_prefix\": \"https://python2.example.com\",\n }\n\n\n@openapi_param_value_generator([\"/realm/playgrounds/{playground_id}:delete\"])\ndef remove_realm_playground() -> Dict[str, object]:\n playground_info = dict(\n name=\"Python playground\",\n pygments_language=\"Python\",\n url_prefix=\"https://python.example.com\",\n )\n playground_id = do_add_realm_playground(get_realm(\"zulip\"), **playground_info)\n return {\n \"playground_id\": playground_id,\n }\n\n\n@openapi_param_value_generator([\"/users/{user_id}:delete\"])\ndef deactivate_user() -> Dict[str, object]:\n user_profile = do_create_user(\n email=\"[email protected]\",\n password=None,\n full_name=\"test_user\",\n realm=get_realm(\"zulip\"),\n acting_user=None,\n )\n return {\"user_id\": user_profile.id}\n", "path": "zerver/openapi/curl_param_value_generators.py"}], "after_files": [{"content": "# Zulip's OpenAPI-based API documentation system is documented at\n# https://zulip.readthedocs.io/en/latest/documentation/api.html\n#\n# This file contains helper functions for generating cURL examples\n# based on Zulip's OpenAPI definitions, as well as test setup and\n# fetching of appropriate parameter values to use when running the\n# cURL examples as part of the tools/test-api test suite.\nfrom functools import wraps\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple\n\nfrom django.utils.timezone import now as timezone_now\n\nfrom zerver.lib.actions import (\n do_add_linkifier,\n do_add_reaction,\n do_add_realm_playground,\n do_create_user,\n update_user_presence,\n)\nfrom zerver.lib.events import do_events_register\nfrom zerver.lib.initial_password import initial_password\nfrom zerver.lib.test_classes import ZulipTestCase\nfrom zerver.lib.users import get_api_key\nfrom zerver.models import Client, Message, UserGroup, UserPresence, get_realm, get_user\n\nGENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {}\nREGISTERED_GENERATOR_FUNCTIONS: Set[str] = set()\nCALLED_GENERATOR_FUNCTIONS: Set[str] = set()\n# This is a List rather than just a string in order to make it easier\n# to write to it from another module.\nAUTHENTICATION_LINE: List[str] = [\"\"]\n\nhelpers = ZulipTestCase()\n\n\ndef openapi_param_value_generator(\n endpoints: List[str],\n) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]:\n \"\"\"This decorator is used to register OpenAPI param value genarator functions\n with endpoints. 
Example usage:\n\n @openapi_param_value_generator([\"/messages/render:post\"])\n def ...\n \"\"\"\n\n def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]:\n @wraps(generator_func)\n def _record_calls_wrapper() -> Dict[str, object]:\n CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__)\n return generator_func()\n\n REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__)\n for endpoint in endpoints:\n GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper\n\n return _record_calls_wrapper\n\n return wrapper\n\n\ndef assert_all_helper_functions_called() -> None:\n \"\"\"Throws an exception if any registered helpers were not called by tests\"\"\"\n if REGISTERED_GENERATOR_FUNCTIONS == CALLED_GENERATOR_FUNCTIONS:\n return\n\n uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS)\n\n raise Exception(f\"Registered curl API generators were not called: {uncalled_functions}\")\n\n\ndef patch_openapi_example_values(\n entry: str,\n params: List[Dict[str, Any]],\n request_body: Optional[Dict[str, Any]] = None,\n) -> Tuple[List[Dict[str, object]], Optional[Dict[str, object]]]:\n if entry not in GENERATOR_FUNCTIONS:\n return params, request_body\n func = GENERATOR_FUNCTIONS[entry]\n realm_example_values: Dict[str, object] = func()\n\n for param in params:\n param_name = param[\"name\"]\n if param_name in realm_example_values:\n if \"content\" in param:\n param[\"content\"][\"application/json\"][\"example\"] = realm_example_values[param_name]\n else:\n param[\"example\"] = realm_example_values[param_name]\n\n if request_body is not None:\n properties = request_body[\"content\"][\"multipart/form-data\"][\"schema\"][\"properties\"]\n for key, property in properties.items():\n if key in realm_example_values:\n property[\"example\"] = realm_example_values[key]\n return params, request_body\n\n\n@openapi_param_value_generator([\"/fetch_api_key:post\"])\ndef fetch_api_key() -> Dict[str, object]:\n email = helpers.example_email(\"iago\")\n password = initial_password(email)\n\n return {\n \"username\": email,\n \"password\": password,\n }\n\n\n@openapi_param_value_generator(\n [\n \"/messages/{message_id}:get\",\n \"/messages/{message_id}/history:get\",\n \"/messages/{message_id}:patch\",\n \"/messages/{message_id}:delete\",\n ]\n)\ndef iago_message_id() -> Dict[str, object]:\n return {\n \"message_id\": helpers.send_stream_message(helpers.example_user(\"iago\"), \"Denmark\"),\n }\n\n\n@openapi_param_value_generator([\"/messages/{message_id}/reactions:delete\"])\ndef add_emoji_to_message() -> Dict[str, object]:\n user_profile = helpers.example_user(\"iago\")\n\n # from OpenAPI format data in zulip.yaml\n message_id = 41\n emoji_name = \"octopus\"\n emoji_code = \"1f419\"\n reaction_type = \"unicode_emoji\"\n\n message = Message.objects.select_related().get(id=message_id)\n do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)\n\n return {}\n\n\n@openapi_param_value_generator([\"/messages/flags:post\"])\ndef update_flags_message_ids() -> Dict[str, object]:\n stream_name = \"Venice\"\n helpers.subscribe(helpers.example_user(\"iago\"), stream_name)\n\n messages = []\n for _ in range(3):\n messages.append(helpers.send_stream_message(helpers.example_user(\"iago\"), stream_name))\n return {\n \"messages\": messages,\n }\n\n\n@openapi_param_value_generator([\"/mark_stream_as_read:post\", \"/users/me/{stream_id}/topics:get\"])\ndef get_venice_stream_id() -> Dict[str, object]:\n return {\n \"stream_id\": 
helpers.get_stream_id(\"Venice\"),\n }\n\n\n@openapi_param_value_generator([\"/streams/{stream_id}:patch\"])\ndef update_stream() -> Dict[str, object]:\n stream = helpers.subscribe(helpers.example_user(\"iago\"), \"temp_stream 1\")\n return {\n \"stream_id\": stream.id,\n }\n\n\n@openapi_param_value_generator([\"/streams/{stream_id}:delete\"])\ndef create_temp_stream_and_get_id() -> Dict[str, object]:\n stream = helpers.subscribe(helpers.example_user(\"iago\"), \"temp_stream 2\")\n return {\n \"stream_id\": stream.id,\n }\n\n\n@openapi_param_value_generator([\"/mark_topic_as_read:post\"])\ndef get_denmark_stream_id_and_topic() -> Dict[str, object]:\n stream_name = \"Denmark\"\n topic_name = \"Tivoli Gardens\"\n\n helpers.subscribe(helpers.example_user(\"iago\"), stream_name)\n helpers.send_stream_message(helpers.example_user(\"hamlet\"), stream_name, topic_name=topic_name)\n\n return {\n \"stream_id\": helpers.get_stream_id(stream_name),\n \"topic_name\": topic_name,\n }\n\n\n@openapi_param_value_generator([\"/users/me/subscriptions/properties:post\"])\ndef update_subscription_data() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n helpers.subscribe(profile, \"Verona\")\n helpers.subscribe(profile, \"social\")\n return {\n \"subscription_data\": [\n {\"stream_id\": helpers.get_stream_id(\"Verona\"), \"property\": \"pin_to_top\", \"value\": True},\n {\"stream_id\": helpers.get_stream_id(\"social\"), \"property\": \"color\", \"value\": \"#f00f00\"},\n ],\n }\n\n\n@openapi_param_value_generator([\"/users/me/subscriptions:delete\"])\ndef delete_subscription_data() -> Dict[str, object]:\n iago = helpers.example_user(\"iago\")\n zoe = helpers.example_user(\"ZOE\")\n helpers.subscribe(iago, \"Verona\")\n helpers.subscribe(iago, \"social\")\n helpers.subscribe(zoe, \"Verona\")\n helpers.subscribe(zoe, \"social\")\n return {}\n\n\n@openapi_param_value_generator([\"/events:get\"])\ndef get_events() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n helpers.subscribe(profile, \"Verona\")\n client = Client.objects.create(name=\"curl-test-client-1\")\n response = do_events_register(profile, client, event_types=[\"message\", \"realm_emoji\"])\n helpers.send_stream_message(helpers.example_user(\"hamlet\"), \"Verona\")\n return {\n \"queue_id\": response[\"queue_id\"],\n \"last_event_id\": response[\"last_event_id\"],\n }\n\n\n@openapi_param_value_generator([\"/events:delete\"])\ndef delete_event_queue() -> Dict[str, object]:\n profile = helpers.example_user(\"iago\")\n client = Client.objects.create(name=\"curl-test-client-2\")\n response = do_events_register(profile, client, event_types=[\"message\"])\n return {\n \"queue_id\": response[\"queue_id\"],\n \"last_event_id\": response[\"last_event_id\"],\n }\n\n\n@openapi_param_value_generator([\"/users/{user_id_or_email}/presence:get\"])\ndef get_user_presence() -> Dict[str, object]:\n iago = helpers.example_user(\"iago\")\n client = Client.objects.create(name=\"curl-test-client-3\")\n update_user_presence(iago, client, timezone_now(), UserPresence.ACTIVE, False)\n return {}\n\n\n@openapi_param_value_generator([\"/users:post\"])\ndef create_user() -> Dict[str, object]:\n return {\n \"email\": helpers.nonreg_email(\"test\"),\n }\n\n\n@openapi_param_value_generator([\"/user_groups/create:post\"])\ndef create_user_group_data() -> Dict[str, object]:\n return {\n \"members\": [helpers.example_user(\"hamlet\").id, helpers.example_user(\"othello\").id],\n }\n\n\n@openapi_param_value_generator(\n 
[\"/user_groups/{user_group_id}:patch\", \"/user_groups/{user_group_id}:delete\"]\n)\ndef get_temp_user_group_id() -> Dict[str, object]:\n user_group, _ = UserGroup.objects.get_or_create(name=\"temp\", realm=get_realm(\"zulip\"))\n return {\n \"user_group_id\": user_group.id,\n }\n\n\n@openapi_param_value_generator([\"/realm/filters/{filter_id}:delete\"])\ndef remove_realm_filters() -> Dict[str, object]:\n filter_id = do_add_linkifier(\n get_realm(\"zulip\"), \"#(?P<id>[0-9]{2,8})\", \"https://github.com/zulip/zulip/pull/%(id)s\"\n )\n return {\n \"filter_id\": filter_id,\n }\n\n\n@openapi_param_value_generator([\"/realm/emoji/{emoji_name}:post\", \"/user_uploads:post\"])\ndef upload_custom_emoji() -> Dict[str, object]:\n return {\n \"filename\": \"zerver/tests/images/animated_img.gif\",\n }\n\n\n@openapi_param_value_generator([\"/realm/playgrounds:post\"])\ndef add_realm_playground() -> Dict[str, object]:\n return {\n \"name\": \"Python2 playground\",\n \"pygments_language\": \"Python2\",\n \"url_prefix\": \"https://python2.example.com\",\n }\n\n\n@openapi_param_value_generator([\"/realm/playgrounds/{playground_id}:delete\"])\ndef remove_realm_playground() -> Dict[str, object]:\n playground_info = dict(\n name=\"Python playground\",\n pygments_language=\"Python\",\n url_prefix=\"https://python.example.com\",\n )\n playground_id = do_add_realm_playground(get_realm(\"zulip\"), **playground_info)\n return {\n \"playground_id\": playground_id,\n }\n\n\n@openapi_param_value_generator([\"/users/{user_id}:delete\"])\ndef deactivate_user() -> Dict[str, object]:\n user_profile = do_create_user(\n email=\"[email protected]\",\n password=None,\n full_name=\"test_user\",\n realm=get_realm(\"zulip\"),\n acting_user=None,\n )\n return {\"user_id\": user_profile.id}\n\n\n@openapi_param_value_generator([\"/users/me:delete\"])\ndef deactivate_own_user() -> Dict[str, object]:\n test_user_email = \"[email protected]\"\n deactivate_test_user = do_create_user(\n test_user_email,\n \"secret\",\n get_realm(\"zulip\"),\n \"Mr. Delete\",\n role=200,\n acting_user=None,\n )\n realm = get_realm(\"zulip\")\n test_user = get_user(test_user_email, realm)\n test_user_api_key = get_api_key(test_user)\n # change authentication line to allow test_client to delete itself.\n AUTHENTICATION_LINE[0] = f\"{deactivate_test_user.email}:{test_user_api_key}\"\n return {}\n", "path": "zerver/openapi/curl_param_value_generators.py"}]}
3,699
452
gh_patches_debug_3917
rasdani/github-patches
git_diff
geopandas__geopandas-605
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Very slow when writing to GPKG Here's my test suite for a proof: https://github.com/culebron/geodata Run `python3.6 few.py` and `python3.6 multiple.py` to compare. `few.py` opens a file with a lot of data, but only 2.7K records as GeoDataFrame. It writes them into GeoJSON and GPKG. In this case, GPKG driver outperforms GeoJSON. `multiple.py` creates a 100K records dataframe and then saves it to GeoJSON and GPKG. Here, GPKG is incredibly slow. My results: $ python3.6 few.py writing 2.7K records to geojson 36.283805477003625 writing 2.7K records to gpkg 20.792497718997765 $ python3.6 multiple.py 100%|████████████████████████████████████████████████████████| 100000/100000 [00:03<00:00, 29996.25it/s] writing 100K records to geojson 61.62079200500011 writing 100K records to gpkg 260.4413645050008 And notice that in case of `multiple.py`, the resulting GeoPackage file is only 9 megs. Which is times smaller than the file produced by `few.py`. As I understand, the problem is that Fiona opens a session in Sqlite and creates a lock file, and it takes some time. And inspecting the code, I see GeoPandas naively writes everything 1 record at a time, which means Sqlite honestly locks it, then writes, then unlocks: https://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L107 with fiona.drivers(): with fiona.open(filename, 'w', driver=driver, crs=df.crs, schema=schema, **kwargs) as colxn: for feature in df.iterfeatures(): colxn.write(feature) This should be optimized. Are there branches/pull requests for this? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `geopandas/io/file.py` Content: ``` 1 import os 2 3 import fiona 4 import numpy as np 5 import six 6 7 from geopandas import GeoDataFrame 8 9 # Adapted from pandas.io.common 10 if six.PY3: 11 from urllib.request import urlopen as _urlopen 12 from urllib.parse import urlparse as parse_url 13 from urllib.parse import uses_relative, uses_netloc, uses_params 14 else: 15 from urllib2 import urlopen as _urlopen 16 from urlparse import urlparse as parse_url 17 from urlparse import uses_relative, uses_netloc, uses_params 18 19 _VALID_URLS = set(uses_relative + uses_netloc + uses_params) 20 _VALID_URLS.discard('') 21 22 23 def _is_url(url): 24 """Check to see if *url* has a valid protocol.""" 25 try: 26 return parse_url(url).scheme in _VALID_URLS 27 except: 28 return False 29 30 31 def read_file(filename, **kwargs): 32 """ 33 Returns a GeoDataFrame from a file or URL. 34 35 Parameters 36 ---------- 37 filename: str 38 Either the absolute or relative path to the file or URL to 39 be opened. 40 **kwargs: 41 Keyword args to be passed to the `open` or `BytesCollection` method 42 in the fiona library when opening the file. 
For more information on 43 possible keywords, type: 44 ``import fiona; help(fiona.open)`` 45 46 Examples 47 -------- 48 >>> df = geopandas.read_file("nybb.shp") 49 50 Returns 51 ------- 52 geodataframe : GeoDataFrame 53 """ 54 bbox = kwargs.pop('bbox', None) 55 if _is_url(filename): 56 req = _urlopen(filename) 57 path_or_bytes = req.read() 58 reader = fiona.BytesCollection 59 else: 60 path_or_bytes = filename 61 reader = fiona.open 62 with reader(path_or_bytes, **kwargs) as f: 63 crs = f.crs 64 if bbox is not None: 65 assert len(bbox) == 4 66 f_filt = f.filter(bbox=bbox) 67 else: 68 f_filt = f 69 gdf = GeoDataFrame.from_features(f_filt, crs=crs) 70 # re-order with column order from metadata, with geometry last 71 columns = list(f.meta["schema"]["properties"]) + ["geometry"] 72 gdf = gdf[columns] 73 74 return gdf 75 76 77 def to_file(df, filename, driver="ESRI Shapefile", schema=None, 78 **kwargs): 79 """ 80 Write this GeoDataFrame to an OGR data source 81 82 A dictionary of supported OGR providers is available via: 83 >>> import fiona 84 >>> fiona.supported_drivers 85 86 Parameters 87 ---------- 88 df : GeoDataFrame to be written 89 filename : string 90 File path or file handle to write to. 91 driver : string, default 'ESRI Shapefile' 92 The OGR format driver used to write the vector file. 93 schema : dict, default None 94 If specified, the schema dictionary is passed to Fiona to 95 better control how the file is written. If None, GeoPandas 96 will determine the schema based on each column's dtype 97 98 The *kwargs* are passed to fiona.open and can be used to write 99 to multi-layer data, store data within archives (zip files), etc. 100 """ 101 if schema is None: 102 schema = infer_schema(df) 103 filename = os.path.abspath(os.path.expanduser(filename)) 104 with fiona.drivers(): 105 with fiona.open(filename, 'w', driver=driver, crs=df.crs, 106 schema=schema, **kwargs) as colxn: 107 for feature in df.iterfeatures(): 108 colxn.write(feature) 109 110 111 def infer_schema(df): 112 try: 113 from collections import OrderedDict 114 except ImportError: 115 from ordereddict import OrderedDict 116 117 def convert_type(in_type): 118 if in_type == object: 119 return 'str' 120 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__ 121 if out_type == 'long': 122 out_type = 'int' 123 return out_type 124 125 properties = OrderedDict([ 126 (col, convert_type(_type)) for col, _type in 127 zip(df.columns, df.dtypes) if col != df._geometry_column_name 128 ]) 129 130 geom_type = _common_geom_type(df) 131 if not geom_type: 132 raise ValueError("Geometry column cannot contain mutiple " 133 "geometry types when writing to file.") 134 135 schema = {'geometry': geom_type, 'properties': properties} 136 137 return schema 138 139 140 def _common_geom_type(df): 141 # Need to check geom_types before we write to file... 142 # Some (most?) providers expect a single geometry type: 143 # Point, LineString, or Polygon 144 geom_types = df.geometry.geom_type.unique() 145 146 from os.path import commonprefix # To find longest common prefix 147 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse 148 if not geom_type: 149 geom_type = None 150 151 return geom_type 152 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/geopandas/io/file.py b/geopandas/io/file.py --- a/geopandas/io/file.py +++ b/geopandas/io/file.py @@ -104,8 +104,7 @@ with fiona.drivers(): with fiona.open(filename, 'w', driver=driver, crs=df.crs, schema=schema, **kwargs) as colxn: - for feature in df.iterfeatures(): - colxn.write(feature) + colxn.writerecords(df.iterfeatures()) def infer_schema(df):
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -104,8 +104,7 @@\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n- for feature in df.iterfeatures():\n- colxn.write(feature)\n+ colxn.writerecords(df.iterfeatures())\n \n \n def infer_schema(df):\n", "issue": "Very slow when writing to GPKG\nHere's my test suite for a proof: https://github.com/culebron/geodata\r\n\r\nRun `python3.6 few.py` and `python3.6 multiple.py` to compare.\r\n\r\n`few.py` opens a file with a lot of data, but only 2.7K records as GeoDataFrame. It writes them into GeoJSON and GPKG. In this case, GPKG driver outperforms GeoJSON.\r\n\r\n`multiple.py` creates a 100K records dataframe and then saves it to GeoJSON and GPKG. Here, GPKG is incredibly slow.\r\n\r\nMy results:\r\n\r\n\t$ python3.6 few.py \r\n\twriting 2.7K records to geojson 36.283805477003625\r\n\twriting 2.7K records to gpkg 20.792497718997765\r\n\t$ python3.6 multiple.py \r\n\t100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 100000/100000 [00:03<00:00, 29996.25it/s]\r\n\twriting 100K records to geojson 61.62079200500011\r\n\twriting 100K records to gpkg 260.4413645050008\r\n\r\nAnd notice that in case of `multiple.py`, the resulting GeoPackage file is only 9 megs. Which is times smaller than the file produced by `few.py`.\r\n\r\nAs I understand, the problem is that Fiona opens a session in Sqlite and creates a lock file, and it takes some time. And inspecting the code, I see GeoPandas naively writes everything 1 record at a time, which means Sqlite honestly locks it, then writes, then unlocks:\r\n\r\nhttps://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L107\r\n\r\n with fiona.drivers():\r\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\r\n schema=schema, **kwargs) as colxn:\r\n for feature in df.iterfeatures():\r\n colxn.write(feature)\r\n\r\n\r\nThis should be optimized. Are there branches/pull requests for this?\r\n\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\nfrom geopandas import GeoDataFrame\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. 
For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n with reader(path_or_bytes, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox) == 4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n # re-order with column order from metadata, with geometry last\n columns = list(f.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = gdf[columns]\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n for feature in df.iterfeatures():\n colxn.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) 
providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}], "after_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\nfrom geopandas import GeoDataFrame\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n with reader(path_or_bytes, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox) == 4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n # re-order with column order from metadata, with geometry last\n columns = list(f.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = gdf[columns]\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. 
If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}]}
2,222
123
gh_patches_debug_23728
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-179
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Nested mappings raise an error ```cfn-lint 0.3.1``` We use nested maps in our templates: ```yaml Mappings: RegionAccountToAZ: ap-northeast-1: 0123456789: - ap-northeast-1a - ap-northeast-1c - none 9876543210: - ap-northeast-1a - ap-northeast-1b - ap-northeast-1c ``` We'd access this data using a construction like `!FindInMap [RegionAccountToAZ, !Ref 'AWS::Region', !Ref 'AWS::AccountId']`. However cfn-lint says: ``` E7001 Mapping RegionAccountToAZ has invalid property at 9876543210 test.cfn.yaml:3:5 E7001 Mapping RegionAccountToAZ has invalid property at 0123456789 test.cfn.yaml:4:7 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/mappings/Configuration.py` Content: ``` 1 """ 2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 from cfnlint import CloudFormationLintRule 18 from cfnlint import RuleMatch 19 20 21 class Configuration(CloudFormationLintRule): 22 """Check if Mappings are configured correctly""" 23 id = 'E7001' 24 shortdesc = 'Mappings are appropriately configured' 25 description = 'Check if Mappings are properly configured' 26 tags = ['base', 'mappings'] 27 28 def match(self, cfn): 29 """Check CloudFormation Parameters""" 30 31 matches = list() 32 33 mappings = cfn.template.get('Mappings', {}) 34 if mappings: 35 for mapname, mapobj in mappings.items(): 36 if not isinstance(mapobj, dict): 37 message = 'Mapping {0} has invalid property' 38 matches.append(RuleMatch( 39 ['Mappings', mapname], 40 message.format(mapname) 41 )) 42 else: 43 for firstkey in mapobj: 44 firstkeyobj = mapobj[firstkey] 45 if not isinstance(firstkeyobj, dict): 46 message = 'Mapping {0} has invalid property at {1}' 47 matches.append(RuleMatch( 48 ['Mappings', mapname, firstkey], 49 message.format(mapname, firstkeyobj) 50 )) 51 else: 52 for secondkey in firstkeyobj: 53 if isinstance(firstkeyobj[secondkey], (dict, list)): 54 message = 'Mapping {0} has invalid property at {1}' 55 matches.append(RuleMatch( 56 ['Mappings', mapname, firstkey, secondkey], 57 message.format(mapname, secondkey) 58 )) 59 60 return matches 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py --- a/src/cfnlint/rules/mappings/Configuration.py +++ b/src/cfnlint/rules/mappings/Configuration.py @@ -14,6 +14,7 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import six from cfnlint import CloudFormationLintRule from cfnlint import RuleMatch @@ -50,7 +51,9 @@ )) else: for secondkey in firstkeyobj: - if isinstance(firstkeyobj[secondkey], (dict, list)): + if not isinstance( + firstkeyobj[secondkey], + (six.string_types, list, six.integer_types)): message = 'Mapping {0} has invalid property at {1}' matches.append(RuleMatch( ['Mappings', mapname, firstkey, secondkey],
{"golden_diff": "diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py\n--- a/src/cfnlint/rules/mappings/Configuration.py\n+++ b/src/cfnlint/rules/mappings/Configuration.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n \n@@ -50,7 +51,9 @@\n ))\n else:\n for secondkey in firstkeyobj:\n- if isinstance(firstkeyobj[secondkey], (dict, list)):\n+ if not isinstance(\n+ firstkeyobj[secondkey],\n+ (six.string_types, list, six.integer_types)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n", "issue": "Nested mappings raise an error\n```cfn-lint 0.3.1```\r\n\r\nWe use nested maps in our templates:\r\n\r\n```yaml\r\nMappings:\r\n RegionAccountToAZ:\r\n ap-northeast-1:\r\n 0123456789:\r\n - ap-northeast-1a\r\n - ap-northeast-1c\r\n - none\r\n 9876543210:\r\n - ap-northeast-1a\r\n - ap-northeast-1b\r\n - ap-northeast-1c\r\n```\r\n\r\nWe'd access this data using a construction like `!FindInMap [RegionAccountToAZ, !Ref 'AWS::Region', !Ref 'AWS::AccountId']`. However cfn-lint says:\r\n\r\n```\r\nE7001 Mapping RegionAccountToAZ has invalid property at 9876543210\r\ntest.cfn.yaml:3:5\r\n\r\nE7001 Mapping RegionAccountToAZ has invalid property at 0123456789\r\ntest.cfn.yaml:4:7\r\n```\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Mappings are configured correctly\"\"\"\n id = 'E7001'\n shortdesc = 'Mappings are appropriately configured'\n description = 'Check if Mappings are properly configured'\n tags = ['base', 'mappings']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = list()\n\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n if not isinstance(mapobj, dict):\n message = 'Mapping {0} has invalid property'\n matches.append(RuleMatch(\n ['Mappings', mapname],\n message.format(mapname)\n ))\n else:\n for firstkey in mapobj:\n firstkeyobj = mapobj[firstkey]\n if not isinstance(firstkeyobj, dict):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey],\n message.format(mapname, firstkeyobj)\n ))\n else:\n for secondkey in firstkeyobj:\n if isinstance(firstkeyobj[secondkey], (dict, list)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n message.format(mapname, secondkey)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/Configuration.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Mappings are configured correctly\"\"\"\n id = 'E7001'\n shortdesc = 'Mappings are appropriately configured'\n description = 'Check if Mappings are properly configured'\n tags = ['base', 'mappings']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = list()\n\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n if not isinstance(mapobj, dict):\n message = 'Mapping {0} has invalid property'\n matches.append(RuleMatch(\n ['Mappings', mapname],\n message.format(mapname)\n ))\n else:\n for firstkey in mapobj:\n firstkeyobj = mapobj[firstkey]\n if not isinstance(firstkeyobj, dict):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey],\n message.format(mapname, firstkeyobj)\n ))\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n firstkeyobj[secondkey],\n (six.string_types, list, six.integer_types)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n message.format(mapname, secondkey)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/Configuration.py"}]}
1,153
223
gh_patches_debug_13928
rasdani/github-patches
git_diff
python-poetry__poetry-8227
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fix: remove exception when keyring is locked # Pull Request Check List Resolves: #1917 <!-- This is just a reminder about the most common mistakes. Please make sure that you tick all *appropriate* boxes. But please read our [contribution guide](https://python-poetry.org/docs/contributing/) at least once, it will save you unnecessary review cycles! --> - [ ] Added **tests** for changed code. - [ ] Updated **documentation** for changed code. <!-- If you have *any* questions to *any* of the points above, just **submit and ask**! This checklist is here to *help* you, not to deter you from contributing! --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/poetry/utils/password_manager.py` Content: ``` 1 from __future__ import annotations 2 3 import dataclasses 4 import logging 5 6 from contextlib import suppress 7 from typing import TYPE_CHECKING 8 9 10 if TYPE_CHECKING: 11 from poetry.config.config import Config 12 13 logger = logging.getLogger(__name__) 14 15 16 class PasswordManagerError(Exception): 17 pass 18 19 20 class PoetryKeyringError(Exception): 21 pass 22 23 24 @dataclasses.dataclass 25 class HTTPAuthCredential: 26 username: str | None = dataclasses.field(default=None) 27 password: str | None = dataclasses.field(default=None) 28 29 30 class PoetryKeyring: 31 def __init__(self, namespace: str) -> None: 32 self._namespace = namespace 33 self._is_available = True 34 35 self._check() 36 37 def is_available(self) -> bool: 38 return self._is_available 39 40 def get_credential( 41 self, *names: str, username: str | None = None 42 ) -> HTTPAuthCredential: 43 default = HTTPAuthCredential(username=username, password=None) 44 45 if not self.is_available(): 46 return default 47 48 import keyring 49 50 for name in names: 51 credential = keyring.get_credential(name, username) 52 if credential: 53 return HTTPAuthCredential( 54 username=credential.username, password=credential.password 55 ) 56 57 return default 58 59 def get_password(self, name: str, username: str) -> str | None: 60 if not self.is_available(): 61 return None 62 63 import keyring 64 import keyring.errors 65 66 name = self.get_entry_name(name) 67 68 try: 69 return keyring.get_password(name, username) 70 except (RuntimeError, keyring.errors.KeyringError): 71 raise PoetryKeyringError( 72 f"Unable to retrieve the password for {name} from the key ring" 73 ) 74 75 def set_password(self, name: str, username: str, password: str) -> None: 76 if not self.is_available(): 77 return 78 79 import keyring 80 import keyring.errors 81 82 name = self.get_entry_name(name) 83 84 try: 85 keyring.set_password(name, username, password) 86 except (RuntimeError, keyring.errors.KeyringError) as e: 87 raise PoetryKeyringError( 88 f"Unable to store the password for {name} in the key ring: {e}" 89 ) 90 91 def delete_password(self, name: str, username: str) -> None: 92 if not self.is_available(): 93 return 94 95 import keyring.errors 96 97 name = self.get_entry_name(name) 98 99 try: 100 keyring.delete_password(name, username) 101 except (RuntimeError, keyring.errors.KeyringError): 102 raise PoetryKeyringError( 103 f"Unable to delete the password for {name} from the key ring" 104 ) 105 106 def get_entry_name(self, name: str) -> str: 107 return f"{self._namespace}-{name}" 108 109 def _check(self) -> None: 110 try: 111 import keyring 112 except ImportError 
as e: 113 logger.debug("An error occurred while importing keyring: %s", e) 114 self._is_available = False 115 116 return 117 118 backend = keyring.get_keyring() 119 name = backend.name.split(" ")[0] 120 if name in ("fail", "null"): 121 logger.debug("No suitable keyring backend found") 122 self._is_available = False 123 elif "plaintext" in backend.name.lower(): 124 logger.debug("Only a plaintext keyring backend is available. Not using it.") 125 self._is_available = False 126 elif name == "chainer": 127 try: 128 import keyring.backend 129 130 backends = keyring.backend.get_all_keyring() 131 132 self._is_available = any( 133 b.name.split(" ")[0] not in ["chainer", "fail", "null"] 134 and "plaintext" not in b.name.lower() 135 for b in backends 136 ) 137 except ImportError: 138 self._is_available = False 139 140 if not self._is_available: 141 logger.debug("No suitable keyring backends were found") 142 143 144 class PasswordManager: 145 def __init__(self, config: Config) -> None: 146 self._config = config 147 self._keyring: PoetryKeyring | None = None 148 149 @property 150 def keyring(self) -> PoetryKeyring: 151 if self._keyring is None: 152 self._keyring = PoetryKeyring("poetry-repository") 153 154 if not self._keyring.is_available(): 155 logger.debug( 156 "<warning>Keyring is not available, credentials will be stored and " 157 "retrieved from configuration files as plaintext.</>" 158 ) 159 160 return self._keyring 161 162 @staticmethod 163 def warn_plaintext_credentials_stored() -> None: 164 logger.warning("Using a plaintext file to store credentials") 165 166 def set_pypi_token(self, name: str, token: str) -> None: 167 if not self.keyring.is_available(): 168 self.warn_plaintext_credentials_stored() 169 self._config.auth_config_source.add_property(f"pypi-token.{name}", token) 170 else: 171 self.keyring.set_password(name, "__token__", token) 172 173 def get_pypi_token(self, repo_name: str) -> str | None: 174 """Get PyPi token. 175 176 First checks the environment variables for a token, 177 then the configured username/password and the 178 available keyring. 179 180 :param repo_name: Name of repository. 181 :return: Returns a token as a string if found, otherwise None. 
182 """ 183 token: str | None = self._config.get(f"pypi-token.{repo_name}") 184 if token: 185 return token 186 187 return self.keyring.get_password(repo_name, "__token__") 188 189 def delete_pypi_token(self, name: str) -> None: 190 if not self.keyring.is_available(): 191 return self._config.auth_config_source.remove_property(f"pypi-token.{name}") 192 193 self.keyring.delete_password(name, "__token__") 194 195 def get_http_auth(self, name: str) -> dict[str, str | None] | None: 196 username = self._config.get(f"http-basic.{name}.username") 197 password = self._config.get(f"http-basic.{name}.password") 198 if not username and not password: 199 return None 200 201 if not password: 202 password = self.keyring.get_password(name, username) 203 204 return { 205 "username": username, 206 "password": password, 207 } 208 209 def set_http_password(self, name: str, username: str, password: str) -> None: 210 auth = {"username": username} 211 212 if not self.keyring.is_available(): 213 self.warn_plaintext_credentials_stored() 214 auth["password"] = password 215 else: 216 self.keyring.set_password(name, username, password) 217 218 self._config.auth_config_source.add_property(f"http-basic.{name}", auth) 219 220 def delete_http_password(self, name: str) -> None: 221 auth = self.get_http_auth(name) 222 if not auth: 223 return 224 225 username = auth.get("username") 226 if username is None: 227 return 228 229 with suppress(PoetryKeyringError): 230 self.keyring.delete_password(name, username) 231 232 self._config.auth_config_source.remove_property(f"http-basic.{name}") 233 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/poetry/utils/password_manager.py b/src/poetry/utils/password_manager.py --- a/src/poetry/utils/password_manager.py +++ b/src/poetry/utils/password_manager.py @@ -47,8 +47,18 @@ import keyring + from keyring.errors import KeyringError + from keyring.errors import KeyringLocked + for name in names: - credential = keyring.get_credential(name, username) + credential = None + try: + credential = keyring.get_credential(name, username) + except KeyringLocked: + logger.debug("Keyring %s is locked", name) + except (KeyringError, RuntimeError): + logger.debug("Accessing keyring %s failed", name, exc_info=True) + if credential: return HTTPAuthCredential( username=credential.username, password=credential.password
{"golden_diff": "diff --git a/src/poetry/utils/password_manager.py b/src/poetry/utils/password_manager.py\n--- a/src/poetry/utils/password_manager.py\n+++ b/src/poetry/utils/password_manager.py\n@@ -47,8 +47,18 @@\n \n import keyring\n \n+ from keyring.errors import KeyringError\n+ from keyring.errors import KeyringLocked\n+\n for name in names:\n- credential = keyring.get_credential(name, username)\n+ credential = None\n+ try:\n+ credential = keyring.get_credential(name, username)\n+ except KeyringLocked:\n+ logger.debug(\"Keyring %s is locked\", name)\n+ except (KeyringError, RuntimeError):\n+ logger.debug(\"Accessing keyring %s failed\", name, exc_info=True)\n+\n if credential:\n return HTTPAuthCredential(\n username=credential.username, password=credential.password\n", "issue": "fix: remove exception when keyring is locked \n# Pull Request Check List\r\n\r\nResolves: #1917\r\n\r\n<!-- This is just a reminder about the most common mistakes. Please make sure that you tick all *appropriate* boxes. But please read our [contribution guide](https://python-poetry.org/docs/contributing/) at least once, it will save you unnecessary review cycles! -->\r\n\r\n- [ ] Added **tests** for changed code.\r\n- [ ] Updated **documentation** for changed code.\r\n\r\n<!-- If you have *any* questions to *any* of the points above, just **submit and ask**! This checklist is here to *help* you, not to deter you from contributing! -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nimport logging\n\nfrom contextlib import suppress\nfrom typing import TYPE_CHECKING\n\n\nif TYPE_CHECKING:\n from poetry.config.config import Config\n\nlogger = logging.getLogger(__name__)\n\n\nclass PasswordManagerError(Exception):\n pass\n\n\nclass PoetryKeyringError(Exception):\n pass\n\n\[email protected]\nclass HTTPAuthCredential:\n username: str | None = dataclasses.field(default=None)\n password: str | None = dataclasses.field(default=None)\n\n\nclass PoetryKeyring:\n def __init__(self, namespace: str) -> None:\n self._namespace = namespace\n self._is_available = True\n\n self._check()\n\n def is_available(self) -> bool:\n return self._is_available\n\n def get_credential(\n self, *names: str, username: str | None = None\n ) -> HTTPAuthCredential:\n default = HTTPAuthCredential(username=username, password=None)\n\n if not self.is_available():\n return default\n\n import keyring\n\n for name in names:\n credential = keyring.get_credential(name, username)\n if credential:\n return HTTPAuthCredential(\n username=credential.username, password=credential.password\n )\n\n return default\n\n def get_password(self, name: str, username: str) -> str | None:\n if not self.is_available():\n return None\n\n import keyring\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n return keyring.get_password(name, username)\n except (RuntimeError, keyring.errors.KeyringError):\n raise PoetryKeyringError(\n f\"Unable to retrieve the password for {name} from the key ring\"\n )\n\n def set_password(self, name: str, username: str, password: str) -> None:\n if not self.is_available():\n return\n\n import keyring\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n keyring.set_password(name, username, password)\n except (RuntimeError, keyring.errors.KeyringError) as e:\n raise PoetryKeyringError(\n f\"Unable to store the password for {name} in the key ring: {e}\"\n )\n\n def delete_password(self, name: str, username: str) -> None:\n if not self.is_available():\n 
return\n\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n keyring.delete_password(name, username)\n except (RuntimeError, keyring.errors.KeyringError):\n raise PoetryKeyringError(\n f\"Unable to delete the password for {name} from the key ring\"\n )\n\n def get_entry_name(self, name: str) -> str:\n return f\"{self._namespace}-{name}\"\n\n def _check(self) -> None:\n try:\n import keyring\n except ImportError as e:\n logger.debug(\"An error occurred while importing keyring: %s\", e)\n self._is_available = False\n\n return\n\n backend = keyring.get_keyring()\n name = backend.name.split(\" \")[0]\n if name in (\"fail\", \"null\"):\n logger.debug(\"No suitable keyring backend found\")\n self._is_available = False\n elif \"plaintext\" in backend.name.lower():\n logger.debug(\"Only a plaintext keyring backend is available. Not using it.\")\n self._is_available = False\n elif name == \"chainer\":\n try:\n import keyring.backend\n\n backends = keyring.backend.get_all_keyring()\n\n self._is_available = any(\n b.name.split(\" \")[0] not in [\"chainer\", \"fail\", \"null\"]\n and \"plaintext\" not in b.name.lower()\n for b in backends\n )\n except ImportError:\n self._is_available = False\n\n if not self._is_available:\n logger.debug(\"No suitable keyring backends were found\")\n\n\nclass PasswordManager:\n def __init__(self, config: Config) -> None:\n self._config = config\n self._keyring: PoetryKeyring | None = None\n\n @property\n def keyring(self) -> PoetryKeyring:\n if self._keyring is None:\n self._keyring = PoetryKeyring(\"poetry-repository\")\n\n if not self._keyring.is_available():\n logger.debug(\n \"<warning>Keyring is not available, credentials will be stored and \"\n \"retrieved from configuration files as plaintext.</>\"\n )\n\n return self._keyring\n\n @staticmethod\n def warn_plaintext_credentials_stored() -> None:\n logger.warning(\"Using a plaintext file to store credentials\")\n\n def set_pypi_token(self, name: str, token: str) -> None:\n if not self.keyring.is_available():\n self.warn_plaintext_credentials_stored()\n self._config.auth_config_source.add_property(f\"pypi-token.{name}\", token)\n else:\n self.keyring.set_password(name, \"__token__\", token)\n\n def get_pypi_token(self, repo_name: str) -> str | None:\n \"\"\"Get PyPi token.\n\n First checks the environment variables for a token,\n then the configured username/password and the\n available keyring.\n\n :param repo_name: Name of repository.\n :return: Returns a token as a string if found, otherwise None.\n \"\"\"\n token: str | None = self._config.get(f\"pypi-token.{repo_name}\")\n if token:\n return token\n\n return self.keyring.get_password(repo_name, \"__token__\")\n\n def delete_pypi_token(self, name: str) -> None:\n if not self.keyring.is_available():\n return self._config.auth_config_source.remove_property(f\"pypi-token.{name}\")\n\n self.keyring.delete_password(name, \"__token__\")\n\n def get_http_auth(self, name: str) -> dict[str, str | None] | None:\n username = self._config.get(f\"http-basic.{name}.username\")\n password = self._config.get(f\"http-basic.{name}.password\")\n if not username and not password:\n return None\n\n if not password:\n password = self.keyring.get_password(name, username)\n\n return {\n \"username\": username,\n \"password\": password,\n }\n\n def set_http_password(self, name: str, username: str, password: str) -> None:\n auth = {\"username\": username}\n\n if not self.keyring.is_available():\n self.warn_plaintext_credentials_stored()\n auth[\"password\"] = 
password\n else:\n self.keyring.set_password(name, username, password)\n\n self._config.auth_config_source.add_property(f\"http-basic.{name}\", auth)\n\n def delete_http_password(self, name: str) -> None:\n auth = self.get_http_auth(name)\n if not auth:\n return\n\n username = auth.get(\"username\")\n if username is None:\n return\n\n with suppress(PoetryKeyringError):\n self.keyring.delete_password(name, username)\n\n self._config.auth_config_source.remove_property(f\"http-basic.{name}\")\n", "path": "src/poetry/utils/password_manager.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nimport logging\n\nfrom contextlib import suppress\nfrom typing import TYPE_CHECKING\n\n\nif TYPE_CHECKING:\n from poetry.config.config import Config\n\nlogger = logging.getLogger(__name__)\n\n\nclass PasswordManagerError(Exception):\n pass\n\n\nclass PoetryKeyringError(Exception):\n pass\n\n\[email protected]\nclass HTTPAuthCredential:\n username: str | None = dataclasses.field(default=None)\n password: str | None = dataclasses.field(default=None)\n\n\nclass PoetryKeyring:\n def __init__(self, namespace: str) -> None:\n self._namespace = namespace\n self._is_available = True\n\n self._check()\n\n def is_available(self) -> bool:\n return self._is_available\n\n def get_credential(\n self, *names: str, username: str | None = None\n ) -> HTTPAuthCredential:\n default = HTTPAuthCredential(username=username, password=None)\n\n if not self.is_available():\n return default\n\n import keyring\n\n from keyring.errors import KeyringError\n from keyring.errors import KeyringLocked\n\n for name in names:\n credential = None\n try:\n credential = keyring.get_credential(name, username)\n except KeyringLocked:\n logger.debug(\"Keyring %s is locked\", name)\n except (KeyringError, RuntimeError):\n logger.debug(\"Accessing keyring %s failed\", name, exc_info=True)\n\n if credential:\n return HTTPAuthCredential(\n username=credential.username, password=credential.password\n )\n\n return default\n\n def get_password(self, name: str, username: str) -> str | None:\n if not self.is_available():\n return None\n\n import keyring\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n return keyring.get_password(name, username)\n except (RuntimeError, keyring.errors.KeyringError):\n raise PoetryKeyringError(\n f\"Unable to retrieve the password for {name} from the key ring\"\n )\n\n def set_password(self, name: str, username: str, password: str) -> None:\n if not self.is_available():\n return\n\n import keyring\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n keyring.set_password(name, username, password)\n except (RuntimeError, keyring.errors.KeyringError) as e:\n raise PoetryKeyringError(\n f\"Unable to store the password for {name} in the key ring: {e}\"\n )\n\n def delete_password(self, name: str, username: str) -> None:\n if not self.is_available():\n return\n\n import keyring.errors\n\n name = self.get_entry_name(name)\n\n try:\n keyring.delete_password(name, username)\n except (RuntimeError, keyring.errors.KeyringError):\n raise PoetryKeyringError(\n f\"Unable to delete the password for {name} from the key ring\"\n )\n\n def get_entry_name(self, name: str) -> str:\n return f\"{self._namespace}-{name}\"\n\n def _check(self) -> None:\n try:\n import keyring\n except ImportError as e:\n logger.debug(\"An error occurred while importing keyring: %s\", e)\n self._is_available = False\n\n return\n\n backend = keyring.get_keyring()\n name = 
backend.name.split(\" \")[0]\n if name in (\"fail\", \"null\"):\n logger.debug(\"No suitable keyring backend found\")\n self._is_available = False\n elif \"plaintext\" in backend.name.lower():\n logger.debug(\"Only a plaintext keyring backend is available. Not using it.\")\n self._is_available = False\n elif name == \"chainer\":\n try:\n import keyring.backend\n\n backends = keyring.backend.get_all_keyring()\n\n self._is_available = any(\n b.name.split(\" \")[0] not in [\"chainer\", \"fail\", \"null\"]\n and \"plaintext\" not in b.name.lower()\n for b in backends\n )\n except ImportError:\n self._is_available = False\n\n if not self._is_available:\n logger.debug(\"No suitable keyring backends were found\")\n\n\nclass PasswordManager:\n def __init__(self, config: Config) -> None:\n self._config = config\n self._keyring: PoetryKeyring | None = None\n\n @property\n def keyring(self) -> PoetryKeyring:\n if self._keyring is None:\n self._keyring = PoetryKeyring(\"poetry-repository\")\n\n if not self._keyring.is_available():\n logger.debug(\n \"<warning>Keyring is not available, credentials will be stored and \"\n \"retrieved from configuration files as plaintext.</>\"\n )\n\n return self._keyring\n\n @staticmethod\n def warn_plaintext_credentials_stored() -> None:\n logger.warning(\"Using a plaintext file to store credentials\")\n\n def set_pypi_token(self, name: str, token: str) -> None:\n if not self.keyring.is_available():\n self.warn_plaintext_credentials_stored()\n self._config.auth_config_source.add_property(f\"pypi-token.{name}\", token)\n else:\n self.keyring.set_password(name, \"__token__\", token)\n\n def get_pypi_token(self, repo_name: str) -> str | None:\n \"\"\"Get PyPi token.\n\n First checks the environment variables for a token,\n then the configured username/password and the\n available keyring.\n\n :param repo_name: Name of repository.\n :return: Returns a token as a string if found, otherwise None.\n \"\"\"\n token: str | None = self._config.get(f\"pypi-token.{repo_name}\")\n if token:\n return token\n\n return self.keyring.get_password(repo_name, \"__token__\")\n\n def delete_pypi_token(self, name: str) -> None:\n if not self.keyring.is_available():\n return self._config.auth_config_source.remove_property(f\"pypi-token.{name}\")\n\n self.keyring.delete_password(name, \"__token__\")\n\n def get_http_auth(self, name: str) -> dict[str, str | None] | None:\n username = self._config.get(f\"http-basic.{name}.username\")\n password = self._config.get(f\"http-basic.{name}.password\")\n if not username and not password:\n return None\n\n if not password:\n password = self.keyring.get_password(name, username)\n\n return {\n \"username\": username,\n \"password\": password,\n }\n\n def set_http_password(self, name: str, username: str, password: str) -> None:\n auth = {\"username\": username}\n\n if not self.keyring.is_available():\n self.warn_plaintext_credentials_stored()\n auth[\"password\"] = password\n else:\n self.keyring.set_password(name, username, password)\n\n self._config.auth_config_source.add_property(f\"http-basic.{name}\", auth)\n\n def delete_http_password(self, name: str) -> None:\n auth = self.get_http_auth(name)\n if not auth:\n return\n\n username = auth.get(\"username\")\n if username is None:\n return\n\n with suppress(PoetryKeyringError):\n self.keyring.delete_password(name, username)\n\n self._config.auth_config_source.remove_property(f\"http-basic.{name}\")\n", "path": "src/poetry/utils/password_manager.py"}]}
num_tokens: 2,574
num_tokens_diff: 200
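For readers skimming the record above: the whole golden diff amounts to catching a locked or otherwise failing system keyring inside `get_credential`, logging it, and carrying on. A minimal standalone sketch of that defensive pattern follows; the helper name and logger setup are illustrative, and only the `keyring.get_credential` call and the `keyring.errors` exception types are taken from the diff itself.

```python
# Sketch of the "treat a locked keyring as unavailable" pattern from the diff above.
# Only keyring.get_credential and the keyring.errors types are real library API here.
from __future__ import annotations

import logging

import keyring
from keyring.errors import KeyringError, KeyringLocked

logger = logging.getLogger(__name__)


def lookup_credential(service: str, username: str | None = None):
    """Return a stored credential, or None when the keyring is locked or broken."""
    try:
        return keyring.get_credential(service, username)
    except KeyringLocked:
        logger.debug("Keyring %s is locked", service)
    except (KeyringError, RuntimeError):
        logger.debug("Accessing keyring %s failed", service, exc_info=True)
    return None
```

The same log-and-fall-back stance matches how the surrounding `PasswordManager` code already degrades to plaintext configuration storage when no usable keyring backend is found.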
problem_id: gh_patches_debug_55289
source: rasdani/github-patches
task_type: git_diff
in_source_id: napari__napari-6452
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ValidationError when trying to plot more than 16 different colors ### 🐛 Bug Report Dear napari team, when I try to plot points with more than 16 different colors I get following error: ```python File [AppData\Local\miniconda3\envs\xparse\lib\site-packages\napari\utils\events\evented_model.py:242](/AppData/Local/miniconda3/envs/xparse/lib/site-packages/napari/utils/events/evented_model.py:242), in EventedModel.__init__(self, **kwargs) [241] -> None: --> [242] super().__init__(**kwargs) [244] self._events.source = self [245] # add event emitters for each field which is mutable File [AppData\Local\miniconda3\envs\xparse\lib\site-packages\pydantic\main.py:341](/Local/miniconda3/envs/xparse/lib/site-packages/pydantic/main.py:341), in pydantic.main.BaseModel.__init__() ValidationError: 1 validation error for ColorManager __root__ Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe' (type=type_error) ``` For 16 different colors or less it is no problem. ### 💡 Steps to Reproduce The bug can be reproduced using following code: ```python from skimage import data import numpy as np import napari # set parameters for point generation n_points = 100 n_clusters = 17 points = np.random.rand(n_points, 2) * 100 # start viewer viewer = napari.view_image(data.astronaut(), rgb=True) # set point properties point_properties = { 'abc': np.random.choice([str(elem) for elem in np.arange(n_clusters)], n_points) } # add points points_layer = viewer.add_points( points, properties=point_properties, face_color='abc', face_color_cycle=['magenta', 'green'], edge_width=0.1, ) ``` The number of clusters can be changed with `n_clusters`. ### 💡 Expected Behavior I expect to be able to plot points with more than 16 different colors. Since I only offered a color cycle with two colors, I expect napari to behave the same for >16 points than <=16 points. ### 🌎 Environment napari: 0.4.18 Platform: Windows-10-10.0.19045-SP0 Python: 3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)] Qt: 5.15.2 PyQt5: 5.15.9 NumPy: 1.25.2 SciPy: 1.9.3 Dask: 2023.9.2 VisPy: 0.12.2 magicgui: 0.7.3 superqt: 0.6.0 in-n-out: 0.1.8 app-model: 0.2.2 npe2: 0.7.2 OpenGL: - GL version: 4.6.0 Compatibility Profile Context 23.10.24.05.230830 - MAX_TEXTURE_SIZE: 16384 Screens: - screen 1: resolution 2560x1600, scale 1.0 - screen 2: resolution 3840x2160, scale 1.0 - screen 3: resolution 2880x1800, scale 1.0 Settings path: - [AppData\Local\napari\xparse_57fe1a37b30a9e37de3a06866d324c7f56a92d1a\settings.yaml](/AppData/Local/napari/xparse_57fe1a37b30a9e37de3a06866d324c7f56a92d1a/settings.yaml) Plugins: - napari: 0.4.18 (77 contributions) - napari-console: 0.0.8 (0 contributions) - napari-svg: 0.1.10 (2 contributions) - ome-types: 0.4.2 (2 contributions) ### 💡 Additional Context Also it was really difficult to detect this error. I encountered the problem in a function that was included into a dock widget. And when I tried to add the points via the widget, simply nothing happened. No error message appeared. Is it possible to somehow show the error messages when executing functions from widgets? Beside that it is really great to work with napari! Thanks a lot in advance! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `napari/layers/utils/color_manager_utils.py` Content: ``` 1 from typing import Any, Dict, Tuple, Union 2 3 import numpy as np 4 5 from napari.utils.colormaps import Colormap 6 from napari.utils.translations import trans 7 8 9 def guess_continuous(color_map: np.ndarray) -> bool: 10 """Guess if the property is continuous (return True) or categorical (return False) 11 12 The property is guessed as continuous if it is a float or contains over 16 elements. 13 14 Parameters 15 ---------- 16 color_map : np.ndarray 17 The property values to guess if they are continuous 18 19 Returns 20 ------- 21 continuous : bool 22 True of the property is guessed to be continuous, False if not. 23 """ 24 # if the property is a floating type, guess continuous 25 return ( 26 issubclass(color_map.dtype.type, np.floating) 27 or len(np.unique(color_map)) > 16 28 ) 29 30 31 def is_color_mapped(color, properties): 32 """determines if the new color argument is for directly setting or cycle/colormap""" 33 if isinstance(color, str): 34 return color in properties 35 if isinstance(color, dict): 36 return True 37 if isinstance(color, (list, np.ndarray)): 38 return False 39 40 raise ValueError( 41 trans._( 42 'face_color should be the name of a color, an array of colors, or the name of an property', 43 deferred=True, 44 ) 45 ) 46 47 48 def map_property( 49 prop: np.ndarray, 50 colormap: Colormap, 51 contrast_limits: Union[None, Tuple[float, float]] = None, 52 ) -> Tuple[np.ndarray, Tuple[float, float]]: 53 """Apply a colormap to a property 54 55 Parameters 56 ---------- 57 prop : np.ndarray 58 The property to be colormapped 59 colormap : napari.utils.Colormap 60 The colormap object to apply to the property 61 contrast_limits : Union[None, Tuple[float, float]] 62 The contrast limits for applying the colormap to the property. 63 If a 2-tuple is provided, it should be provided as (lower_bound, upper_bound). 64 If None is provided, the contrast limits will be set to (property.min(), property.max()). 65 Default value is None. 
66 """ 67 68 if contrast_limits is None: 69 contrast_limits = (prop.min(), prop.max()) 70 normalized_properties = np.interp(prop, contrast_limits, (0, 1)) 71 mapped_properties = colormap.map(normalized_properties) 72 73 return mapped_properties, contrast_limits 74 75 76 def _validate_colormap_mode( 77 values: Dict[str, Any] 78 ) -> Tuple[np.ndarray, Dict[str, Any]]: 79 """Validate the ColorManager field values specific for colormap mode 80 This is called by the root_validator in ColorManager 81 82 Parameters 83 ---------- 84 values : dict 85 The field values that are passed to the ColorManager root validator 86 87 Returns 88 ------- 89 colors : np.ndarray 90 The (Nx4) color array to set as ColorManager.colors 91 values : dict 92 """ 93 color_properties = values['color_properties'].values 94 cmap = values['continuous_colormap'] 95 if len(color_properties) > 0: 96 if values['contrast_limits'] is None: 97 colors, contrast_limits = map_property( 98 prop=color_properties, 99 colormap=cmap, 100 ) 101 values['contrast_limits'] = contrast_limits 102 else: 103 colors, _ = map_property( 104 prop=color_properties, 105 colormap=cmap, 106 contrast_limits=values['contrast_limits'], 107 ) 108 else: 109 colors = np.empty((0, 4)) 110 current_prop_value = values['color_properties'].current_value 111 if current_prop_value is not None: 112 values['current_color'] = cmap.map(current_prop_value)[0] 113 114 if len(colors) == 0: 115 colors = np.empty((0, 4)) 116 117 return colors, values 118 119 120 def _validate_cycle_mode( 121 values: Dict[str, Any] 122 ) -> Tuple[np.ndarray, Dict[str, Any]]: 123 """Validate the ColorManager field values specific for color cycle mode 124 This is called by the root_validator in ColorManager 125 126 Parameters 127 ---------- 128 values : dict 129 The field values that are passed to the ColorManager root validator 130 131 Returns 132 ------- 133 colors : np.ndarray 134 The (Nx4) color array to set as ColorManager.colors 135 values : dict 136 """ 137 color_properties = values['color_properties'].values 138 cmap = values['categorical_colormap'] 139 if len(color_properties) == 0: 140 colors = np.empty((0, 4)) 141 current_prop_value = values['color_properties'].current_value 142 if current_prop_value is not None: 143 values['current_color'] = cmap.map(current_prop_value)[0] 144 else: 145 colors = cmap.map(color_properties) 146 values['categorical_colormap'] = cmap 147 148 return colors, values 149 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napari/layers/utils/color_manager_utils.py b/napari/layers/utils/color_manager_utils.py --- a/napari/layers/utils/color_manager_utils.py +++ b/napari/layers/utils/color_manager_utils.py @@ -22,9 +22,9 @@ True of the property is guessed to be continuous, False if not. """ # if the property is a floating type, guess continuous - return ( - issubclass(color_map.dtype.type, np.floating) - or len(np.unique(color_map)) > 16 + return issubclass(color_map.dtype.type, np.floating) or ( + len(np.unique(color_map)) > 16 + and isinstance(color_map.dtype.type, np.integer) )
{"golden_diff": "diff --git a/napari/layers/utils/color_manager_utils.py b/napari/layers/utils/color_manager_utils.py\n--- a/napari/layers/utils/color_manager_utils.py\n+++ b/napari/layers/utils/color_manager_utils.py\n@@ -22,9 +22,9 @@\n True of the property is guessed to be continuous, False if not.\n \"\"\"\n # if the property is a floating type, guess continuous\n- return (\n- issubclass(color_map.dtype.type, np.floating)\n- or len(np.unique(color_map)) > 16\n+ return issubclass(color_map.dtype.type, np.floating) or (\n+ len(np.unique(color_map)) > 16\n+ and isinstance(color_map.dtype.type, np.integer)\n )\n", "issue": "ValidationError when trying to plot more than 16 different colors\n### \ud83d\udc1b Bug Report\r\n\r\nDear napari team,\r\n\r\nwhen I try to plot points with more than 16 different colors I get following error:\r\n\r\n```python\r\n\r\nFile [AppData\\Local\\miniconda3\\envs\\xparse\\lib\\site-packages\\napari\\utils\\events\\evented_model.py:242](/AppData/Local/miniconda3/envs/xparse/lib/site-packages/napari/utils/events/evented_model.py:242), in EventedModel.__init__(self, **kwargs)\r\n [241] -> None:\r\n--> [242] super().__init__(**kwargs)\r\n [244] self._events.source = self\r\n [245] # add event emitters for each field which is mutable\r\n\r\nFile [AppData\\Local\\miniconda3\\envs\\xparse\\lib\\site-packages\\pydantic\\main.py:341](/Local/miniconda3/envs/xparse/lib/site-packages/pydantic/main.py:341), in pydantic.main.BaseModel.__init__()\r\n\r\nValidationError: 1 validation error for ColorManager\r\n__root__\r\n Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe' (type=type_error)\r\n``` \r\n\r\nFor 16 different colors or less it is no problem.\r\n\r\n\r\n### \ud83d\udca1 Steps to Reproduce\r\n\r\nThe bug can be reproduced using following code:\r\n\r\n```python\r\nfrom skimage import data\r\nimport numpy as np\r\nimport napari\r\n\r\n# set parameters for point generation\r\nn_points = 100\r\nn_clusters = 17\r\npoints = np.random.rand(n_points, 2) * 100\r\n\r\n# start viewer\r\nviewer = napari.view_image(data.astronaut(), rgb=True)\r\n\r\n# set point properties\r\npoint_properties = {\r\n 'abc': np.random.choice([str(elem) for elem in np.arange(n_clusters)], n_points)\r\n}\r\n\r\n# add points\r\npoints_layer = viewer.add_points(\r\n points,\r\n properties=point_properties,\r\n face_color='abc',\r\n face_color_cycle=['magenta', 'green'],\r\n edge_width=0.1,\r\n)\r\n``` \r\n\r\nThe number of clusters can be changed with `n_clusters`.\r\n\r\n### \ud83d\udca1 Expected Behavior\r\n\r\nI expect to be able to plot points with more than 16 different colors. 
Since I only offered a color cycle with two colors, I expect napari to behave the same for >16 points than <=16 points.\r\n\r\n### \ud83c\udf0e Environment\r\n\r\nnapari: 0.4.18\r\nPlatform: Windows-10-10.0.19045-SP0\r\nPython: 3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]\r\nQt: 5.15.2\r\nPyQt5: 5.15.9\r\nNumPy: 1.25.2\r\nSciPy: 1.9.3\r\nDask: 2023.9.2\r\nVisPy: 0.12.2\r\nmagicgui: 0.7.3\r\nsuperqt: 0.6.0\r\nin-n-out: 0.1.8\r\napp-model: 0.2.2\r\nnpe2: 0.7.2\r\n\r\nOpenGL:\r\n - GL version: 4.6.0 Compatibility Profile Context 23.10.24.05.230830\r\n - MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n - screen 1: resolution 2560x1600, scale 1.0\r\n - screen 2: resolution 3840x2160, scale 1.0\r\n - screen 3: resolution 2880x1800, scale 1.0\r\n\r\nSettings path:\r\n - [AppData\\Local\\napari\\xparse_57fe1a37b30a9e37de3a06866d324c7f56a92d1a\\settings.yaml](/AppData/Local/napari/xparse_57fe1a37b30a9e37de3a06866d324c7f56a92d1a/settings.yaml)\r\nPlugins:\r\n - napari: 0.4.18 (77 contributions)\r\n - napari-console: 0.0.8 (0 contributions)\r\n - napari-svg: 0.1.10 (2 contributions)\r\n - ome-types: 0.4.2 (2 contributions)\r\n\r\n### \ud83d\udca1 Additional Context\r\n\r\nAlso it was really difficult to detect this error. I encountered the problem in a function that was included into a dock widget. And when I tried to add the points via the widget, simply nothing happened. No error message appeared. Is it possible to somehow show the error messages when executing functions from widgets?\r\n\r\nBeside that it is really great to work with napari! Thanks a lot in advance!\n", "before_files": [{"content": "from typing import Any, Dict, Tuple, Union\n\nimport numpy as np\n\nfrom napari.utils.colormaps import Colormap\nfrom napari.utils.translations import trans\n\n\ndef guess_continuous(color_map: np.ndarray) -> bool:\n \"\"\"Guess if the property is continuous (return True) or categorical (return False)\n\n The property is guessed as continuous if it is a float or contains over 16 elements.\n\n Parameters\n ----------\n color_map : np.ndarray\n The property values to guess if they are continuous\n\n Returns\n -------\n continuous : bool\n True of the property is guessed to be continuous, False if not.\n \"\"\"\n # if the property is a floating type, guess continuous\n return (\n issubclass(color_map.dtype.type, np.floating)\n or len(np.unique(color_map)) > 16\n )\n\n\ndef is_color_mapped(color, properties):\n \"\"\"determines if the new color argument is for directly setting or cycle/colormap\"\"\"\n if isinstance(color, str):\n return color in properties\n if isinstance(color, dict):\n return True\n if isinstance(color, (list, np.ndarray)):\n return False\n\n raise ValueError(\n trans._(\n 'face_color should be the name of a color, an array of colors, or the name of an property',\n deferred=True,\n )\n )\n\n\ndef map_property(\n prop: np.ndarray,\n colormap: Colormap,\n contrast_limits: Union[None, Tuple[float, float]] = None,\n) -> Tuple[np.ndarray, Tuple[float, float]]:\n \"\"\"Apply a colormap to a property\n\n Parameters\n ----------\n prop : np.ndarray\n The property to be colormapped\n colormap : napari.utils.Colormap\n The colormap object to apply to the property\n contrast_limits : Union[None, Tuple[float, float]]\n The contrast limits for applying the colormap to the property.\n If a 2-tuple is provided, it should be provided as (lower_bound, upper_bound).\n If None is provided, the contrast limits will be set to (property.min(), property.max()).\n Default value is None.\n 
\"\"\"\n\n if contrast_limits is None:\n contrast_limits = (prop.min(), prop.max())\n normalized_properties = np.interp(prop, contrast_limits, (0, 1))\n mapped_properties = colormap.map(normalized_properties)\n\n return mapped_properties, contrast_limits\n\n\ndef _validate_colormap_mode(\n values: Dict[str, Any]\n) -> Tuple[np.ndarray, Dict[str, Any]]:\n \"\"\"Validate the ColorManager field values specific for colormap mode\n This is called by the root_validator in ColorManager\n\n Parameters\n ----------\n values : dict\n The field values that are passed to the ColorManager root validator\n\n Returns\n -------\n colors : np.ndarray\n The (Nx4) color array to set as ColorManager.colors\n values : dict\n \"\"\"\n color_properties = values['color_properties'].values\n cmap = values['continuous_colormap']\n if len(color_properties) > 0:\n if values['contrast_limits'] is None:\n colors, contrast_limits = map_property(\n prop=color_properties,\n colormap=cmap,\n )\n values['contrast_limits'] = contrast_limits\n else:\n colors, _ = map_property(\n prop=color_properties,\n colormap=cmap,\n contrast_limits=values['contrast_limits'],\n )\n else:\n colors = np.empty((0, 4))\n current_prop_value = values['color_properties'].current_value\n if current_prop_value is not None:\n values['current_color'] = cmap.map(current_prop_value)[0]\n\n if len(colors) == 0:\n colors = np.empty((0, 4))\n\n return colors, values\n\n\ndef _validate_cycle_mode(\n values: Dict[str, Any]\n) -> Tuple[np.ndarray, Dict[str, Any]]:\n \"\"\"Validate the ColorManager field values specific for color cycle mode\n This is called by the root_validator in ColorManager\n\n Parameters\n ----------\n values : dict\n The field values that are passed to the ColorManager root validator\n\n Returns\n -------\n colors : np.ndarray\n The (Nx4) color array to set as ColorManager.colors\n values : dict\n \"\"\"\n color_properties = values['color_properties'].values\n cmap = values['categorical_colormap']\n if len(color_properties) == 0:\n colors = np.empty((0, 4))\n current_prop_value = values['color_properties'].current_value\n if current_prop_value is not None:\n values['current_color'] = cmap.map(current_prop_value)[0]\n else:\n colors = cmap.map(color_properties)\n values['categorical_colormap'] = cmap\n\n return colors, values\n", "path": "napari/layers/utils/color_manager_utils.py"}], "after_files": [{"content": "from typing import Any, Dict, Tuple, Union\n\nimport numpy as np\n\nfrom napari.utils.colormaps import Colormap\nfrom napari.utils.translations import trans\n\n\ndef guess_continuous(color_map: np.ndarray) -> bool:\n \"\"\"Guess if the property is continuous (return True) or categorical (return False)\n\n The property is guessed as continuous if it is a float or contains over 16 elements.\n\n Parameters\n ----------\n color_map : np.ndarray\n The property values to guess if they are continuous\n\n Returns\n -------\n continuous : bool\n True of the property is guessed to be continuous, False if not.\n \"\"\"\n # if the property is a floating type, guess continuous\n return issubclass(color_map.dtype.type, np.floating) or (\n len(np.unique(color_map)) > 16\n and isinstance(color_map.dtype.type, np.integer)\n )\n\n\ndef is_color_mapped(color, properties):\n \"\"\"determines if the new color argument is for directly setting or cycle/colormap\"\"\"\n if isinstance(color, str):\n return color in properties\n if isinstance(color, dict):\n return True\n if isinstance(color, (list, np.ndarray)):\n return False\n\n raise ValueError(\n 
trans._(\n 'face_color should be the name of a color, an array of colors, or the name of an property',\n deferred=True,\n )\n )\n\n\ndef map_property(\n prop: np.ndarray,\n colormap: Colormap,\n contrast_limits: Union[None, Tuple[float, float]] = None,\n) -> Tuple[np.ndarray, Tuple[float, float]]:\n \"\"\"Apply a colormap to a property\n\n Parameters\n ----------\n prop : np.ndarray\n The property to be colormapped\n colormap : napari.utils.Colormap\n The colormap object to apply to the property\n contrast_limits : Union[None, Tuple[float, float]]\n The contrast limits for applying the colormap to the property.\n If a 2-tuple is provided, it should be provided as (lower_bound, upper_bound).\n If None is provided, the contrast limits will be set to (property.min(), property.max()).\n Default value is None.\n \"\"\"\n\n if contrast_limits is None:\n contrast_limits = (prop.min(), prop.max())\n normalized_properties = np.interp(prop, contrast_limits, (0, 1))\n mapped_properties = colormap.map(normalized_properties)\n\n return mapped_properties, contrast_limits\n\n\ndef _validate_colormap_mode(\n values: Dict[str, Any]\n) -> Tuple[np.ndarray, Dict[str, Any]]:\n \"\"\"Validate the ColorManager field values specific for colormap mode\n This is called by the root_validator in ColorManager\n\n Parameters\n ----------\n values : dict\n The field values that are passed to the ColorManager root validator\n\n Returns\n -------\n colors : np.ndarray\n The (Nx4) color array to set as ColorManager.colors\n values : dict\n \"\"\"\n color_properties = values['color_properties'].values\n cmap = values['continuous_colormap']\n if len(color_properties) > 0:\n if values['contrast_limits'] is None:\n colors, contrast_limits = map_property(\n prop=color_properties,\n colormap=cmap,\n )\n values['contrast_limits'] = contrast_limits\n else:\n colors, _ = map_property(\n prop=color_properties,\n colormap=cmap,\n contrast_limits=values['contrast_limits'],\n )\n else:\n colors = np.empty((0, 4))\n current_prop_value = values['color_properties'].current_value\n if current_prop_value is not None:\n values['current_color'] = cmap.map(current_prop_value)[0]\n\n if len(colors) == 0:\n colors = np.empty((0, 4))\n\n return colors, values\n\n\ndef _validate_cycle_mode(\n values: Dict[str, Any]\n) -> Tuple[np.ndarray, Dict[str, Any]]:\n \"\"\"Validate the ColorManager field values specific for color cycle mode\n This is called by the root_validator in ColorManager\n\n Parameters\n ----------\n values : dict\n The field values that are passed to the ColorManager root validator\n\n Returns\n -------\n colors : np.ndarray\n The (Nx4) color array to set as ColorManager.colors\n values : dict\n \"\"\"\n color_properties = values['color_properties'].values\n cmap = values['categorical_colormap']\n if len(color_properties) == 0:\n colors = np.empty((0, 4))\n current_prop_value = values['color_properties'].current_value\n if current_prop_value is not None:\n values['current_color'] = cmap.map(current_prop_value)[0]\n else:\n colors = cmap.map(color_properties)\n values['categorical_colormap'] = cmap\n\n return colors, values\n", "path": "napari/layers/utils/color_manager_utils.py"}]}
num_tokens: 2,779
num_tokens_diff: 168
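A note on the record above: the root cause is the `> 16` uniqueness heuristic in `guess_continuous`, which reclassifies a categorical string property as continuous once it reaches 17 distinct labels, sending it down the colormap path where the reported float-cast ValidationError occurs. The snippet below only reproduces that heuristic on data shaped like the issue's example; it does not import napari.

```python
# Reproduces the pre-fix guess_continuous() heuristic on 17 categorical labels.
# Standalone: numpy only, no napari required.
import numpy as np

# 17 distinct string labels, as in the issue's n_clusters = 17 reproduction
labels = np.repeat(np.array([str(i) for i in range(17)]), 6)

is_float = issubclass(labels.dtype.type, np.floating)  # False: dtype is '<U2'
many_unique = len(np.unique(labels)) > 16              # True: 17 unique labels
print(is_float or many_unique)  # True -> property wrongly guessed as continuous
```

The golden diff then narrows the heuristic so the unique-count branch is only intended to apply to integer dtypes, leaving floating dtypes as the other continuous case.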
problem_id: gh_patches_debug_17511
source: rasdani/github-patches
task_type: git_diff
in_source_id: litestar-org__litestar-1771
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `litestar/dto/exceptions.py` Content: ``` 1 from __future__ import annotations 2 3 from litestar.exceptions import ImproperlyConfiguredException 4 5 __all__ = ["DTOException", "UnsupportedType"] 6 7 8 class DTOException(ImproperlyConfiguredException): 9 """Base exception for DTO errors.""" 10 11 12 class UnsupportedType(DTOException): 13 """Raised when a type is not supported by Litestar.""" 14 ``` Path: `litestar/dto/factory/abc.py` Content: ``` 1 from __future__ import annotations 2 3 from abc import ABCMeta, abstractmethod 4 from typing import TYPE_CHECKING, Generic, TypeVar 5 6 from litestar.dto.interface import ConnectionContext, DTOInterface 7 from litestar.enums import RequestEncodingType 8 from litestar.typing import ParsedType 9 10 from ._backends import MsgspecDTOBackend, PydanticDTOBackend 11 from ._backends.abc import BackendContext 12 from .config import DTOConfig 13 from .data_structures import DTOData 14 from .exc import InvalidAnnotation 15 from .utils import parse_configs_from_annotation 16 17 if TYPE_CHECKING: 18 from typing import Any, ClassVar, Collection, Generator 19 20 from typing_extensions import Self 21 22 from litestar.dto.interface import HandlerContext 23 from litestar.dto.types import ForType 24 from litestar.openapi.spec import Reference, Schema 25 from litestar.types.serialization import LitestarEncodableType 26 27 from ._backends import AbstractDTOBackend 28 from .types import FieldDefinition 29 30 __all__ = ["AbstractDTOFactory"] 31 32 T = TypeVar("T") 33 34 35 class AbstractDTOFactory(DTOInterface, Generic[T], metaclass=ABCMeta): 36 """Base class for DTO types.""" 37 38 __slots__ = ("connection_context",) 39 40 config: ClassVar[DTOConfig] 41 """Config objects to define properties of the DTO.""" 42 model_type: ClassVar[type[Any]] 43 """If ``annotation`` is an iterable, this is the inner type, otherwise will be the same as ``annotation``.""" 44 45 _type_backend_map: ClassVar[dict[tuple[ForType, ParsedType, RequestEncodingType | str | None], AbstractDTOBackend]] 46 _handler_backend_map: ClassVar[dict[tuple[ForType, str], AbstractDTOBackend]] 47 48 def __init__(self, connection_context: ConnectionContext) -> None: 49 """Create an AbstractDTOFactory type. 50 51 Args: 52 connection_context: A :class:`ConnectionContext <.ConnectionContext>` instance, which provides 53 information about the connection. 
54 """ 55 self.connection_context = connection_context 56 57 def __class_getitem__(cls, annotation: Any) -> type[Self]: 58 parsed_type = ParsedType(annotation) 59 60 if (parsed_type.is_optional and len(parsed_type.args) > 2) or ( 61 parsed_type.is_union and not parsed_type.is_optional 62 ): 63 raise InvalidAnnotation( 64 "Unions are currently not supported as type argument to DTO. Want this? Open an issue." 65 ) 66 67 if parsed_type.is_forward_ref: 68 raise InvalidAnnotation("Forward references are not supported as type argument to DTO") 69 70 # if a configuration is not provided, and the type narrowing is a type var, we don't want to create a subclass 71 configs = parse_configs_from_annotation(parsed_type) 72 if parsed_type.is_type_var and not configs: 73 return cls 74 75 if configs: 76 # provided config is always preferred 77 config = configs[0] 78 elif hasattr(cls, "config"): 79 # if no config is provided, but the class has one assigned, use that 80 config = cls.config 81 else: 82 # otherwise, create a new config 83 config = DTOConfig() 84 85 cls_dict: dict[str, Any] = {"config": config, "_type_backend_map": {}, "_handler_backend_map": {}} 86 if not parsed_type.is_type_var: 87 cls_dict.update(model_type=parsed_type.annotation) 88 89 return type(f"{cls.__name__}[{annotation}]", (cls,), cls_dict) 90 91 def builtins_to_data_type(self, builtins: Any) -> Any: 92 """Coerce the unstructured data into the data type.""" 93 backend = self._get_backend("data", self.connection_context.handler_id) 94 return backend.populate_data_from_builtins(builtins, self.connection_context) 95 96 def bytes_to_data_type(self, raw: bytes) -> Any: 97 """Return the data held by the DTO.""" 98 backend = self._get_backend("data", self.connection_context.handler_id) 99 return backend.populate_data_from_raw(raw, self.connection_context) 100 101 def data_to_encodable_type(self, data: T | Collection[T]) -> LitestarEncodableType: 102 backend = self._get_backend("return", self.connection_context.handler_id) 103 return backend.encode_data(data, self.connection_context) 104 105 @classmethod 106 @abstractmethod 107 def generate_field_definitions(cls, model_type: type[Any]) -> Generator[FieldDefinition, None, None]: 108 """Generate ``FieldDefinition`` instances from ``model_type``. 109 110 Yields: 111 ``FieldDefinition`` instances. 112 """ 113 114 @classmethod 115 @abstractmethod 116 def detect_nested_field(cls, parsed_type: ParsedType) -> bool: 117 """Return ``True`` if ``field_definition`` represents a nested model field. 118 119 Args: 120 parsed_type: inspect type to determine if field represents a nested model. 121 122 Returns: 123 ``True`` if ``parsed_type`` represents a nested model field. 124 """ 125 126 @classmethod 127 def on_registration(cls, handler_context: HandlerContext) -> None: 128 """Called each time the DTO type is encountered during signature modelling. 129 130 Args: 131 handler_context: A :class:`HandlerContext <.HandlerContext>` instance. Provides information about the 132 handler and application of the DTO. 
133 """ 134 if handler_context.parsed_type.is_subclass_of(DTOData): 135 parsed_type = handler_context.parsed_type.annotation.parsed_type 136 else: 137 parsed_type = handler_context.parsed_type 138 139 if parsed_type.is_collection: 140 if len(parsed_type.inner_types) != 1: 141 raise InvalidAnnotation("AbstractDTOFactory only supports homogeneous collection types") 142 handler_type = parsed_type.inner_types[0] 143 else: 144 handler_type = parsed_type 145 146 if not handler_type.is_subclass_of(cls.model_type): 147 raise InvalidAnnotation( 148 f"DTO narrowed with '{cls.model_type}', handler type is '{handler_context.parsed_type.annotation}'" 149 ) 150 151 key = (handler_context.dto_for, handler_context.parsed_type, handler_context.request_encoding_type) 152 backend = cls._type_backend_map.get(key) 153 if backend is None: 154 backend_type: type[AbstractDTOBackend] 155 if handler_context.request_encoding_type in { 156 RequestEncodingType.URL_ENCODED, 157 RequestEncodingType.MULTI_PART, 158 }: 159 backend_type = PydanticDTOBackend 160 else: 161 backend_type = MsgspecDTOBackend 162 163 backend_context = BackendContext( 164 cls.config, 165 handler_context.dto_for, 166 handler_context.parsed_type, 167 cls.generate_field_definitions, 168 cls.detect_nested_field, 169 handler_type.annotation, 170 ) 171 backend = cls._type_backend_map.setdefault(key, backend_type(backend_context)) 172 cls._handler_backend_map[(handler_context.dto_for, handler_context.handler_id)] = backend 173 174 @classmethod 175 def create_openapi_schema( 176 cls, 177 dto_for: ForType, 178 handler_id: str, 179 generate_examples: bool, 180 schemas: dict[str, Schema], 181 ) -> Reference | Schema: 182 """Create an OpenAPI request body. 183 184 Returns: 185 OpenAPI request body. 186 """ 187 backend = cls._get_backend(dto_for, handler_id) 188 return backend.create_openapi_schema(generate_examples, schemas) 189 190 @classmethod 191 def _get_backend(cls, dto_for: ForType, handler_id: str) -> AbstractDTOBackend: 192 """Return the backend for the handler/dto_for combo.""" 193 return cls._handler_backend_map[(dto_for, handler_id)] 194 ``` Path: `litestar/contrib/repository/filters.py` Content: ``` 1 """Collection filter datastructures.""" 2 from __future__ import annotations 3 4 from collections import abc # noqa: TCH003 5 from dataclasses import dataclass 6 from datetime import datetime # noqa: TCH003 7 from typing import Generic, Literal, TypeVar 8 9 T = TypeVar("T") 10 11 __all__ = ["BeforeAfter", "CollectionFilter", "LimitOffset", "OrderBy", "SearchFilter"] 12 13 14 @dataclass 15 class BeforeAfter: 16 """Data required to filter a query on a ``datetime`` column.""" 17 18 field_name: str 19 """Name of the model attribute to filter on.""" 20 before: datetime | None 21 """Filter results where field earlier than this.""" 22 after: datetime | None 23 """Filter results where field later than this.""" 24 25 26 @dataclass 27 class CollectionFilter(Generic[T]): 28 """Data required to construct a ``WHERE ... 
IN (...)`` clause.""" 29 30 field_name: str 31 """Name of the model attribute to filter on.""" 32 values: abc.Collection[T] 33 """Values for ``IN`` clause.""" 34 35 36 @dataclass 37 class LimitOffset: 38 """Data required to add limit/offset filtering to a query.""" 39 40 limit: int 41 """Value for ``LIMIT`` clause of query.""" 42 offset: int 43 """Value for ``OFFSET`` clause of query.""" 44 45 46 @dataclass 47 class OrderBy: 48 """Data required to construct a ``ORDER BY ...`` clause.""" 49 50 field_name: str 51 """Name of the model attribute to sort on.""" 52 sort_order: Literal["asc", "desc"] = "asc" 53 """Sort ascending or descending""" 54 55 56 @dataclass 57 class SearchFilter: 58 """Data required to construct a ``WHERE field_name LIKE '%' || :value || '%'`` clause.""" 59 60 field_name: str 61 """Name of the model attribute to sort on.""" 62 value: str 63 """Values for ``LIKE`` clause.""" 64 ignore_case: bool | None = False 65 """Should the search be case insensitive.""" 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/litestar/contrib/repository/filters.py b/litestar/contrib/repository/filters.py --- a/litestar/contrib/repository/filters.py +++ b/litestar/contrib/repository/filters.py @@ -8,7 +8,13 @@ T = TypeVar("T") -__all__ = ["BeforeAfter", "CollectionFilter", "LimitOffset", "OrderBy", "SearchFilter"] +__all__ = ( + "BeforeAfter", + "CollectionFilter", + "LimitOffset", + "OrderBy", + "SearchFilter", +) @dataclass diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py --- a/litestar/dto/exceptions.py +++ b/litestar/dto/exceptions.py @@ -2,7 +2,7 @@ from litestar.exceptions import ImproperlyConfiguredException -__all__ = ["DTOException", "UnsupportedType"] +__all__ = ("DTOException", "UnsupportedType") class DTOException(ImproperlyConfiguredException): diff --git a/litestar/dto/factory/abc.py b/litestar/dto/factory/abc.py --- a/litestar/dto/factory/abc.py +++ b/litestar/dto/factory/abc.py @@ -27,7 +27,7 @@ from ._backends import AbstractDTOBackend from .types import FieldDefinition -__all__ = ["AbstractDTOFactory"] +__all__ = ("AbstractDTOFactory",) T = TypeVar("T")
{"golden_diff": "diff --git a/litestar/contrib/repository/filters.py b/litestar/contrib/repository/filters.py\n--- a/litestar/contrib/repository/filters.py\n+++ b/litestar/contrib/repository/filters.py\n@@ -8,7 +8,13 @@\n \n T = TypeVar(\"T\")\n \n-__all__ = [\"BeforeAfter\", \"CollectionFilter\", \"LimitOffset\", \"OrderBy\", \"SearchFilter\"]\n+__all__ = (\n+ \"BeforeAfter\",\n+ \"CollectionFilter\",\n+ \"LimitOffset\",\n+ \"OrderBy\",\n+ \"SearchFilter\",\n+)\n \n \n @dataclass\ndiff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py\n--- a/litestar/dto/exceptions.py\n+++ b/litestar/dto/exceptions.py\n@@ -2,7 +2,7 @@\n \n from litestar.exceptions import ImproperlyConfiguredException\n \n-__all__ = [\"DTOException\", \"UnsupportedType\"]\n+__all__ = (\"DTOException\", \"UnsupportedType\")\n \n \n class DTOException(ImproperlyConfiguredException):\ndiff --git a/litestar/dto/factory/abc.py b/litestar/dto/factory/abc.py\n--- a/litestar/dto/factory/abc.py\n+++ b/litestar/dto/factory/abc.py\n@@ -27,7 +27,7 @@\n from ._backends import AbstractDTOBackend\n from .types import FieldDefinition\n \n-__all__ = [\"AbstractDTOFactory\"]\n+__all__ = (\"AbstractDTOFactory\",)\n \n T = TypeVar(\"T\")\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom litestar.exceptions import ImproperlyConfiguredException\n\n__all__ = [\"DTOException\", \"UnsupportedType\"]\n\n\nclass DTOException(ImproperlyConfiguredException):\n \"\"\"Base exception for DTO errors.\"\"\"\n\n\nclass UnsupportedType(DTOException):\n \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "path": "litestar/dto/exceptions.py"}, {"content": "from __future__ import annotations\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nfrom litestar.dto.interface import ConnectionContext, DTOInterface\nfrom litestar.enums import RequestEncodingType\nfrom litestar.typing import ParsedType\n\nfrom ._backends import MsgspecDTOBackend, PydanticDTOBackend\nfrom ._backends.abc import BackendContext\nfrom .config import DTOConfig\nfrom .data_structures import DTOData\nfrom .exc import InvalidAnnotation\nfrom .utils import parse_configs_from_annotation\n\nif TYPE_CHECKING:\n from typing import Any, ClassVar, Collection, Generator\n\n from typing_extensions import Self\n\n from litestar.dto.interface import HandlerContext\n from litestar.dto.types import ForType\n from litestar.openapi.spec import Reference, Schema\n from litestar.types.serialization import LitestarEncodableType\n\n from ._backends import AbstractDTOBackend\n from .types import FieldDefinition\n\n__all__ = [\"AbstractDTOFactory\"]\n\nT = TypeVar(\"T\")\n\n\nclass AbstractDTOFactory(DTOInterface, Generic[T], metaclass=ABCMeta):\n \"\"\"Base class for DTO types.\"\"\"\n\n __slots__ = (\"connection_context\",)\n\n config: 
ClassVar[DTOConfig]\n \"\"\"Config objects to define properties of the DTO.\"\"\"\n model_type: ClassVar[type[Any]]\n \"\"\"If ``annotation`` is an iterable, this is the inner type, otherwise will be the same as ``annotation``.\"\"\"\n\n _type_backend_map: ClassVar[dict[tuple[ForType, ParsedType, RequestEncodingType | str | None], AbstractDTOBackend]]\n _handler_backend_map: ClassVar[dict[tuple[ForType, str], AbstractDTOBackend]]\n\n def __init__(self, connection_context: ConnectionContext) -> None:\n \"\"\"Create an AbstractDTOFactory type.\n\n Args:\n connection_context: A :class:`ConnectionContext <.ConnectionContext>` instance, which provides\n information about the connection.\n \"\"\"\n self.connection_context = connection_context\n\n def __class_getitem__(cls, annotation: Any) -> type[Self]:\n parsed_type = ParsedType(annotation)\n\n if (parsed_type.is_optional and len(parsed_type.args) > 2) or (\n parsed_type.is_union and not parsed_type.is_optional\n ):\n raise InvalidAnnotation(\n \"Unions are currently not supported as type argument to DTO. Want this? Open an issue.\"\n )\n\n if parsed_type.is_forward_ref:\n raise InvalidAnnotation(\"Forward references are not supported as type argument to DTO\")\n\n # if a configuration is not provided, and the type narrowing is a type var, we don't want to create a subclass\n configs = parse_configs_from_annotation(parsed_type)\n if parsed_type.is_type_var and not configs:\n return cls\n\n if configs:\n # provided config is always preferred\n config = configs[0]\n elif hasattr(cls, \"config\"):\n # if no config is provided, but the class has one assigned, use that\n config = cls.config\n else:\n # otherwise, create a new config\n config = DTOConfig()\n\n cls_dict: dict[str, Any] = {\"config\": config, \"_type_backend_map\": {}, \"_handler_backend_map\": {}}\n if not parsed_type.is_type_var:\n cls_dict.update(model_type=parsed_type.annotation)\n\n return type(f\"{cls.__name__}[{annotation}]\", (cls,), cls_dict)\n\n def builtins_to_data_type(self, builtins: Any) -> Any:\n \"\"\"Coerce the unstructured data into the data type.\"\"\"\n backend = self._get_backend(\"data\", self.connection_context.handler_id)\n return backend.populate_data_from_builtins(builtins, self.connection_context)\n\n def bytes_to_data_type(self, raw: bytes) -> Any:\n \"\"\"Return the data held by the DTO.\"\"\"\n backend = self._get_backend(\"data\", self.connection_context.handler_id)\n return backend.populate_data_from_raw(raw, self.connection_context)\n\n def data_to_encodable_type(self, data: T | Collection[T]) -> LitestarEncodableType:\n backend = self._get_backend(\"return\", self.connection_context.handler_id)\n return backend.encode_data(data, self.connection_context)\n\n @classmethod\n @abstractmethod\n def generate_field_definitions(cls, model_type: type[Any]) -> Generator[FieldDefinition, None, None]:\n \"\"\"Generate ``FieldDefinition`` instances from ``model_type``.\n\n Yields:\n ``FieldDefinition`` instances.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def detect_nested_field(cls, parsed_type: ParsedType) -> bool:\n \"\"\"Return ``True`` if ``field_definition`` represents a nested model field.\n\n Args:\n parsed_type: inspect type to determine if field represents a nested model.\n\n Returns:\n ``True`` if ``parsed_type`` represents a nested model field.\n \"\"\"\n\n @classmethod\n def on_registration(cls, handler_context: HandlerContext) -> None:\n \"\"\"Called each time the DTO type is encountered during signature modelling.\n\n Args:\n handler_context: A 
:class:`HandlerContext <.HandlerContext>` instance. Provides information about the\n handler and application of the DTO.\n \"\"\"\n if handler_context.parsed_type.is_subclass_of(DTOData):\n parsed_type = handler_context.parsed_type.annotation.parsed_type\n else:\n parsed_type = handler_context.parsed_type\n\n if parsed_type.is_collection:\n if len(parsed_type.inner_types) != 1:\n raise InvalidAnnotation(\"AbstractDTOFactory only supports homogeneous collection types\")\n handler_type = parsed_type.inner_types[0]\n else:\n handler_type = parsed_type\n\n if not handler_type.is_subclass_of(cls.model_type):\n raise InvalidAnnotation(\n f\"DTO narrowed with '{cls.model_type}', handler type is '{handler_context.parsed_type.annotation}'\"\n )\n\n key = (handler_context.dto_for, handler_context.parsed_type, handler_context.request_encoding_type)\n backend = cls._type_backend_map.get(key)\n if backend is None:\n backend_type: type[AbstractDTOBackend]\n if handler_context.request_encoding_type in {\n RequestEncodingType.URL_ENCODED,\n RequestEncodingType.MULTI_PART,\n }:\n backend_type = PydanticDTOBackend\n else:\n backend_type = MsgspecDTOBackend\n\n backend_context = BackendContext(\n cls.config,\n handler_context.dto_for,\n handler_context.parsed_type,\n cls.generate_field_definitions,\n cls.detect_nested_field,\n handler_type.annotation,\n )\n backend = cls._type_backend_map.setdefault(key, backend_type(backend_context))\n cls._handler_backend_map[(handler_context.dto_for, handler_context.handler_id)] = backend\n\n @classmethod\n def create_openapi_schema(\n cls,\n dto_for: ForType,\n handler_id: str,\n generate_examples: bool,\n schemas: dict[str, Schema],\n ) -> Reference | Schema:\n \"\"\"Create an OpenAPI request body.\n\n Returns:\n OpenAPI request body.\n \"\"\"\n backend = cls._get_backend(dto_for, handler_id)\n return backend.create_openapi_schema(generate_examples, schemas)\n\n @classmethod\n def _get_backend(cls, dto_for: ForType, handler_id: str) -> AbstractDTOBackend:\n \"\"\"Return the backend for the handler/dto_for combo.\"\"\"\n return cls._handler_backend_map[(dto_for, handler_id)]\n", "path": "litestar/dto/factory/abc.py"}, {"content": "\"\"\"Collection filter datastructures.\"\"\"\nfrom __future__ import annotations\n\nfrom collections import abc # noqa: TCH003\nfrom dataclasses import dataclass\nfrom datetime import datetime # noqa: TCH003\nfrom typing import Generic, Literal, TypeVar\n\nT = TypeVar(\"T\")\n\n__all__ = [\"BeforeAfter\", \"CollectionFilter\", \"LimitOffset\", \"OrderBy\", \"SearchFilter\"]\n\n\n@dataclass\nclass BeforeAfter:\n \"\"\"Data required to filter a query on a ``datetime`` column.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n before: datetime | None\n \"\"\"Filter results where field earlier than this.\"\"\"\n after: datetime | None\n \"\"\"Filter results where field later than this.\"\"\"\n\n\n@dataclass\nclass CollectionFilter(Generic[T]):\n \"\"\"Data required to construct a ``WHERE ... 
IN (...)`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n values: abc.Collection[T]\n \"\"\"Values for ``IN`` clause.\"\"\"\n\n\n@dataclass\nclass LimitOffset:\n \"\"\"Data required to add limit/offset filtering to a query.\"\"\"\n\n limit: int\n \"\"\"Value for ``LIMIT`` clause of query.\"\"\"\n offset: int\n \"\"\"Value for ``OFFSET`` clause of query.\"\"\"\n\n\n@dataclass\nclass OrderBy:\n \"\"\"Data required to construct a ``ORDER BY ...`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n sort_order: Literal[\"asc\", \"desc\"] = \"asc\"\n \"\"\"Sort ascending or descending\"\"\"\n\n\n@dataclass\nclass SearchFilter:\n \"\"\"Data required to construct a ``WHERE field_name LIKE '%' || :value || '%'`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n value: str\n \"\"\"Values for ``LIKE`` clause.\"\"\"\n ignore_case: bool | None = False\n \"\"\"Should the search be case insensitive.\"\"\"\n", "path": "litestar/contrib/repository/filters.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom litestar.exceptions import ImproperlyConfiguredException\n\n__all__ = (\"DTOException\", \"UnsupportedType\")\n\n\nclass DTOException(ImproperlyConfiguredException):\n \"\"\"Base exception for DTO errors.\"\"\"\n\n\nclass UnsupportedType(DTOException):\n \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "path": "litestar/dto/exceptions.py"}, {"content": "from __future__ import annotations\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nfrom litestar.dto.interface import ConnectionContext, DTOInterface\nfrom litestar.enums import RequestEncodingType\nfrom litestar.typing import ParsedType\n\nfrom ._backends import MsgspecDTOBackend, PydanticDTOBackend\nfrom ._backends.abc import BackendContext\nfrom .config import DTOConfig\nfrom .data_structures import DTOData\nfrom .exc import InvalidAnnotation\nfrom .utils import parse_configs_from_annotation\n\nif TYPE_CHECKING:\n from typing import Any, ClassVar, Collection, Generator\n\n from typing_extensions import Self\n\n from litestar.dto.interface import HandlerContext\n from litestar.dto.types import ForType\n from litestar.openapi.spec import Reference, Schema\n from litestar.types.serialization import LitestarEncodableType\n\n from ._backends import AbstractDTOBackend\n from .types import FieldDefinition\n\n__all__ = (\"AbstractDTOFactory\",)\n\nT = TypeVar(\"T\")\n\n\nclass AbstractDTOFactory(DTOInterface, Generic[T], metaclass=ABCMeta):\n \"\"\"Base class for DTO types.\"\"\"\n\n __slots__ = (\"connection_context\",)\n\n config: ClassVar[DTOConfig]\n \"\"\"Config objects to define properties of the DTO.\"\"\"\n model_type: ClassVar[type[Any]]\n \"\"\"If ``annotation`` is an iterable, this is the inner type, otherwise will be the same as ``annotation``.\"\"\"\n\n _type_backend_map: ClassVar[dict[tuple[ForType, ParsedType, RequestEncodingType | str | None], AbstractDTOBackend]]\n _handler_backend_map: ClassVar[dict[tuple[ForType, str], AbstractDTOBackend]]\n\n def __init__(self, connection_context: ConnectionContext) -> None:\n \"\"\"Create an AbstractDTOFactory type.\n\n Args:\n connection_context: A :class:`ConnectionContext <.ConnectionContext>` instance, which provides\n information about the connection.\n \"\"\"\n self.connection_context = connection_context\n\n def __class_getitem__(cls, annotation: Any) -> type[Self]:\n parsed_type = 
ParsedType(annotation)\n\n if (parsed_type.is_optional and len(parsed_type.args) > 2) or (\n parsed_type.is_union and not parsed_type.is_optional\n ):\n raise InvalidAnnotation(\n \"Unions are currently not supported as type argument to DTO. Want this? Open an issue.\"\n )\n\n if parsed_type.is_forward_ref:\n raise InvalidAnnotation(\"Forward references are not supported as type argument to DTO\")\n\n # if a configuration is not provided, and the type narrowing is a type var, we don't want to create a subclass\n configs = parse_configs_from_annotation(parsed_type)\n if parsed_type.is_type_var and not configs:\n return cls\n\n if configs:\n # provided config is always preferred\n config = configs[0]\n elif hasattr(cls, \"config\"):\n # if no config is provided, but the class has one assigned, use that\n config = cls.config\n else:\n # otherwise, create a new config\n config = DTOConfig()\n\n cls_dict: dict[str, Any] = {\"config\": config, \"_type_backend_map\": {}, \"_handler_backend_map\": {}}\n if not parsed_type.is_type_var:\n cls_dict.update(model_type=parsed_type.annotation)\n\n return type(f\"{cls.__name__}[{annotation}]\", (cls,), cls_dict)\n\n def builtins_to_data_type(self, builtins: Any) -> Any:\n \"\"\"Coerce the unstructured data into the data type.\"\"\"\n backend = self._get_backend(\"data\", self.connection_context.handler_id)\n return backend.populate_data_from_builtins(builtins, self.connection_context)\n\n def bytes_to_data_type(self, raw: bytes) -> Any:\n \"\"\"Return the data held by the DTO.\"\"\"\n backend = self._get_backend(\"data\", self.connection_context.handler_id)\n return backend.populate_data_from_raw(raw, self.connection_context)\n\n def data_to_encodable_type(self, data: T | Collection[T]) -> LitestarEncodableType:\n backend = self._get_backend(\"return\", self.connection_context.handler_id)\n return backend.encode_data(data, self.connection_context)\n\n @classmethod\n @abstractmethod\n def generate_field_definitions(cls, model_type: type[Any]) -> Generator[FieldDefinition, None, None]:\n \"\"\"Generate ``FieldDefinition`` instances from ``model_type``.\n\n Yields:\n ``FieldDefinition`` instances.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def detect_nested_field(cls, parsed_type: ParsedType) -> bool:\n \"\"\"Return ``True`` if ``field_definition`` represents a nested model field.\n\n Args:\n parsed_type: inspect type to determine if field represents a nested model.\n\n Returns:\n ``True`` if ``parsed_type`` represents a nested model field.\n \"\"\"\n\n @classmethod\n def on_registration(cls, handler_context: HandlerContext) -> None:\n \"\"\"Called each time the DTO type is encountered during signature modelling.\n\n Args:\n handler_context: A :class:`HandlerContext <.HandlerContext>` instance. 
Provides information about the\n handler and application of the DTO.\n \"\"\"\n if handler_context.parsed_type.is_subclass_of(DTOData):\n parsed_type = handler_context.parsed_type.annotation.parsed_type\n else:\n parsed_type = handler_context.parsed_type\n\n if parsed_type.is_collection:\n if len(parsed_type.inner_types) != 1:\n raise InvalidAnnotation(\"AbstractDTOFactory only supports homogeneous collection types\")\n handler_type = parsed_type.inner_types[0]\n else:\n handler_type = parsed_type\n\n if not handler_type.is_subclass_of(cls.model_type):\n raise InvalidAnnotation(\n f\"DTO narrowed with '{cls.model_type}', handler type is '{handler_context.parsed_type.annotation}'\"\n )\n\n key = (handler_context.dto_for, handler_context.parsed_type, handler_context.request_encoding_type)\n backend = cls._type_backend_map.get(key)\n if backend is None:\n backend_type: type[AbstractDTOBackend]\n if handler_context.request_encoding_type in {\n RequestEncodingType.URL_ENCODED,\n RequestEncodingType.MULTI_PART,\n }:\n backend_type = PydanticDTOBackend\n else:\n backend_type = MsgspecDTOBackend\n\n backend_context = BackendContext(\n cls.config,\n handler_context.dto_for,\n handler_context.parsed_type,\n cls.generate_field_definitions,\n cls.detect_nested_field,\n handler_type.annotation,\n )\n backend = cls._type_backend_map.setdefault(key, backend_type(backend_context))\n cls._handler_backend_map[(handler_context.dto_for, handler_context.handler_id)] = backend\n\n @classmethod\n def create_openapi_schema(\n cls,\n dto_for: ForType,\n handler_id: str,\n generate_examples: bool,\n schemas: dict[str, Schema],\n ) -> Reference | Schema:\n \"\"\"Create an OpenAPI request body.\n\n Returns:\n OpenAPI request body.\n \"\"\"\n backend = cls._get_backend(dto_for, handler_id)\n return backend.create_openapi_schema(generate_examples, schemas)\n\n @classmethod\n def _get_backend(cls, dto_for: ForType, handler_id: str) -> AbstractDTOBackend:\n \"\"\"Return the backend for the handler/dto_for combo.\"\"\"\n return cls._handler_backend_map[(dto_for, handler_id)]\n", "path": "litestar/dto/factory/abc.py"}, {"content": "\"\"\"Collection filter datastructures.\"\"\"\nfrom __future__ import annotations\n\nfrom collections import abc # noqa: TCH003\nfrom dataclasses import dataclass\nfrom datetime import datetime # noqa: TCH003\nfrom typing import Generic, Literal, TypeVar\n\nT = TypeVar(\"T\")\n\n__all__ = (\n \"BeforeAfter\",\n \"CollectionFilter\",\n \"LimitOffset\",\n \"OrderBy\",\n \"SearchFilter\",\n)\n\n\n@dataclass\nclass BeforeAfter:\n \"\"\"Data required to filter a query on a ``datetime`` column.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n before: datetime | None\n \"\"\"Filter results where field earlier than this.\"\"\"\n after: datetime | None\n \"\"\"Filter results where field later than this.\"\"\"\n\n\n@dataclass\nclass CollectionFilter(Generic[T]):\n \"\"\"Data required to construct a ``WHERE ... 
IN (...)`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n values: abc.Collection[T]\n \"\"\"Values for ``IN`` clause.\"\"\"\n\n\n@dataclass\nclass LimitOffset:\n \"\"\"Data required to add limit/offset filtering to a query.\"\"\"\n\n limit: int\n \"\"\"Value for ``LIMIT`` clause of query.\"\"\"\n offset: int\n \"\"\"Value for ``OFFSET`` clause of query.\"\"\"\n\n\n@dataclass\nclass OrderBy:\n \"\"\"Data required to construct a ``ORDER BY ...`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n sort_order: Literal[\"asc\", \"desc\"] = \"asc\"\n \"\"\"Sort ascending or descending\"\"\"\n\n\n@dataclass\nclass SearchFilter:\n \"\"\"Data required to construct a ``WHERE field_name LIKE '%' || :value || '%'`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n value: str\n \"\"\"Values for ``LIKE`` clause.\"\"\"\n ignore_case: bool | None = False\n \"\"\"Should the search be case insensitive.\"\"\"\n", "path": "litestar/contrib/repository/filters.py"}]}
3,198
350
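The `on_registration` hook in the record above builds one serialization backend per `(dto_for, parsed_type, request_encoding_type)` key and then points every handler at the shared instance. The sketch below isolates that caching pattern; the function name `register_handler`, the string-typed key parts, and the `make_backend` factory are illustrative assumptions rather than the library's real API.

```python
# Hypothetical, simplified sketch of the keyed backend cache seen in the record above.
from typing import Any, Callable, Dict, Tuple

BackendKey = Tuple[str, str, str]  # (dto_for, parsed_type, request_encoding_type)
_type_backend_map: Dict[BackendKey, Any] = {}
_handler_backend_map: Dict[Tuple[str, str], Any] = {}


def register_handler(dto_for: str, parsed_type: str, encoding: str,
                     handler_id: str, make_backend: Callable[[], Any]) -> Any:
    key = (dto_for, parsed_type, encoding)
    backend = _type_backend_map.get(key)
    if backend is None:
        # setdefault() keeps whichever instance landed first, so repeated
        # registrations of the same key still share a single backend.
        backend = _type_backend_map.setdefault(key, make_backend())
    # Every handler id resolves to the shared backend for its key.
    _handler_backend_map[(dto_for, handler_id)] = backend
    return backend
```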
gh_patches_debug_8921
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-870
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Don't hardcode all for AvailabilityZones not relevant for AWS::ElasticLoadBalancingV2::TargetGroup Hello, *cfn-lint version: 0.19.1* *Description of issue.* The following snippet : ``` Resources: DefaultTargetGroup: Type: "AWS::ElasticLoadBalancingV2::TargetGroup" Properties: VpcId: hello Port: 80 Protocol: HTTP HealthCheckIntervalSeconds: 30 HealthCheckPath: "/" HealthCheckPort: "80" HealthCheckProtocol: "HTTP" HealthCheckTimeoutSeconds: 5 HealthyThresholdCount: 5 TargetType: ip Targets: - Id: "10.31.33.28" AvailabilityZone: all Matcher: HttpCode: "200" TargetGroupAttributes: - Key: deregistration_delay.timeout_seconds Value: "20" ``` Triggers this warn message : > W3010 Don't hardcode all for AvailabilityZones In the case of AWS::ElasticLoadBalancingV2::TargetGroup, there is legitimacy to hardcode all for AvailabilityZones : > If the IP address is outside the VPC, this parameter is required. With an Application Load Balancer, if the target type is ip and the IP address is outside the VPC for the target group, the only supported value is all. I'm unsure what to PR here. Should we get rid of this line ? https://github.com/aws-cloudformation/cfn-python-lint/blob/master/src/cfnlint/rules/resources/properties/AvailabilityZone.py#L52 Thanks for the suggestions. [1] https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticloadbalancingv2-targetgroup-targetdescription.html#aws-properties-elasticloadbalancingv2-targetgroup-targetdescription-properties --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/resources/properties/AvailabilityZone.py` Content: ``` 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 from cfnlint import CloudFormationLintRule 18 from cfnlint import RuleMatch 19 20 21 class AvailabilityZone(CloudFormationLintRule): 22 """Check Availibility Zone parameter checks """ 23 id = 'W3010' 24 shortdesc = 'Availability Zone Parameters should not be hardcoded' 25 description = 'Check if an Availability Zone property is hardcoded.' 
26 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint' 27 tags = ['parameters', 'availabilityzone'] 28 29 def __init__(self): 30 """Init""" 31 super(AvailabilityZone, self).__init__() 32 resource_type_specs = [ 33 'AWS::DAX::Cluster', 34 'AWS::AutoScaling::AutoScalingGroup', 35 'AWS::RDS::DBCluster', 36 'AWS::EC2::Volume', 37 'AWS::ElasticLoadBalancing::LoadBalancer', 38 'AWS::OpsWorks::Instance', 39 'AWS::RDS::DBInstance', 40 'AWS::EC2::Host', 41 'AWS::EC2::Subnet', 42 'AWS::DMS::ReplicationInstance', 43 'AWS::EC2::Instance' 44 ] 45 46 property_type_specs = [ 47 # Singular 48 'AWS::EC2::LaunchTemplate.Placement', 49 'AWS::EC2::SpotFleet.SpotPlacement', 50 'AWS::EMR::Cluster.PlacementType', 51 'AWS::Glue::Connection.PhysicalConnectionRequirements', 52 'AWS::ElasticLoadBalancingV2::TargetGroup.TargetDescription', 53 'AWS::EC2::SpotFleet.LaunchTemplateOverrides', 54 ] 55 56 for resource_type_spec in resource_type_specs: 57 self.resource_property_types.append(resource_type_spec) 58 for property_type_spec in property_type_specs: 59 self.resource_sub_property_types.append(property_type_spec) 60 61 # pylint: disable=W0613 62 def check_az_value(self, value, path): 63 """Check ref for VPC""" 64 matches = [] 65 66 if path[-1] != 'Fn::GetAZs': 67 message = 'Don\'t hardcode {0} for AvailabilityZones' 68 matches.append(RuleMatch(path, message.format(value))) 69 70 return matches 71 72 def check(self, properties, resource_type, path, cfn): 73 """Check itself""" 74 matches = [] 75 76 matches.extend( 77 cfn.check_value( 78 properties, 'AvailabilityZone', path, 79 check_value=self.check_az_value, check_ref=None, 80 check_find_in_map=None, check_split=None, check_join=None 81 ) 82 ) 83 matches.extend( 84 cfn.check_value( 85 properties, 'AvailabilityZones', path, 86 check_value=self.check_az_value, check_ref=None, 87 check_find_in_map=None, check_split=None, check_join=None 88 ) 89 ) 90 91 return matches 92 93 def match_resource_sub_properties(self, properties, property_type, path, cfn): 94 """Match for sub properties""" 95 matches = [] 96 97 matches.extend(self.check(properties, property_type, path, cfn)) 98 99 return matches 100 101 def match_resource_properties(self, properties, resource_type, path, cfn): 102 """Check CloudFormation Properties""" 103 matches = [] 104 105 matches.extend(self.check(properties, resource_type, path, cfn)) 106 107 return matches 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/resources/properties/AvailabilityZone.py b/src/cfnlint/rules/resources/properties/AvailabilityZone.py --- a/src/cfnlint/rules/resources/properties/AvailabilityZone.py +++ b/src/cfnlint/rules/resources/properties/AvailabilityZone.py @@ -63,9 +63,11 @@ """Check ref for VPC""" matches = [] - if path[-1] != 'Fn::GetAZs': - message = 'Don\'t hardcode {0} for AvailabilityZones' - matches.append(RuleMatch(path, message.format(value))) + # value of `all` is a valide exception in AWS::ElasticLoadBalancingV2::TargetGroup + if value not in ['all']: + if path[-1] != ['Fn::GetAZs']: + message = 'Don\'t hardcode {0} for AvailabilityZones' + matches.append(RuleMatch(path, message.format(value))) return matches
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/AvailabilityZone.py b/src/cfnlint/rules/resources/properties/AvailabilityZone.py\n--- a/src/cfnlint/rules/resources/properties/AvailabilityZone.py\n+++ b/src/cfnlint/rules/resources/properties/AvailabilityZone.py\n@@ -63,9 +63,11 @@\n \"\"\"Check ref for VPC\"\"\"\n matches = []\n \n- if path[-1] != 'Fn::GetAZs':\n- message = 'Don\\'t hardcode {0} for AvailabilityZones'\n- matches.append(RuleMatch(path, message.format(value)))\n+ # value of `all` is a valide exception in AWS::ElasticLoadBalancingV2::TargetGroup\n+ if value not in ['all']:\n+ if path[-1] != ['Fn::GetAZs']:\n+ message = 'Don\\'t hardcode {0} for AvailabilityZones'\n+ matches.append(RuleMatch(path, message.format(value)))\n \n return matches\n", "issue": "Don't hardcode all for AvailabilityZones not relevant for AWS::ElasticLoadBalancingV2::TargetGroup\nHello, \r\n\r\n*cfn-lint version: 0.19.1*\r\n\r\n*Description of issue.*\r\n\r\nThe following snippet : \r\n\r\n```\r\nResources:\r\n DefaultTargetGroup:\r\n Type: \"AWS::ElasticLoadBalancingV2::TargetGroup\"\r\n Properties:\r\n VpcId: hello\r\n Port: 80\r\n Protocol: HTTP\r\n HealthCheckIntervalSeconds: 30\r\n HealthCheckPath: \"/\"\r\n HealthCheckPort: \"80\"\r\n HealthCheckProtocol: \"HTTP\"\r\n HealthCheckTimeoutSeconds: 5\r\n HealthyThresholdCount: 5\r\n TargetType: ip\r\n Targets:\r\n - \r\n Id: \"10.31.33.28\"\r\n AvailabilityZone: all\r\n Matcher:\r\n HttpCode: \"200\"\r\n TargetGroupAttributes:\r\n - Key: deregistration_delay.timeout_seconds\r\n Value: \"20\"\r\n```\r\n\r\nTriggers this warn message : \r\n\r\n> W3010 Don't hardcode all for AvailabilityZones \r\n\r\nIn the case of AWS::ElasticLoadBalancingV2::TargetGroup, there is legitimacy to hardcode all for AvailabilityZones : \r\n> If the IP address is outside the VPC, this parameter is required. With an Application Load Balancer, if the target type is ip and the IP address is outside the VPC for the target group, the only supported value is all. \r\n\r\nI'm unsure what to PR here. Should we get rid of this line ? https://github.com/aws-cloudformation/cfn-python-lint/blob/master/src/cfnlint/rules/resources/properties/AvailabilityZone.py#L52 \r\n\r\nThanks for the suggestions. \r\n\r\n[1] https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticloadbalancingv2-targetgroup-targetdescription.html#aws-properties-elasticloadbalancingv2-targetgroup-targetdescription-properties\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass AvailabilityZone(CloudFormationLintRule):\n \"\"\"Check Availibility Zone parameter checks \"\"\"\n id = 'W3010'\n shortdesc = 'Availability Zone Parameters should not be hardcoded'\n description = 'Check if an Availability Zone property is hardcoded.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['parameters', 'availabilityzone']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(AvailabilityZone, self).__init__()\n resource_type_specs = [\n 'AWS::DAX::Cluster',\n 'AWS::AutoScaling::AutoScalingGroup',\n 'AWS::RDS::DBCluster',\n 'AWS::EC2::Volume',\n 'AWS::ElasticLoadBalancing::LoadBalancer',\n 'AWS::OpsWorks::Instance',\n 'AWS::RDS::DBInstance',\n 'AWS::EC2::Host',\n 'AWS::EC2::Subnet',\n 'AWS::DMS::ReplicationInstance',\n 'AWS::EC2::Instance'\n ]\n\n property_type_specs = [\n # Singular\n 'AWS::EC2::LaunchTemplate.Placement',\n 'AWS::EC2::SpotFleet.SpotPlacement',\n 'AWS::EMR::Cluster.PlacementType',\n 'AWS::Glue::Connection.PhysicalConnectionRequirements',\n 'AWS::ElasticLoadBalancingV2::TargetGroup.TargetDescription',\n 'AWS::EC2::SpotFleet.LaunchTemplateOverrides',\n ]\n\n for resource_type_spec in resource_type_specs:\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in property_type_specs:\n self.resource_sub_property_types.append(property_type_spec)\n\n # pylint: disable=W0613\n def check_az_value(self, value, path):\n \"\"\"Check ref for VPC\"\"\"\n matches = []\n\n if path[-1] != 'Fn::GetAZs':\n message = 'Don\\'t hardcode {0} for AvailabilityZones'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check(self, properties, resource_type, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n properties, 'AvailabilityZone', path,\n check_value=self.check_az_value, check_ref=None,\n check_find_in_map=None, check_split=None, check_join=None\n )\n )\n matches.extend(\n cfn.check_value(\n properties, 'AvailabilityZones', path,\n check_value=self.check_az_value, check_ref=None,\n check_find_in_map=None, check_split=None, check_join=None\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n matches.extend(self.check(properties, property_type, path, cfn))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(self.check(properties, resource_type, path, cfn))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/AvailabilityZone.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass AvailabilityZone(CloudFormationLintRule):\n \"\"\"Check Availibility Zone parameter checks \"\"\"\n id = 'W3010'\n shortdesc = 'Availability Zone Parameters should not be hardcoded'\n description = 'Check if an Availability Zone property is hardcoded.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['parameters', 'availabilityzone']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(AvailabilityZone, self).__init__()\n resource_type_specs = [\n 'AWS::DAX::Cluster',\n 'AWS::AutoScaling::AutoScalingGroup',\n 'AWS::RDS::DBCluster',\n 'AWS::EC2::Volume',\n 'AWS::ElasticLoadBalancing::LoadBalancer',\n 'AWS::OpsWorks::Instance',\n 'AWS::RDS::DBInstance',\n 'AWS::EC2::Host',\n 'AWS::EC2::Subnet',\n 'AWS::DMS::ReplicationInstance',\n 'AWS::EC2::Instance'\n ]\n\n property_type_specs = [\n # Singular\n 'AWS::EC2::LaunchTemplate.Placement',\n 'AWS::EC2::SpotFleet.SpotPlacement',\n 'AWS::EMR::Cluster.PlacementType',\n 'AWS::Glue::Connection.PhysicalConnectionRequirements',\n 'AWS::ElasticLoadBalancingV2::TargetGroup.TargetDescription',\n 'AWS::EC2::SpotFleet.LaunchTemplateOverrides',\n ]\n\n for resource_type_spec in resource_type_specs:\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in property_type_specs:\n self.resource_sub_property_types.append(property_type_spec)\n\n # pylint: disable=W0613\n def check_az_value(self, value, path):\n \"\"\"Check ref for VPC\"\"\"\n matches = []\n\n # value of `all` is a valide exception in AWS::ElasticLoadBalancingV2::TargetGroup\n if value not in ['all']:\n if path[-1] != ['Fn::GetAZs']:\n message = 'Don\\'t hardcode {0} for AvailabilityZones'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check(self, properties, resource_type, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n properties, 'AvailabilityZone', path,\n check_value=self.check_az_value, check_ref=None,\n check_find_in_map=None, check_split=None, check_join=None\n )\n )\n matches.extend(\n cfn.check_value(\n properties, 'AvailabilityZones', path,\n check_value=self.check_az_value, check_ref=None,\n check_find_in_map=None, check_split=None, check_join=None\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n matches.extend(self.check(properties, property_type, path, cfn))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, 
path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(self.check(properties, resource_type, path, cfn))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/AvailabilityZone.py"}]}
1,816
218
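The patch in the record above exempts the literal value `all` from the W3010 warning, since `all` is a legitimate availability-zone value for `ip`-type target-group members outside the VPC. The standalone sketch below captures that check; returning plain tuples and comparing against the string `'Fn::GetAZs'` are simplifications of the rule's real `RuleMatch` objects and path handling.

```python
# Simplified, hypothetical form of the availability-zone check after the fix:
# hard-coded values are flagged unless they are "all" or produced by Fn::GetAZs.
from typing import Any, List, Tuple


def check_az_value(value: Any, path: List[Any]) -> List[Tuple[List[Any], str]]:
    matches = []
    if value not in ('all',):                   # "all" is valid for ip targets outside the VPC
        if path and path[-1] != 'Fn::GetAZs':   # values coming from Fn::GetAZs are fine
            matches.append((path, "Don't hardcode {0} for AvailabilityZones".format(value)))
    return matches


# The TargetGroup snippet from the issue no longer produces a match:
assert check_az_value('all', ['Resources', 'DefaultTargetGroup', 'Targets', 0, 'AvailabilityZone']) == []
# A hard-coded zone elsewhere still does:
assert len(check_az_value('us-east-1a', ['Resources', 'MyVolume', 'AvailabilityZone'])) == 1
```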
gh_patches_debug_20
rasdani/github-patches
git_diff
google__pytype-251
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add test_data to MANIFEST.in This PR also needs to be imported and re-exported rather than merged directly. I'm planning to use this one to test the import process fix I sent you. Fixes https://github.com/google/pytype/issues/245. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pytype/__version__.py` Content: ``` 1 # pylint: skip-file 2 __version__ = '2019.02.13' 3 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pytype/__version__.py b/pytype/__version__.py --- a/pytype/__version__.py +++ b/pytype/__version__.py @@ -1,2 +1,2 @@ # pylint: skip-file -__version__ = '2019.02.13' +__version__ = '2019.03.01'
{"golden_diff": "diff --git a/pytype/__version__.py b/pytype/__version__.py\n--- a/pytype/__version__.py\n+++ b/pytype/__version__.py\n@@ -1,2 +1,2 @@\n # pylint: skip-file\n-__version__ = '2019.02.13'\n+__version__ = '2019.03.01'\n", "issue": "Add test_data to MANIFEST.in\nThis PR also needs to be imported and re-exported rather than merged directly. I'm planning to use this one to test the import process fix I sent you.\r\n\r\nFixes https://github.com/google/pytype/issues/245.\n", "before_files": [{"content": "# pylint: skip-file\n__version__ = '2019.02.13'\n", "path": "pytype/__version__.py"}], "after_files": [{"content": "# pylint: skip-file\n__version__ = '2019.03.01'\n", "path": "pytype/__version__.py"}]}
341
86
gh_patches_debug_12523
rasdani/github-patches
git_diff
DataDog__dd-trace-py-1116
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DDLogger rewrites LogRecord.msg, which causes Sentry events duplication Sentry uses `LogRecord.msg` to identify log events. LogRecord.msg is the log message template, to be formatted on demand. When rewriting `msg`, one should not enrich it with arbitrary values, like `logging_bucket.skipped`. The line ``` record.msg = '{}, {} additional messages skipped'.format(record.msg, logging_bucket.skipped) ``` should be something like: ``` record.msg = '{}, %s additional messages skipped'.format(record.msg) record.args = record.args + (logging_bucket.skipped,) ``` Culprit: https://github.com/DataDog/dd-trace-py/blob/914cbca4ba5ec53ff17cb67164cb51b7bcd91ac2/ddtrace/internal/logger.py#L113 Example of message duplication: ![image](https://user-images.githubusercontent.com/43616133/67687095-ae372580-f98f-11e9-9b81-2a0ee61f175c.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/internal/logger.py` Content: ``` 1 import collections 2 import logging 3 4 from ..utils.formats import get_env 5 6 7 def get_logger(name): 8 """ 9 Retrieve or create a ``DDLogger`` instance. 10 11 This function mirrors the behavior of `logging.getLogger`. 12 13 If no logger with the provided name has been fetched before then 14 a new one is created. 15 16 If a previous logger has been created then it is returned. 17 18 DEV: We do not want to mess with `logging.setLoggerClass()` 19 That will totally mess with the user's loggers, we want 20 just our own, selective loggers to be DDLoggers 21 22 :param name: The name of the logger to fetch or create 23 :type name: str 24 :return: The logger instance 25 :rtype: ``DDLogger`` 26 """ 27 # DEV: `logging.Logger.manager` refers to the single root `logging.Manager` instance 28 # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1824-L1826 # noqa 29 manager = logging.Logger.manager 30 31 # If the logger does not exist yet, create it 32 # DEV: `Manager.loggerDict` is a dict mapping logger name to logger 33 # DEV: This is a simplified version of `logging.Manager.getLogger` 34 # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1221-L1253 # noqa 35 if name not in manager.loggerDict: 36 manager.loggerDict[name] = DDLogger(name=name) 37 38 # Get our logger 39 logger = manager.loggerDict[name] 40 41 # If this log manager has a `_fixupParents` method then call it on our logger 42 # DEV: This helper is used to ensure our logger has an appropriate `Logger.parent` set, 43 # without this then we cannot take advantage of the root loggers handlers 44 # https://github.com/python/cpython/blob/7c7839329c2c66d051960ab1df096aed1cc9343e/Lib/logging/__init__.py#L1272-L1294 # noqa 45 # DEV: `_fixupParents` has been around for awhile, but add the `hasattr` guard... just in case. 46 if hasattr(manager, '_fixupParents'): 47 manager._fixupParents(logger) 48 49 # Return out logger 50 return logger 51 52 53 class DDLogger(logging.Logger): 54 """ 55 Custom rate limited logger used by ``ddtrace`` 56 57 This logger class is used to rate limit the output of 58 log messages from within the ``ddtrace`` package. 
59 """ 60 __slots__ = ('buckets', 'rate_limit') 61 62 # Named tuple used for keeping track of a log lines current time bucket and the number of log lines skipped 63 LoggingBucket = collections.namedtuple('LoggingBucket', ('bucket', 'skipped')) 64 65 def __init__(self, *args, **kwargs): 66 """Constructor for ``DDLogger``""" 67 super(DDLogger, self).__init__(*args, **kwargs) 68 69 # Dict to keep track of the current time bucket per name/level/pathname/lineno 70 self.buckets = collections.defaultdict(lambda: DDLogger.LoggingBucket(0, 0)) 71 72 # Allow 1 log record per name/level/pathname/lineno every 60 seconds by default 73 # Allow configuring via `DD_LOGGING_RATE_LIMIT` 74 # DEV: `DD_LOGGING_RATE_LIMIT=0` means to disable all rate limiting 75 self.rate_limit = int(get_env('logging', 'rate_limit', default=60)) 76 77 def handle(self, record): 78 """ 79 Function used to call the handlers for a log line. 80 81 This implementation will first determine if this log line should 82 be logged or rate limited, and then call the base ``logging.Logger.handle`` 83 function if it should be logged 84 85 DEV: This method has all of it's code inlined to reduce on functions calls 86 87 :param record: The log record being logged 88 :type record: ``logging.LogRecord`` 89 """ 90 # If rate limiting has been disabled (`DD_LOGGING_RATE_LIMIT=0`) then apply no rate limit 91 if not self.rate_limit: 92 super(DDLogger, self).handle(record) 93 return 94 95 # Allow 1 log record by name/level/pathname/lineno every X seconds 96 # DEV: current unix time / rate (e.g. 300 seconds) = time bucket 97 # int(1546615098.8404942 / 300) = 515538 98 # DEV: LogRecord `created` is a unix timestamp/float 99 # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10` 100 current_bucket = int(record.created / self.rate_limit) 101 102 # Limit based on logger name, record level, filename, and line number 103 # ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137) 104 # This way each unique log message can get logged at least once per time period 105 # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10` 106 key = (record.name, record.levelno, record.pathname, record.lineno) 107 108 # Only log this message if the time bucket has changed from the previous time we ran 109 logging_bucket = self.buckets[key] 110 if logging_bucket.bucket != current_bucket: 111 # Append count of skipped messages if we have skipped some since our last logging 112 if logging_bucket.skipped: 113 record.msg = '{}, {} additional messages skipped'.format(record.msg, logging_bucket.skipped) 114 115 # Reset our bucket 116 self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0) 117 118 # Call the base handle to actually log this record 119 super(DDLogger, self).handle(record) 120 else: 121 # Increment the count of records we have skipped 122 # DEV: `self.buckets[key]` is a tuple which is immutable so recreate instead 123 self.buckets[key] = DDLogger.LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1) 124 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/internal/logger.py b/ddtrace/internal/logger.py --- a/ddtrace/internal/logger.py +++ b/ddtrace/internal/logger.py @@ -110,7 +110,8 @@ if logging_bucket.bucket != current_bucket: # Append count of skipped messages if we have skipped some since our last logging if logging_bucket.skipped: - record.msg = '{}, {} additional messages skipped'.format(record.msg, logging_bucket.skipped) + record.msg = '{}, %s additional messages skipped'.format(record.msg) + record.args = record.args + (logging_bucket.skipped, ) # Reset our bucket self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0)
{"golden_diff": "diff --git a/ddtrace/internal/logger.py b/ddtrace/internal/logger.py\n--- a/ddtrace/internal/logger.py\n+++ b/ddtrace/internal/logger.py\n@@ -110,7 +110,8 @@\n if logging_bucket.bucket != current_bucket:\n # Append count of skipped messages if we have skipped some since our last logging\n if logging_bucket.skipped:\n- record.msg = '{}, {} additional messages skipped'.format(record.msg, logging_bucket.skipped)\n+ record.msg = '{}, %s additional messages skipped'.format(record.msg)\n+ record.args = record.args + (logging_bucket.skipped, )\n \n # Reset our bucket\n self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0)\n", "issue": "DDLogger rewrites LogRecord.msg, which causes Sentry events duplication\nSentry uses `LogRecord.msg` to identify log events. LogRecord.msg is the log message template, to be formatted on demand.\r\n\r\nWhen rewriting `msg`, one should not enrich it with arbitrary values, like `logging_bucket.skipped`.\r\n\r\nThe line\r\n```\r\n record.msg = '{}, {} additional messages skipped'.format(record.msg, logging_bucket.skipped)\r\n```\r\n\r\nshould be something like:\r\n\r\n```\r\n record.msg = '{}, %s additional messages skipped'.format(record.msg)\r\n record.args = record.args + (logging_bucket.skipped,)\r\n```\r\n\r\nCulprit:\r\nhttps://github.com/DataDog/dd-trace-py/blob/914cbca4ba5ec53ff17cb67164cb51b7bcd91ac2/ddtrace/internal/logger.py#L113\r\n\r\nExample of message duplication:\r\n![image](https://user-images.githubusercontent.com/43616133/67687095-ae372580-f98f-11e9-9b81-2a0ee61f175c.png)\r\n\n", "before_files": [{"content": "import collections\nimport logging\n\nfrom ..utils.formats import get_env\n\n\ndef get_logger(name):\n \"\"\"\n Retrieve or create a ``DDLogger`` instance.\n\n This function mirrors the behavior of `logging.getLogger`.\n\n If no logger with the provided name has been fetched before then\n a new one is created.\n\n If a previous logger has been created then it is returned.\n\n DEV: We do not want to mess with `logging.setLoggerClass()`\n That will totally mess with the user's loggers, we want\n just our own, selective loggers to be DDLoggers\n\n :param name: The name of the logger to fetch or create\n :type name: str\n :return: The logger instance\n :rtype: ``DDLogger``\n \"\"\"\n # DEV: `logging.Logger.manager` refers to the single root `logging.Manager` instance\n # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1824-L1826 # noqa\n manager = logging.Logger.manager\n\n # If the logger does not exist yet, create it\n # DEV: `Manager.loggerDict` is a dict mapping logger name to logger\n # DEV: This is a simplified version of `logging.Manager.getLogger`\n # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1221-L1253 # noqa\n if name not in manager.loggerDict:\n manager.loggerDict[name] = DDLogger(name=name)\n\n # Get our logger\n logger = manager.loggerDict[name]\n\n # If this log manager has a `_fixupParents` method then call it on our logger\n # DEV: This helper is used to ensure our logger has an appropriate `Logger.parent` set,\n # without this then we cannot take advantage of the root loggers handlers\n # https://github.com/python/cpython/blob/7c7839329c2c66d051960ab1df096aed1cc9343e/Lib/logging/__init__.py#L1272-L1294 # noqa\n # DEV: `_fixupParents` has been around for awhile, but add the `hasattr` guard... 
just in case.\n if hasattr(manager, '_fixupParents'):\n manager._fixupParents(logger)\n\n # Return out logger\n return logger\n\n\nclass DDLogger(logging.Logger):\n \"\"\"\n Custom rate limited logger used by ``ddtrace``\n\n This logger class is used to rate limit the output of\n log messages from within the ``ddtrace`` package.\n \"\"\"\n __slots__ = ('buckets', 'rate_limit')\n\n # Named tuple used for keeping track of a log lines current time bucket and the number of log lines skipped\n LoggingBucket = collections.namedtuple('LoggingBucket', ('bucket', 'skipped'))\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor for ``DDLogger``\"\"\"\n super(DDLogger, self).__init__(*args, **kwargs)\n\n # Dict to keep track of the current time bucket per name/level/pathname/lineno\n self.buckets = collections.defaultdict(lambda: DDLogger.LoggingBucket(0, 0))\n\n # Allow 1 log record per name/level/pathname/lineno every 60 seconds by default\n # Allow configuring via `DD_LOGGING_RATE_LIMIT`\n # DEV: `DD_LOGGING_RATE_LIMIT=0` means to disable all rate limiting\n self.rate_limit = int(get_env('logging', 'rate_limit', default=60))\n\n def handle(self, record):\n \"\"\"\n Function used to call the handlers for a log line.\n\n This implementation will first determine if this log line should\n be logged or rate limited, and then call the base ``logging.Logger.handle``\n function if it should be logged\n\n DEV: This method has all of it's code inlined to reduce on functions calls\n\n :param record: The log record being logged\n :type record: ``logging.LogRecord``\n \"\"\"\n # If rate limiting has been disabled (`DD_LOGGING_RATE_LIMIT=0`) then apply no rate limit\n if not self.rate_limit:\n super(DDLogger, self).handle(record)\n return\n\n # Allow 1 log record by name/level/pathname/lineno every X seconds\n # DEV: current unix time / rate (e.g. 300 seconds) = time bucket\n # int(1546615098.8404942 / 300) = 515538\n # DEV: LogRecord `created` is a unix timestamp/float\n # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10`\n current_bucket = int(record.created / self.rate_limit)\n\n # Limit based on logger name, record level, filename, and line number\n # ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137)\n # This way each unique log message can get logged at least once per time period\n # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. 
`logging.DEBUG = 10`\n key = (record.name, record.levelno, record.pathname, record.lineno)\n\n # Only log this message if the time bucket has changed from the previous time we ran\n logging_bucket = self.buckets[key]\n if logging_bucket.bucket != current_bucket:\n # Append count of skipped messages if we have skipped some since our last logging\n if logging_bucket.skipped:\n record.msg = '{}, {} additional messages skipped'.format(record.msg, logging_bucket.skipped)\n\n # Reset our bucket\n self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0)\n\n # Call the base handle to actually log this record\n super(DDLogger, self).handle(record)\n else:\n # Increment the count of records we have skipped\n # DEV: `self.buckets[key]` is a tuple which is immutable so recreate instead\n self.buckets[key] = DDLogger.LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1)\n", "path": "ddtrace/internal/logger.py"}], "after_files": [{"content": "import collections\nimport logging\n\nfrom ..utils.formats import get_env\n\n\ndef get_logger(name):\n \"\"\"\n Retrieve or create a ``DDLogger`` instance.\n\n This function mirrors the behavior of `logging.getLogger`.\n\n If no logger with the provided name has been fetched before then\n a new one is created.\n\n If a previous logger has been created then it is returned.\n\n DEV: We do not want to mess with `logging.setLoggerClass()`\n That will totally mess with the user's loggers, we want\n just our own, selective loggers to be DDLoggers\n\n :param name: The name of the logger to fetch or create\n :type name: str\n :return: The logger instance\n :rtype: ``DDLogger``\n \"\"\"\n # DEV: `logging.Logger.manager` refers to the single root `logging.Manager` instance\n # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1824-L1826 # noqa\n manager = logging.Logger.manager\n\n # If the logger does not exist yet, create it\n # DEV: `Manager.loggerDict` is a dict mapping logger name to logger\n # DEV: This is a simplified version of `logging.Manager.getLogger`\n # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1221-L1253 # noqa\n if name not in manager.loggerDict:\n manager.loggerDict[name] = DDLogger(name=name)\n\n # Get our logger\n logger = manager.loggerDict[name]\n\n # If this log manager has a `_fixupParents` method then call it on our logger\n # DEV: This helper is used to ensure our logger has an appropriate `Logger.parent` set,\n # without this then we cannot take advantage of the root loggers handlers\n # https://github.com/python/cpython/blob/7c7839329c2c66d051960ab1df096aed1cc9343e/Lib/logging/__init__.py#L1272-L1294 # noqa\n # DEV: `_fixupParents` has been around for awhile, but add the `hasattr` guard... 
just in case.\n if hasattr(manager, '_fixupParents'):\n manager._fixupParents(logger)\n\n # Return out logger\n return logger\n\n\nclass DDLogger(logging.Logger):\n \"\"\"\n Custom rate limited logger used by ``ddtrace``\n\n This logger class is used to rate limit the output of\n log messages from within the ``ddtrace`` package.\n \"\"\"\n __slots__ = ('buckets', 'rate_limit')\n\n # Named tuple used for keeping track of a log lines current time bucket and the number of log lines skipped\n LoggingBucket = collections.namedtuple('LoggingBucket', ('bucket', 'skipped'))\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor for ``DDLogger``\"\"\"\n super(DDLogger, self).__init__(*args, **kwargs)\n\n # Dict to keep track of the current time bucket per name/level/pathname/lineno\n self.buckets = collections.defaultdict(lambda: DDLogger.LoggingBucket(0, 0))\n\n # Allow 1 log record per name/level/pathname/lineno every 60 seconds by default\n # Allow configuring via `DD_LOGGING_RATE_LIMIT`\n # DEV: `DD_LOGGING_RATE_LIMIT=0` means to disable all rate limiting\n self.rate_limit = int(get_env('logging', 'rate_limit', default=60))\n\n def handle(self, record):\n \"\"\"\n Function used to call the handlers for a log line.\n\n This implementation will first determine if this log line should\n be logged or rate limited, and then call the base ``logging.Logger.handle``\n function if it should be logged\n\n DEV: This method has all of it's code inlined to reduce on functions calls\n\n :param record: The log record being logged\n :type record: ``logging.LogRecord``\n \"\"\"\n # If rate limiting has been disabled (`DD_LOGGING_RATE_LIMIT=0`) then apply no rate limit\n if not self.rate_limit:\n super(DDLogger, self).handle(record)\n return\n\n # Allow 1 log record by name/level/pathname/lineno every X seconds\n # DEV: current unix time / rate (e.g. 300 seconds) = time bucket\n # int(1546615098.8404942 / 300) = 515538\n # DEV: LogRecord `created` is a unix timestamp/float\n # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10`\n current_bucket = int(record.created / self.rate_limit)\n\n # Limit based on logger name, record level, filename, and line number\n # ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137)\n # This way each unique log message can get logged at least once per time period\n # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10`\n key = (record.name, record.levelno, record.pathname, record.lineno)\n\n # Only log this message if the time bucket has changed from the previous time we ran\n logging_bucket = self.buckets[key]\n if logging_bucket.bucket != current_bucket:\n # Append count of skipped messages if we have skipped some since our last logging\n if logging_bucket.skipped:\n record.msg = '{}, %s additional messages skipped'.format(record.msg)\n record.args = record.args + (logging_bucket.skipped, )\n\n # Reset our bucket\n self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0)\n\n # Call the base handle to actually log this record\n super(DDLogger, self).handle(record)\n else:\n # Increment the count of records we have skipped\n # DEV: `self.buckets[key]` is a tuple which is immutable so recreate instead\n self.buckets[key] = DDLogger.LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1)\n", "path": "ddtrace/internal/logger.py"}]}
2,233
156
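The ddtrace record above ends by deferring message interpolation to the logging machinery through `record.args` rather than eager `str.format`. A minimal standalone sketch of that pattern; the logger name, path and skipped count are illustrative values, not taken from the record:

```python
import logging

# Build a bare LogRecord the way logging.Logger.makeRecord would.
record = logging.LogRecord(
    "ddtrace.writer", logging.DEBUG, "writer.py", 137,
    "flush failed", None, None,
)

# Append a lazy %s placeholder and extend the args tuple; the handler only
# interpolates if the record is actually emitted.
record.msg = "{}, %s additional messages skipped".format(record.msg)
record.args = (record.args or ()) + (3,)

print(record.getMessage())  # flush failed, 3 additional messages skipped
```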
gh_patches_debug_7061
rasdani/github-patches
git_diff
mindsdb__lightwood-1051
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Lightwood pip packages creates tests module Installing lightwood creates 'tests' module in python site-packages Steps to reproduce: - `pip install lightwood` - in python - `import tests` - `print(tests.__file__) ` It will show that 'tests' is in site-packages --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import sys 2 import setuptools 3 import os 4 5 6 def remove_requirements(requirements, name, replace=''): 7 new_requirements = [] 8 for requirement in requirements: 9 if requirement.split(' ')[0] != name: 10 new_requirements.append(requirement) 11 elif replace is not None: 12 new_requirements.append(replace) 13 return new_requirements 14 15 16 sys_platform = sys.platform 17 18 about = {} 19 with open("lightwood/__about__.py") as fp: 20 exec(fp.read(), about) 21 22 with open("README.md", "r") as fh: 23 long_description = fh.read() 24 25 with open('requirements.txt') as req_file: 26 requirements = [req.strip() for req in req_file.read().splitlines()] 27 28 extra_requirements = {} 29 for fn in os.listdir('.'): 30 if fn.startswith('requirements_') and fn.endswith('.txt'): 31 extra_name = fn.replace('requirements_', '').replace('.txt', '') 32 with open(fn) as fp: 33 extra = [req.strip() for req in fp.read().splitlines()] 34 extra_requirements[extra_name] = extra 35 full_requirements = [] 36 for v in extra_requirements.values(): 37 full_requirements += v 38 extra_requirements['all_extras'] = list(set(full_requirements)) 39 40 # Windows specific requirements 41 if sys_platform in ['win32', 'cygwin', 'windows']: 42 # These have to be installed manually or via the installers in windows 43 requirements = remove_requirements(requirements, 'torch') 44 45 setuptools.setup( 46 name=about['__title__'], 47 version=about['__version__'], 48 url=about['__github__'], 49 download_url=about['__pypi__'], 50 license=about['__license__'], 51 author=about['__author__'], 52 author_email=about['__email__'], 53 description=about['__description__'], 54 long_description=long_description, 55 long_description_content_type="text/markdown", 56 packages=setuptools.find_packages(), 57 package_data={'project': ['requirements.txt']}, 58 install_requires=requirements, 59 extras_require=extra_requirements, 60 classifiers=[ 61 "Programming Language :: Python :: 3", 62 "License :: OSI Approved :: MIT License", 63 "Operating System :: OS Independent", 64 ], 65 python_requires=">=3.7" 66 ) 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -53,7 +53,7 @@ description=about['__description__'], long_description=long_description, long_description_content_type="text/markdown", - packages=setuptools.find_packages(), + packages=setuptools.find_packages(exclude=["tests", "tests.*"]), package_data={'project': ['requirements.txt']}, install_requires=requirements, extras_require=extra_requirements,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,7 @@\n description=about['__description__'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n- packages=setuptools.find_packages(),\n+ packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={'project': ['requirements.txt']},\n install_requires=requirements,\n extras_require=extra_requirements,\n", "issue": "Lightwood pip packages creates tests module\nInstalling lightwood creates 'tests' module in python site-packages\r\n\r\nSteps to reproduce:\r\n- `pip install lightwood`\r\n- in python\r\n - `import tests`\r\n - `print(tests.__file__) `\r\nIt will show that 'tests' is in site-packages\n", "before_files": [{"content": "import sys\nimport setuptools\nimport os\n\n\ndef remove_requirements(requirements, name, replace=''):\n new_requirements = []\n for requirement in requirements:\n if requirement.split(' ')[0] != name:\n new_requirements.append(requirement)\n elif replace is not None:\n new_requirements.append(replace)\n return new_requirements\n\n\nsys_platform = sys.platform\n\nabout = {}\nwith open(\"lightwood/__about__.py\") as fp:\n exec(fp.read(), about)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nwith open('requirements.txt') as req_file:\n requirements = [req.strip() for req in req_file.read().splitlines()]\n\nextra_requirements = {}\nfor fn in os.listdir('.'):\n if fn.startswith('requirements_') and fn.endswith('.txt'):\n extra_name = fn.replace('requirements_', '').replace('.txt', '')\n with open(fn) as fp:\n extra = [req.strip() for req in fp.read().splitlines()]\n extra_requirements[extra_name] = extra\nfull_requirements = []\nfor v in extra_requirements.values():\n full_requirements += v\nextra_requirements['all_extras'] = list(set(full_requirements))\n\n# Windows specific requirements\nif sys_platform in ['win32', 'cygwin', 'windows']:\n # These have to be installed manually or via the installers in windows\n requirements = remove_requirements(requirements, 'torch')\n\nsetuptools.setup(\n name=about['__title__'],\n version=about['__version__'],\n url=about['__github__'],\n download_url=about['__pypi__'],\n license=about['__license__'],\n author=about['__author__'],\n author_email=about['__email__'],\n description=about['__description__'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n package_data={'project': ['requirements.txt']},\n install_requires=requirements,\n extras_require=extra_requirements,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "import sys\nimport setuptools\nimport os\n\n\ndef remove_requirements(requirements, name, replace=''):\n new_requirements = []\n for requirement in requirements:\n if requirement.split(' ')[0] != name:\n new_requirements.append(requirement)\n elif replace is not None:\n new_requirements.append(replace)\n return new_requirements\n\n\nsys_platform = sys.platform\n\nabout = {}\nwith open(\"lightwood/__about__.py\") as fp:\n exec(fp.read(), about)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nwith open('requirements.txt') as req_file:\n requirements = [req.strip() for req in req_file.read().splitlines()]\n\nextra_requirements = {}\nfor fn in 
os.listdir('.'):\n if fn.startswith('requirements_') and fn.endswith('.txt'):\n extra_name = fn.replace('requirements_', '').replace('.txt', '')\n with open(fn) as fp:\n extra = [req.strip() for req in fp.read().splitlines()]\n extra_requirements[extra_name] = extra\nfull_requirements = []\nfor v in extra_requirements.values():\n full_requirements += v\nextra_requirements['all_extras'] = list(set(full_requirements))\n\n# Windows specific requirements\nif sys_platform in ['win32', 'cygwin', 'windows']:\n # These have to be installed manually or via the installers in windows\n requirements = remove_requirements(requirements, 'torch')\n\nsetuptools.setup(\n name=about['__title__'],\n version=about['__version__'],\n url=about['__github__'],\n download_url=about['__pypi__'],\n license=about['__license__'],\n author=about['__author__'],\n author_email=about['__email__'],\n description=about['__description__'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={'project': ['requirements.txt']},\n install_requires=requirements,\n extras_require=extra_requirements,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\"\n)\n", "path": "setup.py"}]}
909
104
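The lightwood record's fix comes down to the `exclude` argument of `setuptools.find_packages`, which keeps a top-level test suite out of the built distribution. A hedged sketch of just that call, assuming a conventional `tests/` directory next to `setup.py`:

```python
import setuptools

# Without the exclude list, a top-level `tests/` package beside setup.py
# would be bundled and land in site-packages as an importable `tests` module.
packages = setuptools.find_packages(exclude=["tests", "tests.*"])
print(packages)
```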
gh_patches_debug_18477
rasdani/github-patches
git_diff
saleor__saleor-1416
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Logging does not redirect to ?next= link ### What I'm trying to achieve Currently Saleor has an option to redirect user to particular URL after being asked to log in - which isn't working ATM, beacuse user gets redirected to storefront main page. ### Steps to reproduce the problem 1. Go to auth-protected URL (such as `/dashboard`) 2. Log in ### What I expected to happen To redirect user to requested page. ### What happened instead/how it failed User gets redirected to `/` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/registration/views.py` Content: ``` 1 from __future__ import unicode_literals 2 3 from django.conf import settings 4 from django.contrib import auth, messages 5 from django.contrib.auth import views as django_views 6 from django.contrib.auth.decorators import login_required 7 from django.shortcuts import redirect 8 from django.template.response import TemplateResponse 9 from django.urls import reverse_lazy 10 from django.utils.translation import ugettext_lazy as _ 11 12 from saleor.cart.utils import find_and_assign_anonymous_cart 13 14 from .forms import LoginForm, PasswordSetUpForm, SignupForm 15 16 17 @find_and_assign_anonymous_cart() 18 def login(request): 19 kwargs = { 20 'template_name': 'account/login.html', 'authentication_form': LoginForm} 21 return django_views.LoginView.as_view(**kwargs)(request, **kwargs) 22 23 24 @login_required 25 def logout(request): 26 auth.logout(request) 27 messages.success(request, _('You have been successfully logged out.')) 28 return redirect(settings.LOGIN_REDIRECT_URL) 29 30 31 def signup(request): 32 form = SignupForm(request.POST or None) 33 if form.is_valid(): 34 form.save() 35 password = form.cleaned_data.get('password') 36 email = form.cleaned_data.get('email') 37 user = auth.authenticate(request=request, email=email, 38 password=password) 39 if user: 40 auth.login(request, user) 41 messages.success(request, _('User has been created')) 42 redirect_url = request.POST.get('next', '') 43 if redirect_url: 44 return redirect(redirect_url) 45 else: 46 return redirect(settings.LOGIN_REDIRECT_URL) 47 ctx = {'form': form} 48 return TemplateResponse(request, 'account/signup.html', ctx) 49 50 51 def password_reset(request): 52 kwargs = { 53 'template_name': 'account/password_reset.html', 54 'success_url': reverse_lazy('account_reset_password_done'), 55 'email_template_name': 'account/email/password_reset_message.txt', 56 'subject_template_name': 'account/email/password_reset_subject.txt'} 57 return django_views.PasswordResetView.as_view(**kwargs)(request, **kwargs) 58 59 60 class PasswordResetConfirm(django_views.PasswordResetConfirmView): 61 template_name = 'account/password_reset_from_key.html' 62 success_url = reverse_lazy('account_reset_password_complete') 63 set_password_form = PasswordSetUpForm 64 token = None 65 uidb64 = None 66 67 68 def password_reset_confirm(request, uidb64=None, token=None): 69 kwargs = { 70 'template_name': 'account/password_reset_from_key.html', 71 'success_url': reverse_lazy('account_reset_password_complete'), 72 'set_password_form': 'PasswordSetUpForm', 73 'token': token, 74 'uidb64': uidb64} 75 return PasswordResetConfirm.as_view(**kwargs)( 76 request, **kwargs) 77 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git 
diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/registration/views.py b/saleor/registration/views.py --- a/saleor/registration/views.py +++ b/saleor/registration/views.py @@ -34,16 +34,13 @@ form.save() password = form.cleaned_data.get('password') email = form.cleaned_data.get('email') - user = auth.authenticate(request=request, email=email, - password=password) + user = auth.authenticate( + request=request, email=email, password=password) if user: auth.login(request, user) messages.success(request, _('User has been created')) - redirect_url = request.POST.get('next', '') - if redirect_url: - return redirect(redirect_url) - else: - return redirect(settings.LOGIN_REDIRECT_URL) + redirect_url = request.POST.get('next', settings.LOGIN_REDIRECT_URL) + return redirect(redirect_url) ctx = {'form': form} return TemplateResponse(request, 'account/signup.html', ctx)
{"golden_diff": "diff --git a/saleor/registration/views.py b/saleor/registration/views.py\n--- a/saleor/registration/views.py\n+++ b/saleor/registration/views.py\n@@ -34,16 +34,13 @@\n form.save()\n password = form.cleaned_data.get('password')\n email = form.cleaned_data.get('email')\n- user = auth.authenticate(request=request, email=email,\n- password=password)\n+ user = auth.authenticate(\n+ request=request, email=email, password=password)\n if user:\n auth.login(request, user)\n messages.success(request, _('User has been created'))\n- redirect_url = request.POST.get('next', '')\n- if redirect_url:\n- return redirect(redirect_url)\n- else:\n- return redirect(settings.LOGIN_REDIRECT_URL)\n+ redirect_url = request.POST.get('next', settings.LOGIN_REDIRECT_URL)\n+ return redirect(redirect_url)\n ctx = {'form': form}\n return TemplateResponse(request, 'account/signup.html', ctx)\n", "issue": "Logging does not redirect to ?next= link\n### What I'm trying to achieve\r\n\r\nCurrently Saleor has an option to redirect user to particular URL after being asked to log in - which isn't working ATM, beacuse user gets redirected to storefront main page.\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Go to auth-protected URL (such as `/dashboard`)\r\n2. Log in\r\n\r\n### What I expected to happen\r\n\r\nTo redirect user to requested page.\r\n\r\n### What happened instead/how it failed\r\n\r\nUser gets redirected to `/`\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.contrib import auth, messages\nfrom django.contrib.auth import views as django_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom saleor.cart.utils import find_and_assign_anonymous_cart\n\nfrom .forms import LoginForm, PasswordSetUpForm, SignupForm\n\n\n@find_and_assign_anonymous_cart()\ndef login(request):\n kwargs = {\n 'template_name': 'account/login.html', 'authentication_form': LoginForm}\n return django_views.LoginView.as_view(**kwargs)(request, **kwargs)\n\n\n@login_required\ndef logout(request):\n auth.logout(request)\n messages.success(request, _('You have been successfully logged out.'))\n return redirect(settings.LOGIN_REDIRECT_URL)\n\n\ndef signup(request):\n form = SignupForm(request.POST or None)\n if form.is_valid():\n form.save()\n password = form.cleaned_data.get('password')\n email = form.cleaned_data.get('email')\n user = auth.authenticate(request=request, email=email,\n password=password)\n if user:\n auth.login(request, user)\n messages.success(request, _('User has been created'))\n redirect_url = request.POST.get('next', '')\n if redirect_url:\n return redirect(redirect_url)\n else:\n return redirect(settings.LOGIN_REDIRECT_URL)\n ctx = {'form': form}\n return TemplateResponse(request, 'account/signup.html', ctx)\n\n\ndef password_reset(request):\n kwargs = {\n 'template_name': 'account/password_reset.html',\n 'success_url': reverse_lazy('account_reset_password_done'),\n 'email_template_name': 'account/email/password_reset_message.txt',\n 'subject_template_name': 'account/email/password_reset_subject.txt'}\n return django_views.PasswordResetView.as_view(**kwargs)(request, **kwargs)\n\n\nclass PasswordResetConfirm(django_views.PasswordResetConfirmView):\n template_name = 'account/password_reset_from_key.html'\n success_url = 
reverse_lazy('account_reset_password_complete')\n set_password_form = PasswordSetUpForm\n token = None\n uidb64 = None\n\n\ndef password_reset_confirm(request, uidb64=None, token=None):\n kwargs = {\n 'template_name': 'account/password_reset_from_key.html',\n 'success_url': reverse_lazy('account_reset_password_complete'),\n 'set_password_form': 'PasswordSetUpForm',\n 'token': token,\n 'uidb64': uidb64}\n return PasswordResetConfirm.as_view(**kwargs)(\n request, **kwargs)\n", "path": "saleor/registration/views.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.contrib import auth, messages\nfrom django.contrib.auth import views as django_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom saleor.cart.utils import find_and_assign_anonymous_cart\n\nfrom .forms import LoginForm, PasswordSetUpForm, SignupForm\n\n\n@find_and_assign_anonymous_cart()\ndef login(request):\n kwargs = {\n 'template_name': 'account/login.html', 'authentication_form': LoginForm}\n return django_views.LoginView.as_view(**kwargs)(request, **kwargs)\n\n\n@login_required\ndef logout(request):\n auth.logout(request)\n messages.success(request, _('You have been successfully logged out.'))\n return redirect(settings.LOGIN_REDIRECT_URL)\n\n\ndef signup(request):\n form = SignupForm(request.POST or None)\n if form.is_valid():\n form.save()\n password = form.cleaned_data.get('password')\n email = form.cleaned_data.get('email')\n user = auth.authenticate(\n request=request, email=email, password=password)\n if user:\n auth.login(request, user)\n messages.success(request, _('User has been created'))\n redirect_url = request.POST.get('next', settings.LOGIN_REDIRECT_URL)\n return redirect(redirect_url)\n ctx = {'form': form}\n return TemplateResponse(request, 'account/signup.html', ctx)\n\n\ndef password_reset(request):\n kwargs = {\n 'template_name': 'account/password_reset.html',\n 'success_url': reverse_lazy('account_reset_password_done'),\n 'email_template_name': 'account/email/password_reset_message.txt',\n 'subject_template_name': 'account/email/password_reset_subject.txt'}\n return django_views.PasswordResetView.as_view(**kwargs)(request, **kwargs)\n\n\nclass PasswordResetConfirm(django_views.PasswordResetConfirmView):\n template_name = 'account/password_reset_from_key.html'\n success_url = reverse_lazy('account_reset_password_complete')\n set_password_form = PasswordSetUpForm\n token = None\n uidb64 = None\n\n\ndef password_reset_confirm(request, uidb64=None, token=None):\n kwargs = {\n 'template_name': 'account/password_reset_from_key.html',\n 'success_url': reverse_lazy('account_reset_password_complete'),\n 'set_password_form': 'PasswordSetUpForm',\n 'token': token,\n 'uidb64': uidb64}\n return PasswordResetConfirm.as_view(**kwargs)(\n request, **kwargs)\n", "path": "saleor/registration/views.py"}]}
1,072
219
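The saleor record reduces to a single fallback rule: honour the posted `next` value when present, otherwise use the configured landing page. A sketch of that view tail under assumed Django settings; the view name `finish_signup` is made up and form handling is omitted:

```python
from django.conf import settings
from django.shortcuts import redirect


def finish_signup(request):
    # Prefer the ?next=... destination carried through the form; fall back
    # to LOGIN_REDIRECT_URL only when no explicit target was posted.
    redirect_url = request.POST.get("next", settings.LOGIN_REDIRECT_URL)
    return redirect(redirect_url)
```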
gh_patches_debug_31772
rasdani/github-patches
git_diff
SciTools__cartopy-1162
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove path_to_geos heuristics Ultimately it'd be nicer to remove the heuristic guesswork from path_to_geos by modifying MPL to actually use the CLOSE_POLY vertex code when a path describes a polygon. In the meantime, we can get a chunk of that benefit by removing the heuristics and making GeoAxes.contourf() ensure the resulting paths use CLOSE_POLY. (Originally raised in #63.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/cartopy/mpl/patch.py` Content: ``` 1 # (C) British Crown Copyright 2011 - 2018, Met Office 2 # 3 # This file is part of cartopy. 4 # 5 # cartopy is free software: you can redistribute it and/or modify it under 6 # the terms of the GNU Lesser General Public License as published by the 7 # Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # cartopy is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU Lesser General Public License for more details. 14 # 15 # You should have received a copy of the GNU Lesser General Public License 16 # along with cartopy. If not, see <https://www.gnu.org/licenses/>. 17 """ 18 Provide shapely geometry <-> matplotlib path support. 19 20 21 See also `Shapely Geometric Objects <see_also_shapely>`_ 22 and `Matplotlib Path API <http://matplotlib.org/api/path_api.html>`_. 23 24 .. see_also_shapely: 25 http://toblerity.org/shapely/manual.html#geometric-objects 26 27 """ 28 29 from __future__ import (absolute_import, division, print_function) 30 31 import numpy as np 32 from matplotlib.path import Path 33 import shapely.geometry as sgeom 34 35 36 def geos_to_path(shape): 37 """ 38 Create a list of :class:`matplotlib.path.Path` objects that describe 39 a shape. 40 41 Parameters 42 ---------- 43 shape 44 A list, tuple or single instance of any of the following 45 types: :class:`shapely.geometry.point.Point`, 46 :class:`shapely.geometry.linestring.LineString`, 47 :class:`shapely.geometry.polygon.Polygon`, 48 :class:`shapely.geometry.multipoint.MultiPoint`, 49 :class:`shapely.geometry.multipolygon.MultiPolygon`, 50 :class:`shapely.geometry.multilinestring.MultiLineString`, 51 :class:`shapely.geometry.collection.GeometryCollection`, 52 or any type with a _as_mpl_path() method. 53 54 Returns 55 ------- 56 paths 57 A list of :class:`matplotlib.path.Path` objects. 
58 59 """ 60 if isinstance(shape, (list, tuple)): 61 paths = [] 62 for shp in shape: 63 paths.extend(geos_to_path(shp)) 64 return paths 65 66 if isinstance(shape, (sgeom.LineString, sgeom.Point)): 67 return [Path(np.column_stack(shape.xy))] 68 elif isinstance(shape, sgeom.Polygon): 69 def poly_codes(poly): 70 codes = np.ones(len(poly.xy[0])) * Path.LINETO 71 codes[0] = Path.MOVETO 72 return codes 73 if shape.is_empty: 74 return [] 75 vertices = np.concatenate([np.array(shape.exterior.xy)] + 76 [np.array(ring.xy) for ring in 77 shape.interiors], 1).T 78 codes = np.concatenate([poly_codes(shape.exterior)] + 79 [poly_codes(ring) for ring in shape.interiors]) 80 return [Path(vertices, codes)] 81 elif isinstance(shape, (sgeom.MultiPolygon, sgeom.GeometryCollection, 82 sgeom.MultiLineString, sgeom.MultiPoint)): 83 paths = [] 84 for geom in shape.geoms: 85 paths.extend(geos_to_path(geom)) 86 return paths 87 elif hasattr(shape, '_as_mpl_path'): 88 vertices, codes = shape._as_mpl_path() 89 return [Path(vertices, codes)] 90 else: 91 raise ValueError('Unsupported shape type {}.'.format(type(shape))) 92 93 94 def path_segments(path, **kwargs): 95 """ 96 Create an array of vertices and a corresponding array of codes from a 97 :class:`matplotlib.path.Path`. 98 99 Parameters 100 ---------- 101 path 102 A :class:`matplotlib.path.Path` instance. 103 104 Other Parameters 105 ---------------- 106 kwargs 107 See :func:`matplotlib.path.iter_segments` for details of the keyword 108 arguments. 109 110 Returns 111 ------- 112 vertices, codes 113 A (vertices, codes) tuple, where vertices is a numpy array of 114 coordinates, and codes is a numpy array of matplotlib path codes. 115 See :class:`matplotlib.path.Path` for information on the types of 116 codes and their meanings. 117 118 """ 119 pth = path.cleaned(**kwargs) 120 return pth.vertices[:-1, :], pth.codes[:-1] 121 122 123 def path_to_geos(path, force_ccw=False): 124 """ 125 Create a list of Shapely geometric objects from a 126 :class:`matplotlib.path.Path`. 127 128 Parameters 129 ---------- 130 path 131 A :class:`matplotlib.path.Path` instance. 132 133 Other Parameters 134 ---------------- 135 force_ccw 136 Boolean flag determining whether the path can be inverted to enforce 137 ccw. Defaults to False. 138 139 Returns 140 ------- 141 A list of instances of the following type(s): 142 :class:`shapely.geometry.polygon.Polygon`, 143 :class:`shapely.geometry.linestring.LineString` and/or 144 :class:`shapely.geometry.multilinestring.MultiLineString`. 145 146 """ 147 # Convert path into numpy array of vertices (and associated codes) 148 path_verts, path_codes = path_segments(path, curves=False) 149 150 # Split into subarrays such that each subarray consists of connected 151 # line segments based on the start of each one being marked by a 152 # matplotlib MOVETO code. 153 verts_split_inds = np.where(path_codes == Path.MOVETO)[0] 154 verts_split = np.split(path_verts, verts_split_inds) 155 codes_split = np.split(path_codes, verts_split_inds) 156 157 # Iterate through the vertices generating a list of 158 # (external_geom, [internal_polygons]) tuples. 159 other_result_geoms = [] 160 collection = [] 161 for path_verts, path_codes in zip(verts_split, codes_split): 162 if len(path_verts) == 0: 163 continue 164 165 # XXX A path can be given which does not end with close poly, in that 166 # situation, we have to guess? 
167 verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :], 168 axis=1) 169 if all(verts_same_as_first): 170 geom = sgeom.Point(path_verts[0, :]) 171 elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY: 172 geom = sgeom.Polygon(path_verts[:-1, :]) 173 elif path_verts.shape[0] > 3 and verts_same_as_first[-1]: 174 geom = sgeom.Polygon(path_verts) 175 else: 176 geom = sgeom.LineString(path_verts) 177 178 # If geom is a Polygon and is contained within the last geom in 179 # collection, add it to its list of internal polygons, otherwise 180 # simply append it as a new external geom. 181 if geom.is_empty: 182 pass 183 elif (len(collection) > 0 and 184 isinstance(collection[-1][0], sgeom.Polygon) and 185 isinstance(geom, sgeom.Polygon) and 186 collection[-1][0].contains(geom.exterior)): 187 collection[-1][1].append(geom.exterior) 188 elif isinstance(geom, sgeom.Point): 189 other_result_geoms.append(geom) 190 else: 191 collection.append((geom, [])) 192 193 # Convert each (external_geom, [internal_polygons]) pair into a 194 # a shapely Polygon that encapsulates the internal polygons, if the 195 # external geom is a LineString leave it alone. 196 geom_collection = [] 197 for external_geom, internal_polys in collection: 198 if internal_polys: 199 # XXX worry about islands within lakes 200 geom = sgeom.Polygon(external_geom.exterior, internal_polys) 201 else: 202 geom = external_geom 203 204 # Correctly orientate the polygon (ccw) 205 if isinstance(geom, sgeom.Polygon): 206 if force_ccw and not geom.exterior.is_ccw: 207 geom = sgeom.polygon.orient(geom) 208 209 geom_collection.append(geom) 210 211 # If the geom_collection only contains LineStrings combine them 212 # into a single MultiLinestring. 213 if geom_collection and all(isinstance(geom, sgeom.LineString) for 214 geom in geom_collection): 215 geom_collection = [sgeom.MultiLineString(geom_collection)] 216 217 # Remove any zero area Polygons 218 def not_zero_poly(geom): 219 return ((isinstance(geom, sgeom.Polygon) and not geom._is_empty and 220 geom.area != 0) or 221 not isinstance(geom, sgeom.Polygon)) 222 223 result = list(filter(not_zero_poly, geom_collection)) 224 225 return result + other_result_geoms 226 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/cartopy/mpl/patch.py b/lib/cartopy/mpl/patch.py --- a/lib/cartopy/mpl/patch.py +++ b/lib/cartopy/mpl/patch.py @@ -29,6 +29,7 @@ from __future__ import (absolute_import, division, print_function) import numpy as np +import matplotlib from matplotlib.path import Path import shapely.geometry as sgeom @@ -69,6 +70,7 @@ def poly_codes(poly): codes = np.ones(len(poly.xy[0])) * Path.LINETO codes[0] = Path.MOVETO + codes[-1] = Path.CLOSEPOLY return codes if shape.is_empty: return [] @@ -162,15 +164,16 @@ if len(path_verts) == 0: continue - # XXX A path can be given which does not end with close poly, in that - # situation, we have to guess? verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :], axis=1) if all(verts_same_as_first): geom = sgeom.Point(path_verts[0, :]) elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY: geom = sgeom.Polygon(path_verts[:-1, :]) - elif path_verts.shape[0] > 3 and verts_same_as_first[-1]: + elif (matplotlib.__version__ < '2.2.0' and + # XXX A path can be given which does not end with close poly, + # in that situation, we have to guess? + path_verts.shape[0] > 3 and verts_same_as_first[-1]): geom = sgeom.Polygon(path_verts) else: geom = sgeom.LineString(path_verts)
{"golden_diff": "diff --git a/lib/cartopy/mpl/patch.py b/lib/cartopy/mpl/patch.py\n--- a/lib/cartopy/mpl/patch.py\n+++ b/lib/cartopy/mpl/patch.py\n@@ -29,6 +29,7 @@\n from __future__ import (absolute_import, division, print_function)\n \n import numpy as np\n+import matplotlib\n from matplotlib.path import Path\n import shapely.geometry as sgeom\n \n@@ -69,6 +70,7 @@\n def poly_codes(poly):\n codes = np.ones(len(poly.xy[0])) * Path.LINETO\n codes[0] = Path.MOVETO\n+ codes[-1] = Path.CLOSEPOLY\n return codes\n if shape.is_empty:\n return []\n@@ -162,15 +164,16 @@\n if len(path_verts) == 0:\n continue\n \n- # XXX A path can be given which does not end with close poly, in that\n- # situation, we have to guess?\n verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],\n axis=1)\n if all(verts_same_as_first):\n geom = sgeom.Point(path_verts[0, :])\n elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:\n geom = sgeom.Polygon(path_verts[:-1, :])\n- elif path_verts.shape[0] > 3 and verts_same_as_first[-1]:\n+ elif (matplotlib.__version__ < '2.2.0' and\n+ # XXX A path can be given which does not end with close poly,\n+ # in that situation, we have to guess?\n+ path_verts.shape[0] > 3 and verts_same_as_first[-1]):\n geom = sgeom.Polygon(path_verts)\n else:\n geom = sgeom.LineString(path_verts)\n", "issue": "Remove path_to_geos heuristics\nUltimately it'd be nicer to remove the heuristic guesswork from path_to_geos by modifying MPL to actually use the CLOSE_POLY vertex code when a path describes a polygon. In the meantime, we can get a chunk of that benefit by removing the heuristics and making GeoAxes.contourf() ensure the resulting paths use CLOSE_POLY.\n\n(Originally raised in #63.)\n\n", "before_files": [{"content": "# (C) British Crown Copyright 2011 - 2018, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\nProvide shapely geometry <-> matplotlib path support.\n\n\nSee also `Shapely Geometric Objects <see_also_shapely>`_\nand `Matplotlib Path API <http://matplotlib.org/api/path_api.html>`_.\n\n.. 
see_also_shapely:\n http://toblerity.org/shapely/manual.html#geometric-objects\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nimport numpy as np\nfrom matplotlib.path import Path\nimport shapely.geometry as sgeom\n\n\ndef geos_to_path(shape):\n \"\"\"\n Create a list of :class:`matplotlib.path.Path` objects that describe\n a shape.\n\n Parameters\n ----------\n shape\n A list, tuple or single instance of any of the following\n types: :class:`shapely.geometry.point.Point`,\n :class:`shapely.geometry.linestring.LineString`,\n :class:`shapely.geometry.polygon.Polygon`,\n :class:`shapely.geometry.multipoint.MultiPoint`,\n :class:`shapely.geometry.multipolygon.MultiPolygon`,\n :class:`shapely.geometry.multilinestring.MultiLineString`,\n :class:`shapely.geometry.collection.GeometryCollection`,\n or any type with a _as_mpl_path() method.\n\n Returns\n -------\n paths\n A list of :class:`matplotlib.path.Path` objects.\n\n \"\"\"\n if isinstance(shape, (list, tuple)):\n paths = []\n for shp in shape:\n paths.extend(geos_to_path(shp))\n return paths\n\n if isinstance(shape, (sgeom.LineString, sgeom.Point)):\n return [Path(np.column_stack(shape.xy))]\n elif isinstance(shape, sgeom.Polygon):\n def poly_codes(poly):\n codes = np.ones(len(poly.xy[0])) * Path.LINETO\n codes[0] = Path.MOVETO\n return codes\n if shape.is_empty:\n return []\n vertices = np.concatenate([np.array(shape.exterior.xy)] +\n [np.array(ring.xy) for ring in\n shape.interiors], 1).T\n codes = np.concatenate([poly_codes(shape.exterior)] +\n [poly_codes(ring) for ring in shape.interiors])\n return [Path(vertices, codes)]\n elif isinstance(shape, (sgeom.MultiPolygon, sgeom.GeometryCollection,\n sgeom.MultiLineString, sgeom.MultiPoint)):\n paths = []\n for geom in shape.geoms:\n paths.extend(geos_to_path(geom))\n return paths\n elif hasattr(shape, '_as_mpl_path'):\n vertices, codes = shape._as_mpl_path()\n return [Path(vertices, codes)]\n else:\n raise ValueError('Unsupported shape type {}.'.format(type(shape)))\n\n\ndef path_segments(path, **kwargs):\n \"\"\"\n Create an array of vertices and a corresponding array of codes from a\n :class:`matplotlib.path.Path`.\n\n Parameters\n ----------\n path\n A :class:`matplotlib.path.Path` instance.\n\n Other Parameters\n ----------------\n kwargs\n See :func:`matplotlib.path.iter_segments` for details of the keyword\n arguments.\n\n Returns\n -------\n vertices, codes\n A (vertices, codes) tuple, where vertices is a numpy array of\n coordinates, and codes is a numpy array of matplotlib path codes.\n See :class:`matplotlib.path.Path` for information on the types of\n codes and their meanings.\n\n \"\"\"\n pth = path.cleaned(**kwargs)\n return pth.vertices[:-1, :], pth.codes[:-1]\n\n\ndef path_to_geos(path, force_ccw=False):\n \"\"\"\n Create a list of Shapely geometric objects from a\n :class:`matplotlib.path.Path`.\n\n Parameters\n ----------\n path\n A :class:`matplotlib.path.Path` instance.\n\n Other Parameters\n ----------------\n force_ccw\n Boolean flag determining whether the path can be inverted to enforce\n ccw. 
Defaults to False.\n\n Returns\n -------\n A list of instances of the following type(s):\n :class:`shapely.geometry.polygon.Polygon`,\n :class:`shapely.geometry.linestring.LineString` and/or\n :class:`shapely.geometry.multilinestring.MultiLineString`.\n\n \"\"\"\n # Convert path into numpy array of vertices (and associated codes)\n path_verts, path_codes = path_segments(path, curves=False)\n\n # Split into subarrays such that each subarray consists of connected\n # line segments based on the start of each one being marked by a\n # matplotlib MOVETO code.\n verts_split_inds = np.where(path_codes == Path.MOVETO)[0]\n verts_split = np.split(path_verts, verts_split_inds)\n codes_split = np.split(path_codes, verts_split_inds)\n\n # Iterate through the vertices generating a list of\n # (external_geom, [internal_polygons]) tuples.\n other_result_geoms = []\n collection = []\n for path_verts, path_codes in zip(verts_split, codes_split):\n if len(path_verts) == 0:\n continue\n\n # XXX A path can be given which does not end with close poly, in that\n # situation, we have to guess?\n verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],\n axis=1)\n if all(verts_same_as_first):\n geom = sgeom.Point(path_verts[0, :])\n elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:\n geom = sgeom.Polygon(path_verts[:-1, :])\n elif path_verts.shape[0] > 3 and verts_same_as_first[-1]:\n geom = sgeom.Polygon(path_verts)\n else:\n geom = sgeom.LineString(path_verts)\n\n # If geom is a Polygon and is contained within the last geom in\n # collection, add it to its list of internal polygons, otherwise\n # simply append it as a new external geom.\n if geom.is_empty:\n pass\n elif (len(collection) > 0 and\n isinstance(collection[-1][0], sgeom.Polygon) and\n isinstance(geom, sgeom.Polygon) and\n collection[-1][0].contains(geom.exterior)):\n collection[-1][1].append(geom.exterior)\n elif isinstance(geom, sgeom.Point):\n other_result_geoms.append(geom)\n else:\n collection.append((geom, []))\n\n # Convert each (external_geom, [internal_polygons]) pair into a\n # a shapely Polygon that encapsulates the internal polygons, if the\n # external geom is a LineString leave it alone.\n geom_collection = []\n for external_geom, internal_polys in collection:\n if internal_polys:\n # XXX worry about islands within lakes\n geom = sgeom.Polygon(external_geom.exterior, internal_polys)\n else:\n geom = external_geom\n\n # Correctly orientate the polygon (ccw)\n if isinstance(geom, sgeom.Polygon):\n if force_ccw and not geom.exterior.is_ccw:\n geom = sgeom.polygon.orient(geom)\n\n geom_collection.append(geom)\n\n # If the geom_collection only contains LineStrings combine them\n # into a single MultiLinestring.\n if geom_collection and all(isinstance(geom, sgeom.LineString) for\n geom in geom_collection):\n geom_collection = [sgeom.MultiLineString(geom_collection)]\n\n # Remove any zero area Polygons\n def not_zero_poly(geom):\n return ((isinstance(geom, sgeom.Polygon) and not geom._is_empty and\n geom.area != 0) or\n not isinstance(geom, sgeom.Polygon))\n\n result = list(filter(not_zero_poly, geom_collection))\n\n return result + other_result_geoms\n", "path": "lib/cartopy/mpl/patch.py"}], "after_files": [{"content": "# (C) British Crown Copyright 2011 - 2018, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the 
License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\nProvide shapely geometry <-> matplotlib path support.\n\n\nSee also `Shapely Geometric Objects <see_also_shapely>`_\nand `Matplotlib Path API <http://matplotlib.org/api/path_api.html>`_.\n\n.. see_also_shapely:\n http://toblerity.org/shapely/manual.html#geometric-objects\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib.path import Path\nimport shapely.geometry as sgeom\n\n\ndef geos_to_path(shape):\n \"\"\"\n Create a list of :class:`matplotlib.path.Path` objects that describe\n a shape.\n\n Parameters\n ----------\n shape\n A list, tuple or single instance of any of the following\n types: :class:`shapely.geometry.point.Point`,\n :class:`shapely.geometry.linestring.LineString`,\n :class:`shapely.geometry.polygon.Polygon`,\n :class:`shapely.geometry.multipoint.MultiPoint`,\n :class:`shapely.geometry.multipolygon.MultiPolygon`,\n :class:`shapely.geometry.multilinestring.MultiLineString`,\n :class:`shapely.geometry.collection.GeometryCollection`,\n or any type with a _as_mpl_path() method.\n\n Returns\n -------\n paths\n A list of :class:`matplotlib.path.Path` objects.\n\n \"\"\"\n if isinstance(shape, (list, tuple)):\n paths = []\n for shp in shape:\n paths.extend(geos_to_path(shp))\n return paths\n\n if isinstance(shape, (sgeom.LineString, sgeom.Point)):\n return [Path(np.column_stack(shape.xy))]\n elif isinstance(shape, sgeom.Polygon):\n def poly_codes(poly):\n codes = np.ones(len(poly.xy[0])) * Path.LINETO\n codes[0] = Path.MOVETO\n codes[-1] = Path.CLOSEPOLY\n return codes\n if shape.is_empty:\n return []\n vertices = np.concatenate([np.array(shape.exterior.xy)] +\n [np.array(ring.xy) for ring in\n shape.interiors], 1).T\n codes = np.concatenate([poly_codes(shape.exterior)] +\n [poly_codes(ring) for ring in shape.interiors])\n return [Path(vertices, codes)]\n elif isinstance(shape, (sgeom.MultiPolygon, sgeom.GeometryCollection,\n sgeom.MultiLineString, sgeom.MultiPoint)):\n paths = []\n for geom in shape.geoms:\n paths.extend(geos_to_path(geom))\n return paths\n elif hasattr(shape, '_as_mpl_path'):\n vertices, codes = shape._as_mpl_path()\n return [Path(vertices, codes)]\n else:\n raise ValueError('Unsupported shape type {}.'.format(type(shape)))\n\n\ndef path_segments(path, **kwargs):\n \"\"\"\n Create an array of vertices and a corresponding array of codes from a\n :class:`matplotlib.path.Path`.\n\n Parameters\n ----------\n path\n A :class:`matplotlib.path.Path` instance.\n\n Other Parameters\n ----------------\n kwargs\n See :func:`matplotlib.path.iter_segments` for details of the keyword\n arguments.\n\n Returns\n -------\n vertices, codes\n A (vertices, codes) tuple, where vertices is a numpy array of\n coordinates, and codes is a numpy array of matplotlib path codes.\n See :class:`matplotlib.path.Path` for information on the types of\n codes and their meanings.\n\n \"\"\"\n pth = path.cleaned(**kwargs)\n return pth.vertices[:-1, :], pth.codes[:-1]\n\n\ndef path_to_geos(path, force_ccw=False):\n \"\"\"\n Create a list of Shapely geometric objects 
from a\n :class:`matplotlib.path.Path`.\n\n Parameters\n ----------\n path\n A :class:`matplotlib.path.Path` instance.\n\n Other Parameters\n ----------------\n force_ccw\n Boolean flag determining whether the path can be inverted to enforce\n ccw. Defaults to False.\n\n Returns\n -------\n A list of instances of the following type(s):\n :class:`shapely.geometry.polygon.Polygon`,\n :class:`shapely.geometry.linestring.LineString` and/or\n :class:`shapely.geometry.multilinestring.MultiLineString`.\n\n \"\"\"\n # Convert path into numpy array of vertices (and associated codes)\n path_verts, path_codes = path_segments(path, curves=False)\n\n # Split into subarrays such that each subarray consists of connected\n # line segments based on the start of each one being marked by a\n # matplotlib MOVETO code.\n verts_split_inds = np.where(path_codes == Path.MOVETO)[0]\n verts_split = np.split(path_verts, verts_split_inds)\n codes_split = np.split(path_codes, verts_split_inds)\n\n # Iterate through the vertices generating a list of\n # (external_geom, [internal_polygons]) tuples.\n other_result_geoms = []\n collection = []\n for path_verts, path_codes in zip(verts_split, codes_split):\n if len(path_verts) == 0:\n continue\n\n verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],\n axis=1)\n if all(verts_same_as_first):\n geom = sgeom.Point(path_verts[0, :])\n elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:\n geom = sgeom.Polygon(path_verts[:-1, :])\n elif (matplotlib.__version__ < '2.2.0' and\n # XXX A path can be given which does not end with close poly,\n # in that situation, we have to guess?\n path_verts.shape[0] > 3 and verts_same_as_first[-1]):\n geom = sgeom.Polygon(path_verts)\n else:\n geom = sgeom.LineString(path_verts)\n\n # If geom is a Polygon and is contained within the last geom in\n # collection, add it to its list of internal polygons, otherwise\n # simply append it as a new external geom.\n if geom.is_empty:\n pass\n elif (len(collection) > 0 and\n isinstance(collection[-1][0], sgeom.Polygon) and\n isinstance(geom, sgeom.Polygon) and\n collection[-1][0].contains(geom.exterior)):\n collection[-1][1].append(geom.exterior)\n elif isinstance(geom, sgeom.Point):\n other_result_geoms.append(geom)\n else:\n collection.append((geom, []))\n\n # Convert each (external_geom, [internal_polygons]) pair into a\n # a shapely Polygon that encapsulates the internal polygons, if the\n # external geom is a LineString leave it alone.\n geom_collection = []\n for external_geom, internal_polys in collection:\n if internal_polys:\n # XXX worry about islands within lakes\n geom = sgeom.Polygon(external_geom.exterior, internal_polys)\n else:\n geom = external_geom\n\n # Correctly orientate the polygon (ccw)\n if isinstance(geom, sgeom.Polygon):\n if force_ccw and not geom.exterior.is_ccw:\n geom = sgeom.polygon.orient(geom)\n\n geom_collection.append(geom)\n\n # If the geom_collection only contains LineStrings combine them\n # into a single MultiLinestring.\n if geom_collection and all(isinstance(geom, sgeom.LineString) for\n geom in geom_collection):\n geom_collection = [sgeom.MultiLineString(geom_collection)]\n\n # Remove any zero area Polygons\n def not_zero_poly(geom):\n return ((isinstance(geom, sgeom.Polygon) and not geom._is_empty and\n geom.area != 0) or\n not isinstance(geom, sgeom.Polygon))\n\n result = list(filter(not_zero_poly, geom_collection))\n\n return result + other_result_geoms\n", "path": "lib/cartopy/mpl/patch.py"}]}
2,806
422
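The cartopy record hinges on stamping an explicit `CLOSEPOLY` code on a ring's final vertex so Matplotlib paths can be recognised as polygons without heuristics. A small sketch of constructing such a path; the unit square is an arbitrary example:

```python
import numpy as np
from matplotlib.path import Path

verts = np.array([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], dtype=float)
codes = np.full(len(verts), Path.LINETO, dtype=Path.code_type)
codes[0] = Path.MOVETO
codes[-1] = Path.CLOSEPOLY  # explicitly mark the ring as a closed polygon
ring = Path(verts, codes)
print(ring.codes[-1] == Path.CLOSEPOLY)  # True
```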
gh_patches_debug_48346
rasdani/github-patches
git_diff
interlegis__sapl-3164
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Não permitir que se altere campos rotulo_prefixo_texto e rotulo_sufixo_texto via interface admin <!--- Forneça um resumo geral da _issue_ no título acima --> ## Comportamento Esperado <!--- Se você está descrevendo um _bug_, conte-nos o que deveria acontecer. --> <!--- Se você está sugerindo uma mudança/melhoria, conte-nos como deve funcionar. --> ## Comportamento Atual <!--- Se está descrevendo um bug, conte-nos o que acontece em vez do comportamento esperado. --> <!--- Se está sugerindo uma mudança/melhoria, explique a diferença com o comportamento atual. --> ## Possível Solução <!--- Não é obrigatório, mas sugira uma possível correção/razão para o bug --> <!--- ou ideias de como implementar a adição/mudança. --> ## Passos para Reproduzir (para bugs) <!--- Forneça um link para um exemplo, ou um conjunto de passos inequívocos --> <!--- para reproduzir esse bug. Inclua código para reproduzir, se relevante. --> 1. 2. 3. 4. ## Contexto <!--- Como esse problema o afeta? O que você está tentando realizar? --> <!--- Fornecer o contexto nos ajuda a encontrar uma solução que seja mais útil no mundo real --> ## Imagens do Ocorrido <!--- Representação visual em vídeo ou imagem do ocorrido --> <!--- Se está descrevendo um bug poste imagens ou vídeos na reprodução do bug citado, caso se aplique --> ## Seu Ambiente <!--- Inclua detalhes relevantes sobre o ambiente em que você presenciou/experienciou o bug. --> * Versão usada (_Release_): * Nome e versão do navegador: * Nome e versão do Sistema Operacional (desktop ou mobile): * Link para o seu projeto (Caso de fork deste projeto): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sapl/compilacao/admin.py` Content: ``` 1 from sapl.utils import register_all_models_in_admin 2 3 register_all_models_in_admin(__name__) 4 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sapl/compilacao/admin.py b/sapl/compilacao/admin.py --- a/sapl/compilacao/admin.py +++ b/sapl/compilacao/admin.py @@ -1,3 +1,12 @@ +from django.contrib import admin +from sapl.compilacao.models import TipoDispositivo from sapl.utils import register_all_models_in_admin register_all_models_in_admin(__name__) +admin.site.unregister(TipoDispositivo) + + [email protected](TipoDispositivo) +class TipoDispositivoAdmin(admin.ModelAdmin): + readonly_fields = ("rotulo_prefixo_texto", "rotulo_sufixo_texto",) + list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']
{"golden_diff": "diff --git a/sapl/compilacao/admin.py b/sapl/compilacao/admin.py\n--- a/sapl/compilacao/admin.py\n+++ b/sapl/compilacao/admin.py\n@@ -1,3 +1,12 @@\n+from django.contrib import admin\n+from sapl.compilacao.models import TipoDispositivo\n from sapl.utils import register_all_models_in_admin\n \n register_all_models_in_admin(__name__)\n+admin.site.unregister(TipoDispositivo)\n+\n+\[email protected](TipoDispositivo)\n+class TipoDispositivoAdmin(admin.ModelAdmin):\n+ readonly_fields = (\"rotulo_prefixo_texto\", \"rotulo_sufixo_texto\",)\n+ list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']\n", "issue": "N\u00e3o permitir que se altere campos rotulo_prefixo_texto e rotulo_sufixo_texto via interface admin\n<!--- Forne\u00e7a um resumo geral da _issue_ no t\u00edtulo acima -->\r\n\r\n## Comportamento Esperado\r\n<!--- Se voc\u00ea est\u00e1 descrevendo um _bug_, conte-nos o que deveria acontecer. -->\r\n<!--- Se voc\u00ea est\u00e1 sugerindo uma mudan\u00e7a/melhoria, conte-nos como deve funcionar. -->\r\n\r\n## Comportamento Atual\r\n<!--- Se est\u00e1 descrevendo um bug, conte-nos o que acontece em vez do comportamento esperado. -->\r\n<!--- Se est\u00e1 sugerindo uma mudan\u00e7a/melhoria, explique a diferen\u00e7a com o comportamento atual. -->\r\n\r\n## Poss\u00edvel Solu\u00e7\u00e3o\r\n<!--- N\u00e3o \u00e9 obrigat\u00f3rio, mas sugira uma poss\u00edvel corre\u00e7\u00e3o/raz\u00e3o para o bug -->\r\n<!--- ou ideias de como implementar a adi\u00e7\u00e3o/mudan\u00e7a. -->\r\n\r\n## Passos para Reproduzir (para bugs)\r\n<!--- Forne\u00e7a um link para um exemplo, ou um conjunto de passos inequ\u00edvocos -->\r\n<!--- para reproduzir esse bug. Inclua c\u00f3digo para reproduzir, se relevante. -->\r\n1.\r\n2.\r\n3.\r\n4.\r\n\r\n## Contexto\r\n<!--- Como esse problema o afeta? O que voc\u00ea est\u00e1 tentando realizar? -->\r\n<!--- Fornecer o contexto nos ajuda a encontrar uma solu\u00e7\u00e3o que seja mais \u00fatil no mundo real -->\r\n\r\n## Imagens do Ocorrido\r\n<!--- Representa\u00e7\u00e3o visual em v\u00eddeo ou imagem do ocorrido -->\r\n<!--- Se est\u00e1 descrevendo um bug poste imagens ou v\u00eddeos na reprodu\u00e7\u00e3o do bug citado, caso se aplique -->\r\n\r\n## Seu Ambiente\r\n<!--- Inclua detalhes relevantes sobre o ambiente em que voc\u00ea presenciou/experienciou o bug. -->\r\n* Vers\u00e3o usada (_Release_):\r\n* Nome e vers\u00e3o do navegador:\r\n* Nome e vers\u00e3o do Sistema Operacional (desktop ou mobile):\r\n* Link para o seu projeto (Caso de fork deste projeto):\r\n\n", "before_files": [{"content": "from sapl.utils import register_all_models_in_admin\n\nregister_all_models_in_admin(__name__)\n", "path": "sapl/compilacao/admin.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom sapl.compilacao.models import TipoDispositivo\nfrom sapl.utils import register_all_models_in_admin\n\nregister_all_models_in_admin(__name__)\nadmin.site.unregister(TipoDispositivo)\n\n\[email protected](TipoDispositivo)\nclass TipoDispositivoAdmin(admin.ModelAdmin):\n readonly_fields = (\"rotulo_prefixo_texto\", \"rotulo_sufixo_texto\",)\n list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']\n", "path": "sapl/compilacao/admin.py"}]}
721
175
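The sapl record's core pattern is marking specific model fields read-only in the Django admin. A generic sketch: the import path `myapp.models` is a placeholder, and only the model and field names come from the record above:

```python
from django.contrib import admin

from myapp.models import TipoDispositivo  # placeholder import path


@admin.register(TipoDispositivo)
class TipoDispositivoAdmin(admin.ModelAdmin):
    # Shown on the change form but not editable through the admin UI.
    readonly_fields = ("rotulo_prefixo_texto", "rotulo_sufixo_texto")
```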
gh_patches_debug_35512
rasdani/github-patches
git_diff
pyro-ppl__pyro-1070
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pyro.poutine module is absent from sphinx docs For example, there is no documentation for `pyro.poutine.block()` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyro/poutine/__init__.py` Content: ``` 1 from __future__ import absolute_import, division, print_function 2 3 import functools 4 from six.moves import xrange 5 6 from pyro.poutine import util 7 8 from .block_messenger import BlockMessenger 9 from .condition_messenger import ConditionMessenger 10 from .enumerate_messenger import EnumerateMessenger # noqa: F401 11 from .escape_messenger import EscapeMessenger 12 from .indep_messenger import IndepMessenger # noqa: F401 13 from .infer_config_messenger import InferConfigMessenger 14 from .lift_messenger import LiftMessenger 15 from .messenger import _PYRO_STACK, Messenger # noqa: F401 16 from .replay_messenger import ReplayMessenger 17 from .scale_messenger import ScaleMessenger 18 from .trace import Trace # noqa: F401 19 from .trace_messenger import TraceMessenger 20 21 ############################################ 22 # Begin primitive operations 23 ############################################ 24 25 26 def trace(fn=None, graph_type=None, param_only=None): 27 """ 28 :param fn: a stochastic function (callable containing pyro primitive calls) 29 :param graph_type: string that specifies the kind of graph to construct 30 :param param_only: if true, only records params and not samples 31 :returns: stochastic function wrapped in a TraceHandler 32 :rtype: pyro.poutine.TraceHandler 33 34 Alias for TraceHandler constructor. 35 36 Given a callable that contains Pyro primitive calls, return a TraceHandler callable 37 that records the inputs and outputs to those primitive calls 38 and their dependencies. 39 40 Adds trace data structure site constructors to primitive stacks 41 """ 42 msngr = TraceMessenger(graph_type=graph_type, param_only=param_only) 43 return msngr(fn) if fn is not None else msngr 44 45 46 def replay(fn=None, trace=None, sites=None): 47 """ 48 :param fn: a stochastic function (callable containing pyro primitive calls) 49 :param trace: a Trace data structure to replay against 50 :param sites: list or dict of names of sample sites in fn to replay against, 51 defaulting to all sites 52 :returns: stochastic function wrapped in a ReplayHandler 53 :rtype: pyro.poutine.ReplayHandler 54 55 Alias for ReplayHandler constructor. 56 57 Given a callable that contains Pyro primitive calls, 58 return a callable that runs the original, reusing the values at sites in trace 59 at those sites in the new trace 60 """ 61 msngr = ReplayMessenger(trace=trace, sites=sites) 62 return msngr(fn) if fn is not None else msngr 63 64 65 def lift(fn=None, prior=None): 66 """ 67 :param fn: function whose parameters will be lifted to random values 68 :param prior: prior function in the form of a Distribution or a dict of stochastic fns 69 :returns: stochastic function wrapped in LiftHandler 70 71 Given a stochastic function with param calls and a prior distribution, 72 create a stochastic function where all param calls are replaced by sampling from prior. 73 Prior should be a callable or a dict of names to callables. 
74 """ 75 msngr = LiftMessenger(prior=prior) 76 return msngr(fn) if fn is not None else msngr 77 78 79 def block(fn=None, hide=None, expose=None, hide_types=None, expose_types=None): 80 """ 81 :param fn: a stochastic function (callable containing pyro primitive calls) 82 :param hide: list of site names to hide 83 :param expose: list of site names to be exposed while all others hidden 84 :param hide_types: list of site types to be hidden 85 :param expose_types: list of site types to be exposed while all others hidden 86 :returns: stochastic function wrapped in a BlockHandler 87 :rtype: pyro.poutine.BlockHandler 88 89 Alias for BlockHandler constructor. 90 91 Given a callable that contains Pyro primitive calls, 92 selectively hide some of those calls from poutines higher up the stack 93 """ 94 msngr = BlockMessenger(hide=hide, expose=expose, 95 hide_types=hide_types, expose_types=expose_types) 96 return msngr(fn) if fn is not None else msngr 97 98 99 def escape(fn=None, escape_fn=None): 100 """ 101 :param fn: a stochastic function (callable containing pyro primitive calls) 102 :param escape_fn: function that takes a partial trace and a site 103 and returns a boolean value to decide whether to exit at that site 104 :returns: stochastic function wrapped in EscapeHandler 105 106 Alias for EscapeHandler constructor. 107 108 Given a callable that contains Pyro primitive calls, 109 evaluate escape_fn on each site, and if the result is True, 110 raise a NonlocalExit exception that stops execution 111 and returns the offending site. 112 """ 113 msngr = EscapeMessenger(escape_fn) 114 return msngr(fn) if fn is not None else msngr 115 116 117 def condition(fn=None, data=None): 118 """ 119 :param fn: a stochastic function (callable containing pyro primitive calls) 120 :param data: a dict or a Trace 121 :returns: stochastic function wrapped in a ConditionHandler 122 :rtype: pyro.poutine.ConditionHandler 123 124 Alias for ConditionHandler constructor. 125 126 Given a stochastic function with some sample statements 127 and a dictionary of observations at names, 128 change the sample statements at those names into observes 129 with those values 130 """ 131 msngr = ConditionMessenger(data=data) 132 return msngr(fn) if fn is not None else msngr 133 134 135 def infer_config(fn=None, config_fn=None): 136 """ 137 :param fn: a stochastic function (callable containing pyro primitive calls) 138 :param config_fn: a callable taking a site and returning an infer dict 139 140 Alias for :class:`~pyro.poutine.infer_config_messenger.InferConfigHandler` constructor. 141 142 Given a callable that contains Pyro primitive calls 143 and a callable taking a trace site and returning a dictionary, 144 updates the value of the infer kwarg at a sample site to config_fn(site) 145 """ 146 msngr = InferConfigMessenger(config_fn) 147 return msngr(fn) if fn is not None else msngr 148 149 150 def scale(fn=None, scale=None): 151 """ 152 :param scale: a positive scaling factor 153 :rtype: pyro.poutine.ScaleMessenger 154 155 Alias for ScaleMessenger constructor. 156 157 Given a stochastic function with some sample statements and a positive 158 scale factor, scale the score of all sample and observe sites in the 159 function. 160 """ 161 msngr = ScaleMessenger(scale=scale) 162 # XXX temporary compatibility fix 163 return msngr(fn) if callable(fn) else msngr 164 165 166 def indep(fn=None, name=None, size=None, dim=None): 167 """ 168 Alias for IndepMessenger constructor. 
169 170 This messenger keeps track of stack of independence information declared by 171 nested ``irange`` and ``iarange`` contexts. This information is stored in 172 a ``cond_indep_stack`` at each sample/observe site for consumption by 173 ``TraceMessenger``. 174 """ 175 msngr = IndepMessenger(name=name, size=size, dim=dim) 176 return msngr(fn) if fn is not None else msngr 177 178 179 def enum(fn=None, first_available_dim=None): 180 """ 181 :param int first_available_dim: The first tensor dimension (counting 182 from the right) that is available for parallel enumeration. This 183 dimension and all dimensions left may be used internally by Pyro. 184 185 Alias for EnumerateMessenger constructor. 186 187 Enumerates in parallel over discrete sample sites marked 188 ``infer={"enumerate": "parallel"}``. 189 """ 190 msngr = EnumerateMessenger(first_available_dim=first_available_dim) 191 return msngr(fn) if fn is not None else msngr 192 193 194 ######################################### 195 # Begin composite operations 196 ######################################### 197 198 def do(fn=None, data=None): 199 """ 200 :param fn: a stochastic function (callable containing pyro primitive calls) 201 :param data: a dict or a Trace 202 :returns: stochastic function wrapped in a BlockHandler and ConditionHandler 203 :rtype: pyro.poutine.BlockHandler 204 205 Given a stochastic function with some sample statements 206 and a dictionary of values at names, 207 set the return values of those sites equal to the values 208 and hide them from the rest of the stack 209 as if they were hard-coded to those values 210 by using BlockHandler 211 """ 212 def wrapper(wrapped): 213 return block(condition(wrapped, data=data), hide=list(data.keys())) 214 return wrapper(fn) if fn is not None else wrapper 215 216 217 def queue(fn=None, queue=None, max_tries=None, 218 extend_fn=None, escape_fn=None, num_samples=None): 219 """ 220 :param fn: a stochastic function (callable containing pyro primitive calls) 221 :param queue: a queue data structure like multiprocessing.Queue to hold partial traces 222 :param max_tries: maximum number of attempts to compute a single complete trace 223 :param extend_fn: function (possibly stochastic) that takes a partial trace and a site 224 and returns a list of extended traces 225 :param escape_fn: function (possibly stochastic) that takes a partial trace and a site 226 and returns a boolean value to decide whether to exit 227 :param num_samples: optional number of extended traces for extend_fn to return 228 :returns: stochastic function wrapped in poutine logic 229 230 Given a stochastic function and a queue, 231 return a return value from a complete trace in the queue 232 """ 233 234 if max_tries is None: 235 max_tries = int(1e6) 236 237 if extend_fn is None: 238 extend_fn = util.enum_extend 239 240 if escape_fn is None: 241 escape_fn = util.discrete_escape 242 243 if num_samples is None: 244 num_samples = -1 245 246 def wrapper(wrapped): 247 def _fn(*args, **kwargs): 248 249 for i in xrange(max_tries): 250 assert not queue.empty(), \ 251 "trying to get() from an empty queue will deadlock" 252 253 next_trace = queue.get() 254 try: 255 ftr = trace(escape(replay(wrapped, trace=next_trace), 256 escape_fn=functools.partial(escape_fn, 257 next_trace))) 258 return ftr(*args, **kwargs) 259 except util.NonlocalExit as site_container: 260 site_container.reset_stack() 261 for tr in extend_fn(ftr.trace.copy(), site_container.site, 262 num_samples=num_samples): 263 queue.put(tr) 264 265 raise ValueError("max 
tries ({}) exceeded".format(str(max_tries))) 266 return _fn 267 268 return wrapper(fn) if fn is not None else wrapper 269 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyro/poutine/__init__.py b/pyro/poutine/__init__.py --- a/pyro/poutine/__init__.py +++ b/pyro/poutine/__init__.py @@ -48,7 +48,7 @@ :param fn: a stochastic function (callable containing pyro primitive calls) :param trace: a Trace data structure to replay against :param sites: list or dict of names of sample sites in fn to replay against, - defaulting to all sites + defaulting to all sites :returns: stochastic function wrapped in a ReplayHandler :rtype: pyro.poutine.ReplayHandler @@ -99,8 +99,8 @@ def escape(fn=None, escape_fn=None): """ :param fn: a stochastic function (callable containing pyro primitive calls) - :param escape_fn: function that takes a partial trace and a site - and returns a boolean value to decide whether to exit at that site + :param escape_fn: function that takes a partial trace and a site, + and returns a boolean value to decide whether to exit at that site :returns: stochastic function wrapped in EscapeHandler Alias for EscapeHandler constructor. @@ -220,10 +220,10 @@ :param fn: a stochastic function (callable containing pyro primitive calls) :param queue: a queue data structure like multiprocessing.Queue to hold partial traces :param max_tries: maximum number of attempts to compute a single complete trace - :param extend_fn: function (possibly stochastic) that takes a partial trace and a site - and returns a list of extended traces - :param escape_fn: function (possibly stochastic) that takes a partial trace and a site - and returns a boolean value to decide whether to exit + :param extend_fn: function (possibly stochastic) that takes a partial trace and a site, + and returns a list of extended traces + :param escape_fn: function (possibly stochastic) that takes a partial trace and a site, + and returns a boolean value to decide whether to exit :param num_samples: optional number of extended traces for extend_fn to return :returns: stochastic function wrapped in poutine logic
{"golden_diff": "diff --git a/pyro/poutine/__init__.py b/pyro/poutine/__init__.py\n--- a/pyro/poutine/__init__.py\n+++ b/pyro/poutine/__init__.py\n@@ -48,7 +48,7 @@\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param trace: a Trace data structure to replay against\n :param sites: list or dict of names of sample sites in fn to replay against,\n- defaulting to all sites\n+ defaulting to all sites\n :returns: stochastic function wrapped in a ReplayHandler\n :rtype: pyro.poutine.ReplayHandler\n \n@@ -99,8 +99,8 @@\n def escape(fn=None, escape_fn=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n- :param escape_fn: function that takes a partial trace and a site\n- and returns a boolean value to decide whether to exit at that site\n+ :param escape_fn: function that takes a partial trace and a site,\n+ and returns a boolean value to decide whether to exit at that site\n :returns: stochastic function wrapped in EscapeHandler\n \n Alias for EscapeHandler constructor.\n@@ -220,10 +220,10 @@\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param queue: a queue data structure like multiprocessing.Queue to hold partial traces\n :param max_tries: maximum number of attempts to compute a single complete trace\n- :param extend_fn: function (possibly stochastic) that takes a partial trace and a site\n- and returns a list of extended traces\n- :param escape_fn: function (possibly stochastic) that takes a partial trace and a site\n- and returns a boolean value to decide whether to exit\n+ :param extend_fn: function (possibly stochastic) that takes a partial trace and a site,\n+ and returns a list of extended traces\n+ :param escape_fn: function (possibly stochastic) that takes a partial trace and a site,\n+ and returns a boolean value to decide whether to exit\n :param num_samples: optional number of extended traces for extend_fn to return\n :returns: stochastic function wrapped in poutine logic\n", "issue": "pyro.poutine module is absent from sphinx docs\nFor example, there is no documentation for `pyro.poutine.block()`\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport functools\nfrom six.moves import xrange\n\nfrom pyro.poutine import util\n\nfrom .block_messenger import BlockMessenger\nfrom .condition_messenger import ConditionMessenger\nfrom .enumerate_messenger import EnumerateMessenger # noqa: F401\nfrom .escape_messenger import EscapeMessenger\nfrom .indep_messenger import IndepMessenger # noqa: F401\nfrom .infer_config_messenger import InferConfigMessenger\nfrom .lift_messenger import LiftMessenger\nfrom .messenger import _PYRO_STACK, Messenger # noqa: F401\nfrom .replay_messenger import ReplayMessenger\nfrom .scale_messenger import ScaleMessenger\nfrom .trace import Trace # noqa: F401\nfrom .trace_messenger import TraceMessenger\n\n############################################\n# Begin primitive operations\n############################################\n\n\ndef trace(fn=None, graph_type=None, param_only=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param graph_type: string that specifies the kind of graph to construct\n :param param_only: if true, only records params and not samples\n :returns: stochastic function wrapped in a TraceHandler\n :rtype: pyro.poutine.TraceHandler\n\n Alias for TraceHandler constructor.\n\n Given a callable that contains Pyro primitive calls, return a 
TraceHandler callable\n that records the inputs and outputs to those primitive calls\n and their dependencies.\n\n Adds trace data structure site constructors to primitive stacks\n \"\"\"\n msngr = TraceMessenger(graph_type=graph_type, param_only=param_only)\n return msngr(fn) if fn is not None else msngr\n\n\ndef replay(fn=None, trace=None, sites=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param trace: a Trace data structure to replay against\n :param sites: list or dict of names of sample sites in fn to replay against,\n defaulting to all sites\n :returns: stochastic function wrapped in a ReplayHandler\n :rtype: pyro.poutine.ReplayHandler\n\n Alias for ReplayHandler constructor.\n\n Given a callable that contains Pyro primitive calls,\n return a callable that runs the original, reusing the values at sites in trace\n at those sites in the new trace\n \"\"\"\n msngr = ReplayMessenger(trace=trace, sites=sites)\n return msngr(fn) if fn is not None else msngr\n\n\ndef lift(fn=None, prior=None):\n \"\"\"\n :param fn: function whose parameters will be lifted to random values\n :param prior: prior function in the form of a Distribution or a dict of stochastic fns\n :returns: stochastic function wrapped in LiftHandler\n\n Given a stochastic function with param calls and a prior distribution,\n create a stochastic function where all param calls are replaced by sampling from prior.\n Prior should be a callable or a dict of names to callables.\n \"\"\"\n msngr = LiftMessenger(prior=prior)\n return msngr(fn) if fn is not None else msngr\n\n\ndef block(fn=None, hide=None, expose=None, hide_types=None, expose_types=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param hide: list of site names to hide\n :param expose: list of site names to be exposed while all others hidden\n :param hide_types: list of site types to be hidden\n :param expose_types: list of site types to be exposed while all others hidden\n :returns: stochastic function wrapped in a BlockHandler\n :rtype: pyro.poutine.BlockHandler\n\n Alias for BlockHandler constructor.\n\n Given a callable that contains Pyro primitive calls,\n selectively hide some of those calls from poutines higher up the stack\n \"\"\"\n msngr = BlockMessenger(hide=hide, expose=expose,\n hide_types=hide_types, expose_types=expose_types)\n return msngr(fn) if fn is not None else msngr\n\n\ndef escape(fn=None, escape_fn=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param escape_fn: function that takes a partial trace and a site\n and returns a boolean value to decide whether to exit at that site\n :returns: stochastic function wrapped in EscapeHandler\n\n Alias for EscapeHandler constructor.\n\n Given a callable that contains Pyro primitive calls,\n evaluate escape_fn on each site, and if the result is True,\n raise a NonlocalExit exception that stops execution\n and returns the offending site.\n \"\"\"\n msngr = EscapeMessenger(escape_fn)\n return msngr(fn) if fn is not None else msngr\n\n\ndef condition(fn=None, data=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param data: a dict or a Trace\n :returns: stochastic function wrapped in a ConditionHandler\n :rtype: pyro.poutine.ConditionHandler\n\n Alias for ConditionHandler constructor.\n\n Given a stochastic function with some sample statements\n and a dictionary of observations at names,\n change the sample 
statements at those names into observes\n with those values\n \"\"\"\n msngr = ConditionMessenger(data=data)\n return msngr(fn) if fn is not None else msngr\n\n\ndef infer_config(fn=None, config_fn=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param config_fn: a callable taking a site and returning an infer dict\n\n Alias for :class:`~pyro.poutine.infer_config_messenger.InferConfigHandler` constructor.\n\n Given a callable that contains Pyro primitive calls\n and a callable taking a trace site and returning a dictionary,\n updates the value of the infer kwarg at a sample site to config_fn(site)\n \"\"\"\n msngr = InferConfigMessenger(config_fn)\n return msngr(fn) if fn is not None else msngr\n\n\ndef scale(fn=None, scale=None):\n \"\"\"\n :param scale: a positive scaling factor\n :rtype: pyro.poutine.ScaleMessenger\n\n Alias for ScaleMessenger constructor.\n\n Given a stochastic function with some sample statements and a positive\n scale factor, scale the score of all sample and observe sites in the\n function.\n \"\"\"\n msngr = ScaleMessenger(scale=scale)\n # XXX temporary compatibility fix\n return msngr(fn) if callable(fn) else msngr\n\n\ndef indep(fn=None, name=None, size=None, dim=None):\n \"\"\"\n Alias for IndepMessenger constructor.\n\n This messenger keeps track of stack of independence information declared by\n nested ``irange`` and ``iarange`` contexts. This information is stored in\n a ``cond_indep_stack`` at each sample/observe site for consumption by\n ``TraceMessenger``.\n \"\"\"\n msngr = IndepMessenger(name=name, size=size, dim=dim)\n return msngr(fn) if fn is not None else msngr\n\n\ndef enum(fn=None, first_available_dim=None):\n \"\"\"\n :param int first_available_dim: The first tensor dimension (counting\n from the right) that is available for parallel enumeration. 
This\n dimension and all dimensions left may be used internally by Pyro.\n\n Alias for EnumerateMessenger constructor.\n\n Enumerates in parallel over discrete sample sites marked\n ``infer={\"enumerate\": \"parallel\"}``.\n \"\"\"\n msngr = EnumerateMessenger(first_available_dim=first_available_dim)\n return msngr(fn) if fn is not None else msngr\n\n\n#########################################\n# Begin composite operations\n#########################################\n\ndef do(fn=None, data=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param data: a dict or a Trace\n :returns: stochastic function wrapped in a BlockHandler and ConditionHandler\n :rtype: pyro.poutine.BlockHandler\n\n Given a stochastic function with some sample statements\n and a dictionary of values at names,\n set the return values of those sites equal to the values\n and hide them from the rest of the stack\n as if they were hard-coded to those values\n by using BlockHandler\n \"\"\"\n def wrapper(wrapped):\n return block(condition(wrapped, data=data), hide=list(data.keys()))\n return wrapper(fn) if fn is not None else wrapper\n\n\ndef queue(fn=None, queue=None, max_tries=None,\n extend_fn=None, escape_fn=None, num_samples=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param queue: a queue data structure like multiprocessing.Queue to hold partial traces\n :param max_tries: maximum number of attempts to compute a single complete trace\n :param extend_fn: function (possibly stochastic) that takes a partial trace and a site\n and returns a list of extended traces\n :param escape_fn: function (possibly stochastic) that takes a partial trace and a site\n and returns a boolean value to decide whether to exit\n :param num_samples: optional number of extended traces for extend_fn to return\n :returns: stochastic function wrapped in poutine logic\n\n Given a stochastic function and a queue,\n return a return value from a complete trace in the queue\n \"\"\"\n\n if max_tries is None:\n max_tries = int(1e6)\n\n if extend_fn is None:\n extend_fn = util.enum_extend\n\n if escape_fn is None:\n escape_fn = util.discrete_escape\n\n if num_samples is None:\n num_samples = -1\n\n def wrapper(wrapped):\n def _fn(*args, **kwargs):\n\n for i in xrange(max_tries):\n assert not queue.empty(), \\\n \"trying to get() from an empty queue will deadlock\"\n\n next_trace = queue.get()\n try:\n ftr = trace(escape(replay(wrapped, trace=next_trace),\n escape_fn=functools.partial(escape_fn,\n next_trace)))\n return ftr(*args, **kwargs)\n except util.NonlocalExit as site_container:\n site_container.reset_stack()\n for tr in extend_fn(ftr.trace.copy(), site_container.site,\n num_samples=num_samples):\n queue.put(tr)\n\n raise ValueError(\"max tries ({}) exceeded\".format(str(max_tries)))\n return _fn\n\n return wrapper(fn) if fn is not None else wrapper\n", "path": "pyro/poutine/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport functools\nfrom six.moves import xrange\n\nfrom pyro.poutine import util\n\nfrom .block_messenger import BlockMessenger\nfrom .condition_messenger import ConditionMessenger\nfrom .enumerate_messenger import EnumerateMessenger # noqa: F401\nfrom .escape_messenger import EscapeMessenger\nfrom .indep_messenger import IndepMessenger # noqa: F401\nfrom .infer_config_messenger import InferConfigMessenger\nfrom .lift_messenger import LiftMessenger\nfrom .messenger import 
_PYRO_STACK, Messenger # noqa: F401\nfrom .replay_messenger import ReplayMessenger\nfrom .scale_messenger import ScaleMessenger\nfrom .trace import Trace # noqa: F401\nfrom .trace_messenger import TraceMessenger\n\n############################################\n# Begin primitive operations\n############################################\n\n\ndef trace(fn=None, graph_type=None, param_only=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param graph_type: string that specifies the kind of graph to construct\n :param param_only: if true, only records params and not samples\n :returns: stochastic function wrapped in a TraceHandler\n :rtype: pyro.poutine.TraceHandler\n\n Alias for TraceHandler constructor.\n\n Given a callable that contains Pyro primitive calls, return a TraceHandler callable\n that records the inputs and outputs to those primitive calls\n and their dependencies.\n\n Adds trace data structure site constructors to primitive stacks\n \"\"\"\n msngr = TraceMessenger(graph_type=graph_type, param_only=param_only)\n return msngr(fn) if fn is not None else msngr\n\n\ndef replay(fn=None, trace=None, sites=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param trace: a Trace data structure to replay against\n :param sites: list or dict of names of sample sites in fn to replay against,\n defaulting to all sites\n :returns: stochastic function wrapped in a ReplayHandler\n :rtype: pyro.poutine.ReplayHandler\n\n Alias for ReplayHandler constructor.\n\n Given a callable that contains Pyro primitive calls,\n return a callable that runs the original, reusing the values at sites in trace\n at those sites in the new trace\n \"\"\"\n msngr = ReplayMessenger(trace=trace, sites=sites)\n return msngr(fn) if fn is not None else msngr\n\n\ndef lift(fn=None, prior=None):\n \"\"\"\n :param fn: function whose parameters will be lifted to random values\n :param prior: prior function in the form of a Distribution or a dict of stochastic fns\n :returns: stochastic function wrapped in LiftHandler\n\n Given a stochastic function with param calls and a prior distribution,\n create a stochastic function where all param calls are replaced by sampling from prior.\n Prior should be a callable or a dict of names to callables.\n \"\"\"\n msngr = LiftMessenger(prior=prior)\n return msngr(fn) if fn is not None else msngr\n\n\ndef block(fn=None, hide=None, expose=None, hide_types=None, expose_types=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param hide: list of site names to hide\n :param expose: list of site names to be exposed while all others hidden\n :param hide_types: list of site types to be hidden\n :param expose_types: list of site types to be exposed while all others hidden\n :returns: stochastic function wrapped in a BlockHandler\n :rtype: pyro.poutine.BlockHandler\n\n Alias for BlockHandler constructor.\n\n Given a callable that contains Pyro primitive calls,\n selectively hide some of those calls from poutines higher up the stack\n \"\"\"\n msngr = BlockMessenger(hide=hide, expose=expose,\n hide_types=hide_types, expose_types=expose_types)\n return msngr(fn) if fn is not None else msngr\n\n\ndef escape(fn=None, escape_fn=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param escape_fn: function that takes a partial trace and a site,\n and returns a boolean value to decide whether to exit at that site\n :returns: 
stochastic function wrapped in EscapeHandler\n\n Alias for EscapeHandler constructor.\n\n Given a callable that contains Pyro primitive calls,\n evaluate escape_fn on each site, and if the result is True,\n raise a NonlocalExit exception that stops execution\n and returns the offending site.\n \"\"\"\n msngr = EscapeMessenger(escape_fn)\n return msngr(fn) if fn is not None else msngr\n\n\ndef condition(fn=None, data=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param data: a dict or a Trace\n :returns: stochastic function wrapped in a ConditionHandler\n :rtype: pyro.poutine.ConditionHandler\n\n Alias for ConditionHandler constructor.\n\n Given a stochastic function with some sample statements\n and a dictionary of observations at names,\n change the sample statements at those names into observes\n with those values\n \"\"\"\n msngr = ConditionMessenger(data=data)\n return msngr(fn) if fn is not None else msngr\n\n\ndef infer_config(fn=None, config_fn=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param config_fn: a callable taking a site and returning an infer dict\n\n Alias for :class:`~pyro.poutine.infer_config_messenger.InferConfigHandler` constructor.\n\n Given a callable that contains Pyro primitive calls\n and a callable taking a trace site and returning a dictionary,\n updates the value of the infer kwarg at a sample site to config_fn(site)\n \"\"\"\n msngr = InferConfigMessenger(config_fn)\n return msngr(fn) if fn is not None else msngr\n\n\ndef scale(fn=None, scale=None):\n \"\"\"\n :param scale: a positive scaling factor\n :rtype: pyro.poutine.ScaleMessenger\n\n Alias for ScaleMessenger constructor.\n\n Given a stochastic function with some sample statements and a positive\n scale factor, scale the score of all sample and observe sites in the\n function.\n \"\"\"\n msngr = ScaleMessenger(scale=scale)\n # XXX temporary compatibility fix\n return msngr(fn) if callable(fn) else msngr\n\n\ndef indep(fn=None, name=None, size=None, dim=None):\n \"\"\"\n Alias for IndepMessenger constructor.\n\n This messenger keeps track of stack of independence information declared by\n nested ``irange`` and ``iarange`` contexts. This information is stored in\n a ``cond_indep_stack`` at each sample/observe site for consumption by\n ``TraceMessenger``.\n \"\"\"\n msngr = IndepMessenger(name=name, size=size, dim=dim)\n return msngr(fn) if fn is not None else msngr\n\n\ndef enum(fn=None, first_available_dim=None):\n \"\"\"\n :param int first_available_dim: The first tensor dimension (counting\n from the right) that is available for parallel enumeration. 
This\n dimension and all dimensions left may be used internally by Pyro.\n\n Alias for EnumerateMessenger constructor.\n\n Enumerates in parallel over discrete sample sites marked\n ``infer={\"enumerate\": \"parallel\"}``.\n \"\"\"\n msngr = EnumerateMessenger(first_available_dim=first_available_dim)\n return msngr(fn) if fn is not None else msngr\n\n\n#########################################\n# Begin composite operations\n#########################################\n\ndef do(fn=None, data=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param data: a dict or a Trace\n :returns: stochastic function wrapped in a BlockHandler and ConditionHandler\n :rtype: pyro.poutine.BlockHandler\n\n Given a stochastic function with some sample statements\n and a dictionary of values at names,\n set the return values of those sites equal to the values\n and hide them from the rest of the stack\n as if they were hard-coded to those values\n by using BlockHandler\n \"\"\"\n def wrapper(wrapped):\n return block(condition(wrapped, data=data), hide=list(data.keys()))\n return wrapper(fn) if fn is not None else wrapper\n\n\ndef queue(fn=None, queue=None, max_tries=None,\n extend_fn=None, escape_fn=None, num_samples=None):\n \"\"\"\n :param fn: a stochastic function (callable containing pyro primitive calls)\n :param queue: a queue data structure like multiprocessing.Queue to hold partial traces\n :param max_tries: maximum number of attempts to compute a single complete trace\n :param extend_fn: function (possibly stochastic) that takes a partial trace and a site,\n and returns a list of extended traces\n :param escape_fn: function (possibly stochastic) that takes a partial trace and a site,\n and returns a boolean value to decide whether to exit\n :param num_samples: optional number of extended traces for extend_fn to return\n :returns: stochastic function wrapped in poutine logic\n\n Given a stochastic function and a queue,\n return a return value from a complete trace in the queue\n \"\"\"\n\n if max_tries is None:\n max_tries = int(1e6)\n\n if extend_fn is None:\n extend_fn = util.enum_extend\n\n if escape_fn is None:\n escape_fn = util.discrete_escape\n\n if num_samples is None:\n num_samples = -1\n\n def wrapper(wrapped):\n def _fn(*args, **kwargs):\n\n for i in xrange(max_tries):\n assert not queue.empty(), \\\n \"trying to get() from an empty queue will deadlock\"\n\n next_trace = queue.get()\n try:\n ftr = trace(escape(replay(wrapped, trace=next_trace),\n escape_fn=functools.partial(escape_fn,\n next_trace)))\n return ftr(*args, **kwargs)\n except util.NonlocalExit as site_container:\n site_container.reset_stack()\n for tr in extend_fn(ftr.trace.copy(), site_container.site,\n num_samples=num_samples):\n queue.put(tr)\n\n raise ValueError(\"max tries ({}) exceeded\".format(str(max_tries)))\n return _fn\n\n return wrapper(fn) if fn is not None else wrapper\n", "path": "pyro/poutine/__init__.py"}]}
num_tokens: 3,285
num_tokens_diff: 493

problem_id: gh_patches_debug_22915
source: rasdani/github-patches
task_type: git_diff
in_source_id: ietf-tools__datatracker-4695
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- test_docs_for_ad randomly spuriously fails We occasionally see this failure: ====================================================================== FAIL: test_docs_for_ad (ietf.doc.tests.SearchTests.test_docs_for_ad) ---------------------------------------------------------------------- Traceback (most recent call last): File "/__w/datatracker/datatracker/ietf/doc/tests.py", line 301, in test_docs_for_ad self.assertEqual(r.status_code, 200) AssertionError: 404 != 200 It's clearly a test-harness randomly generated data data mismatch with the view being tested. Investigation is needed to see if this is a real (but obscure corner) bug, or just insufficient constraints on the generated data issue. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ietf/person/factories.py` Content: ``` 1 # Copyright The IETF Trust 2015-2020, All Rights Reserved 2 # -*- coding: utf-8 -*- 3 4 5 import factory 6 from factory.fuzzy import FuzzyChoice 7 import faker 8 import faker.config 9 import os 10 import random 11 import shutil 12 13 from unidecode import unidecode 14 15 from django.conf import settings 16 from django.contrib.auth.models import User 17 from django.utils.text import slugify 18 from django.utils.encoding import force_text 19 20 import debug # pyflakes:ignore 21 22 from ietf.person.models import Person, Alias, Email, PersonalApiKey, PersonApiKeyEvent, PERSON_API_KEY_ENDPOINTS 23 from ietf.person.name import normalize_name, unidecode_name 24 25 26 fake = faker.Factory.create() 27 28 def setup(): 29 global acceptable_fakers 30 # The transliteration of some arabic and devanagari names introduces 31 # non-alphabetic characgters that don't work with the draft author 32 # extraction code, and also don't seem to match the way people with arabic 33 # names romanize arabic names. Exlude those locales from name generation 34 # in order to avoid test failures. 
35 locales = set( [ l for l in faker.config.AVAILABLE_LOCALES if not (l.startswith('ar_') or l.startswith('sg_') or l=='fr_QC') ] ) 36 acceptable_fakers = [faker.Faker(locale) for locale in locales] 37 setup() 38 39 def random_faker(): 40 global acceptable_fakers 41 return random.sample(acceptable_fakers, 1)[0] 42 43 class UserFactory(factory.django.DjangoModelFactory): 44 class Meta: 45 model = User 46 django_get_or_create = ('username',) 47 exclude = ['faker', ] 48 49 faker = factory.LazyFunction(random_faker) 50 first_name = factory.LazyAttribute(lambda o: o.faker.first_name()) 51 last_name = factory.LazyAttribute(lambda o: o.faker.last_name()) 52 email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)), 53 slugify(unidecode(u.last_name)), n, fake.domain_name())) # type: ignore 54 username = factory.LazyAttribute(lambda u: u.email) 55 56 @factory.post_generation 57 def set_password(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument 58 obj.set_password( '%s+password' % obj.username ) # pylint: disable=no-value-for-parameter 59 60 class PersonFactory(factory.django.DjangoModelFactory): 61 class Meta: 62 model = Person 63 64 user = factory.SubFactory(UserFactory) 65 name = factory.LazyAttribute(lambda p: normalize_name('%s %s'%(p.user.first_name, p.user.last_name))) 66 ascii = factory.LazyAttribute(lambda p: force_text(unidecode_name(p.name))) 67 68 class Params: 69 with_bio = factory.Trait(biography = "\n\n".join(fake.paragraphs())) # type: ignore 70 71 @factory.post_generation 72 def default_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument 73 make_alias = getattr(AliasFactory, 'create' if create else 'build') 74 make_alias(person=obj,name=obj.name) 75 make_alias(person=obj,name=obj.ascii) 76 if obj.name != obj.plain_name(): 77 make_alias(person=obj,name=obj.plain_name()) 78 if obj.ascii != obj.plain_ascii(): 79 make_alias(person=obj,name=obj.plain_ascii()) 80 81 @factory.post_generation 82 def default_emails(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument 83 if extracted is None: 84 extracted = True 85 if create and extracted: 86 make_email = getattr(EmailFactory, 'create' if create else 'build') 87 make_email(person=obj, address=obj.user.email) 88 89 @factory.post_generation 90 def default_photo(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument 91 import atexit 92 if obj.biography: 93 photo_name = obj.photo_name() 94 media_name = "%s/%s.jpg" % (settings.PHOTOS_DIRNAME, photo_name) 95 obj.photo = media_name 96 obj.photo_thumb = media_name 97 photosrc = os.path.join(settings.TEST_DATA_DIR, "profile-default.jpg") 98 photodst = os.path.join(settings.PHOTOS_DIR, photo_name + '.jpg') 99 if not os.path.exists(photodst): 100 shutil.copy(photosrc, photodst) 101 def delete_file(file): 102 os.unlink(file) 103 atexit.register(delete_file, photodst) 104 105 class AliasFactory(factory.django.DjangoModelFactory): 106 class Meta: 107 model = Alias 108 109 @classmethod 110 def _create(cls, model_class, *args, **kwargs): 111 person = kwargs['person'] 112 name = kwargs['name'] 113 existing_aliases = set(model_class.objects.filter(person=person).values_list('name', flat=True)) 114 if not name in existing_aliases: 115 obj = model_class(*args, **kwargs) 116 obj.save() 117 return obj 118 119 name = factory.Faker('name') 120 121 def fake_email_address(n): 122 address_field = [ f for f in Email._meta.fields if f.name == 'address'][0] 123 count = 0 124 while True: 125 address = 
'%s.%s_%d@%s' % ( 126 slugify(unidecode(fake.first_name())), 127 slugify(unidecode(fake.last_name())), 128 n, fake.domain_name() 129 ) 130 count += 1 131 if len(address) <= address_field.max_length: 132 break 133 if count >= 10: 134 raise RuntimeError("Failed generating a fake email address to fit in Email.address(max_length=%s)"%address_field.max_lenth) 135 return address 136 137 class EmailFactory(factory.django.DjangoModelFactory): 138 class Meta: 139 model = Email 140 django_get_or_create = ('address',) 141 142 address = factory.Sequence(fake_email_address) 143 person = factory.SubFactory(PersonFactory) 144 145 active = True 146 primary = False 147 origin = factory.LazyAttribute(lambda obj: obj.person.user.username if obj.person.user else '') 148 149 150 class PersonalApiKeyFactory(factory.django.DjangoModelFactory): 151 person = factory.SubFactory(PersonFactory) 152 endpoint = FuzzyChoice(PERSON_API_KEY_ENDPOINTS) 153 154 class Meta: 155 model = PersonalApiKey 156 157 class PersonApiKeyEventFactory(factory.django.DjangoModelFactory): 158 key = factory.SubFactory(PersonalApiKeyFactory) 159 person = factory.LazyAttribute(lambda o: o.key.person) 160 type = 'apikey_login' 161 desc = factory.Faker('sentence', nb_words=6) 162 163 class Meta: 164 model = PersonApiKeyEvent 165 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ietf/person/factories.py b/ietf/person/factories.py --- a/ietf/person/factories.py +++ b/ietf/person/factories.py @@ -11,6 +11,7 @@ import shutil from unidecode import unidecode +from unicodedata import normalize from django.conf import settings from django.contrib.auth.models import User @@ -47,8 +48,9 @@ exclude = ['faker', ] faker = factory.LazyFunction(random_faker) - first_name = factory.LazyAttribute(lambda o: o.faker.first_name()) - last_name = factory.LazyAttribute(lambda o: o.faker.last_name()) + # normalize these i18n Unicode strings in the same way the database does + first_name = factory.LazyAttribute(lambda o: normalize("NFKC", o.faker.first_name())) + last_name = factory.LazyAttribute(lambda o: normalize("NFKC", o.faker.last_name())) email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)), slugify(unidecode(u.last_name)), n, fake.domain_name())) # type: ignore username = factory.LazyAttribute(lambda u: u.email)
{"golden_diff": "diff --git a/ietf/person/factories.py b/ietf/person/factories.py\n--- a/ietf/person/factories.py\n+++ b/ietf/person/factories.py\n@@ -11,6 +11,7 @@\n import shutil\n \n from unidecode import unidecode\n+from unicodedata import normalize\n \n from django.conf import settings\n from django.contrib.auth.models import User\n@@ -47,8 +48,9 @@\n exclude = ['faker', ]\n \n faker = factory.LazyFunction(random_faker)\n- first_name = factory.LazyAttribute(lambda o: o.faker.first_name())\n- last_name = factory.LazyAttribute(lambda o: o.faker.last_name())\n+ # normalize these i18n Unicode strings in the same way the database does\n+ first_name = factory.LazyAttribute(lambda o: normalize(\"NFKC\", o.faker.first_name()))\n+ last_name = factory.LazyAttribute(lambda o: normalize(\"NFKC\", o.faker.last_name()))\n email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)),\n slugify(unidecode(u.last_name)), n, fake.domain_name())) # type: ignore\n username = factory.LazyAttribute(lambda u: u.email)\n", "issue": "test_docs_for_ad randomly spuriously fails\nWe occasionally see this failure:\r\n ======================================================================\r\nFAIL: test_docs_for_ad (ietf.doc.tests.SearchTests.test_docs_for_ad)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/__w/datatracker/datatracker/ietf/doc/tests.py\", line 301, in test_docs_for_ad\r\n self.assertEqual(r.status_code, 200)\r\nAssertionError: 404 != 200\r\n\r\nIt's clearly a test-harness randomly generated data data mismatch with the view being tested. Investigation is needed to see if this is a real (but obscure corner) bug, or just insufficient constraints on the generated data issue. \n", "before_files": [{"content": "# Copyright The IETF Trust 2015-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nimport factory\nfrom factory.fuzzy import FuzzyChoice\nimport faker \nimport faker.config\nimport os\nimport random\nimport shutil\n\nfrom unidecode import unidecode\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils.text import slugify\nfrom django.utils.encoding import force_text\n\nimport debug # pyflakes:ignore\n\nfrom ietf.person.models import Person, Alias, Email, PersonalApiKey, PersonApiKeyEvent, PERSON_API_KEY_ENDPOINTS\nfrom ietf.person.name import normalize_name, unidecode_name\n\n\nfake = faker.Factory.create()\n\ndef setup():\n global acceptable_fakers\n # The transliteration of some arabic and devanagari names introduces\n # non-alphabetic characgters that don't work with the draft author\n # extraction code, and also don't seem to match the way people with arabic\n # names romanize arabic names. 
Exlude those locales from name generation\n # in order to avoid test failures.\n locales = set( [ l for l in faker.config.AVAILABLE_LOCALES if not (l.startswith('ar_') or l.startswith('sg_') or l=='fr_QC') ] )\n acceptable_fakers = [faker.Faker(locale) for locale in locales]\nsetup()\n\ndef random_faker():\n global acceptable_fakers\n return random.sample(acceptable_fakers, 1)[0]\n\nclass UserFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = User\n django_get_or_create = ('username',)\n exclude = ['faker', ]\n\n faker = factory.LazyFunction(random_faker)\n first_name = factory.LazyAttribute(lambda o: o.faker.first_name())\n last_name = factory.LazyAttribute(lambda o: o.faker.last_name())\n email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)),\n slugify(unidecode(u.last_name)), n, fake.domain_name())) # type: ignore\n username = factory.LazyAttribute(lambda u: u.email)\n\n @factory.post_generation\n def set_password(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n obj.set_password( '%s+password' % obj.username ) # pylint: disable=no-value-for-parameter\n\nclass PersonFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Person\n\n user = factory.SubFactory(UserFactory)\n name = factory.LazyAttribute(lambda p: normalize_name('%s %s'%(p.user.first_name, p.user.last_name)))\n ascii = factory.LazyAttribute(lambda p: force_text(unidecode_name(p.name)))\n\n class Params:\n with_bio = factory.Trait(biography = \"\\n\\n\".join(fake.paragraphs())) # type: ignore\n\n @factory.post_generation\n def default_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n make_alias = getattr(AliasFactory, 'create' if create else 'build')\n make_alias(person=obj,name=obj.name)\n make_alias(person=obj,name=obj.ascii)\n if obj.name != obj.plain_name():\n make_alias(person=obj,name=obj.plain_name())\n if obj.ascii != obj.plain_ascii():\n make_alias(person=obj,name=obj.plain_ascii())\n\n @factory.post_generation\n def default_emails(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n if extracted is None:\n extracted = True\n if create and extracted:\n make_email = getattr(EmailFactory, 'create' if create else 'build')\n make_email(person=obj, address=obj.user.email)\n\n @factory.post_generation\n def default_photo(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n import atexit\n if obj.biography:\n photo_name = obj.photo_name()\n media_name = \"%s/%s.jpg\" % (settings.PHOTOS_DIRNAME, photo_name)\n obj.photo = media_name\n obj.photo_thumb = media_name\n photosrc = os.path.join(settings.TEST_DATA_DIR, \"profile-default.jpg\")\n photodst = os.path.join(settings.PHOTOS_DIR, photo_name + '.jpg')\n if not os.path.exists(photodst):\n shutil.copy(photosrc, photodst)\n def delete_file(file):\n os.unlink(file)\n atexit.register(delete_file, photodst)\n\nclass AliasFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Alias\n\n @classmethod\n def _create(cls, model_class, *args, **kwargs):\n person = kwargs['person']\n name = kwargs['name']\n existing_aliases = set(model_class.objects.filter(person=person).values_list('name', flat=True))\n if not name in existing_aliases:\n obj = model_class(*args, **kwargs)\n obj.save()\n return obj\n\n name = factory.Faker('name')\n\ndef fake_email_address(n):\n address_field = [ f for f in Email._meta.fields if f.name == 'address'][0]\n count = 0\n while True:\n address = '%s.%s_%d@%s' % (\n 
slugify(unidecode(fake.first_name())),\n slugify(unidecode(fake.last_name())),\n n, fake.domain_name()\n )\n count += 1\n if len(address) <= address_field.max_length:\n break\n if count >= 10:\n raise RuntimeError(\"Failed generating a fake email address to fit in Email.address(max_length=%s)\"%address_field.max_lenth)\n return address\n\nclass EmailFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Email\n django_get_or_create = ('address',)\n\n address = factory.Sequence(fake_email_address)\n person = factory.SubFactory(PersonFactory)\n\n active = True\n primary = False\n origin = factory.LazyAttribute(lambda obj: obj.person.user.username if obj.person.user else '')\n\n\nclass PersonalApiKeyFactory(factory.django.DjangoModelFactory):\n person = factory.SubFactory(PersonFactory)\n endpoint = FuzzyChoice(PERSON_API_KEY_ENDPOINTS)\n\n class Meta:\n model = PersonalApiKey\n\nclass PersonApiKeyEventFactory(factory.django.DjangoModelFactory):\n key = factory.SubFactory(PersonalApiKeyFactory)\n person = factory.LazyAttribute(lambda o: o.key.person)\n type = 'apikey_login'\n desc = factory.Faker('sentence', nb_words=6)\n\n class Meta:\n model = PersonApiKeyEvent\n", "path": "ietf/person/factories.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2015-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nimport factory\nfrom factory.fuzzy import FuzzyChoice\nimport faker \nimport faker.config\nimport os\nimport random\nimport shutil\n\nfrom unidecode import unidecode\nfrom unicodedata import normalize\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils.text import slugify\nfrom django.utils.encoding import force_text\n\nimport debug # pyflakes:ignore\n\nfrom ietf.person.models import Person, Alias, Email, PersonalApiKey, PersonApiKeyEvent, PERSON_API_KEY_ENDPOINTS\nfrom ietf.person.name import normalize_name, unidecode_name\n\n\nfake = faker.Factory.create()\n\ndef setup():\n global acceptable_fakers\n # The transliteration of some arabic and devanagari names introduces\n # non-alphabetic characgters that don't work with the draft author\n # extraction code, and also don't seem to match the way people with arabic\n # names romanize arabic names. 
Exlude those locales from name generation\n # in order to avoid test failures.\n locales = set( [ l for l in faker.config.AVAILABLE_LOCALES if not (l.startswith('ar_') or l.startswith('sg_') or l=='fr_QC') ] )\n acceptable_fakers = [faker.Faker(locale) for locale in locales]\nsetup()\n\ndef random_faker():\n global acceptable_fakers\n return random.sample(acceptable_fakers, 1)[0]\n\nclass UserFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = User\n django_get_or_create = ('username',)\n exclude = ['faker', ]\n\n faker = factory.LazyFunction(random_faker)\n # normalize these i18n Unicode strings in the same way the database does\n first_name = factory.LazyAttribute(lambda o: normalize(\"NFKC\", o.faker.first_name()))\n last_name = factory.LazyAttribute(lambda o: normalize(\"NFKC\", o.faker.last_name()))\n email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)),\n slugify(unidecode(u.last_name)), n, fake.domain_name())) # type: ignore\n username = factory.LazyAttribute(lambda u: u.email)\n\n @factory.post_generation\n def set_password(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n obj.set_password( '%s+password' % obj.username ) # pylint: disable=no-value-for-parameter\n\nclass PersonFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Person\n\n user = factory.SubFactory(UserFactory)\n name = factory.LazyAttribute(lambda p: normalize_name('%s %s'%(p.user.first_name, p.user.last_name)))\n ascii = factory.LazyAttribute(lambda p: force_text(unidecode_name(p.name)))\n\n class Params:\n with_bio = factory.Trait(biography = \"\\n\\n\".join(fake.paragraphs())) # type: ignore\n\n @factory.post_generation\n def default_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n make_alias = getattr(AliasFactory, 'create' if create else 'build')\n make_alias(person=obj,name=obj.name)\n make_alias(person=obj,name=obj.ascii)\n if obj.name != obj.plain_name():\n make_alias(person=obj,name=obj.plain_name())\n if obj.ascii != obj.plain_ascii():\n make_alias(person=obj,name=obj.plain_ascii())\n\n @factory.post_generation\n def default_emails(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n if extracted is None:\n extracted = True\n if create and extracted:\n make_email = getattr(EmailFactory, 'create' if create else 'build')\n make_email(person=obj, address=obj.user.email)\n\n @factory.post_generation\n def default_photo(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument\n import atexit\n if obj.biography:\n photo_name = obj.photo_name()\n media_name = \"%s/%s.jpg\" % (settings.PHOTOS_DIRNAME, photo_name)\n obj.photo = media_name\n obj.photo_thumb = media_name\n photosrc = os.path.join(settings.TEST_DATA_DIR, \"profile-default.jpg\")\n photodst = os.path.join(settings.PHOTOS_DIR, photo_name + '.jpg')\n if not os.path.exists(photodst):\n shutil.copy(photosrc, photodst)\n def delete_file(file):\n os.unlink(file)\n atexit.register(delete_file, photodst)\n\nclass AliasFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Alias\n\n @classmethod\n def _create(cls, model_class, *args, **kwargs):\n person = kwargs['person']\n name = kwargs['name']\n existing_aliases = set(model_class.objects.filter(person=person).values_list('name', flat=True))\n if not name in existing_aliases:\n obj = model_class(*args, **kwargs)\n obj.save()\n return obj\n\n name = factory.Faker('name')\n\ndef fake_email_address(n):\n address_field = [ f for f in 
Email._meta.fields if f.name == 'address'][0]\n count = 0\n while True:\n address = '%s.%s_%d@%s' % (\n slugify(unidecode(fake.first_name())),\n slugify(unidecode(fake.last_name())),\n n, fake.domain_name()\n )\n count += 1\n if len(address) <= address_field.max_length:\n break\n if count >= 10:\n raise RuntimeError(\"Failed generating a fake email address to fit in Email.address(max_length=%s)\"%address_field.max_lenth)\n return address\n\nclass EmailFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Email\n django_get_or_create = ('address',)\n\n address = factory.Sequence(fake_email_address)\n person = factory.SubFactory(PersonFactory)\n\n active = True\n primary = False\n origin = factory.LazyAttribute(lambda obj: obj.person.user.username if obj.person.user else '')\n\n\nclass PersonalApiKeyFactory(factory.django.DjangoModelFactory):\n person = factory.SubFactory(PersonFactory)\n endpoint = FuzzyChoice(PERSON_API_KEY_ENDPOINTS)\n\n class Meta:\n model = PersonalApiKey\n\nclass PersonApiKeyEventFactory(factory.django.DjangoModelFactory):\n key = factory.SubFactory(PersonalApiKeyFactory)\n person = factory.LazyAttribute(lambda o: o.key.person)\n type = 'apikey_login'\n desc = factory.Faker('sentence', nb_words=6)\n\n class Meta:\n model = PersonApiKeyEvent\n", "path": "ietf/person/factories.py"}]}
num_tokens: 2,244
num_tokens_diff: 282

problem_id: gh_patches_debug_9994
source: rasdani/github-patches
task_type: git_diff
in_source_id: urllib3__urllib3-603
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Really don't spam InsecurePlatformWarning urllib3 should configure its warnings using append=True to avoid overriding the user's preferences as specified with python -W or PYTHONWARNINGS. If this issue were fixed, the user could work around pypa/pip#2681 with ``` export PYTHONWARNINGS="ignore:A true SSLContext object is not available" ``` Additionally, the urllib3 docs are very unclear about why this is considered worth warning the end user about, particularly given that adding this strange message has effectively introduced a bug in hundreds of other projects. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `urllib3/__init__.py` Content: ``` 1 """ 2 urllib3 - Thread-safe connection pooling and re-using. 3 """ 4 5 __author__ = 'Andrey Petrov ([email protected])' 6 __license__ = 'MIT' 7 __version__ = '1.10.2' 8 9 10 from .connectionpool import ( 11 HTTPConnectionPool, 12 HTTPSConnectionPool, 13 connection_from_url 14 ) 15 16 from . import exceptions 17 from .filepost import encode_multipart_formdata 18 from .poolmanager import PoolManager, ProxyManager, proxy_from_url 19 from .response import HTTPResponse 20 from .util.request import make_headers 21 from .util.url import get_host 22 from .util.timeout import Timeout 23 from .util.retry import Retry 24 25 26 # Set default logging handler to avoid "No handler found" warnings. 27 import logging 28 try: # Python 2.7+ 29 from logging import NullHandler 30 except ImportError: 31 class NullHandler(logging.Handler): 32 def emit(self, record): 33 pass 34 35 logging.getLogger(__name__).addHandler(NullHandler()) 36 37 def add_stderr_logger(level=logging.DEBUG): 38 """ 39 Helper for quickly adding a StreamHandler to the logger. Useful for 40 debugging. 41 42 Returns the handler after adding it. 43 """ 44 # This method needs to be in this __init__.py to get the __name__ correct 45 # even if urllib3 is vendored within another package. 46 logger = logging.getLogger(__name__) 47 handler = logging.StreamHandler() 48 handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) 49 logger.addHandler(handler) 50 logger.setLevel(level) 51 logger.debug('Added a stderr logging handler to logger: %s' % __name__) 52 return handler 53 54 # ... Clean up. 55 del NullHandler 56 57 58 import warnings 59 # SecurityWarning's always go off by default. 60 warnings.simplefilter('always', exceptions.SecurityWarning) 61 # InsecurePlatformWarning's don't vary between requests, so we keep it default. 62 warnings.simplefilter('default', exceptions.InsecurePlatformWarning) 63 64 def disable_warnings(category=exceptions.HTTPWarning): 65 """ 66 Helper for quickly disabling all urllib3 warnings. 67 """ 68 warnings.simplefilter('ignore', category) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/urllib3/__init__.py b/urllib3/__init__.py --- a/urllib3/__init__.py +++ b/urllib3/__init__.py @@ -57,9 +57,10 @@ import warnings # SecurityWarning's always go off by default. -warnings.simplefilter('always', exceptions.SecurityWarning) +warnings.simplefilter('always', exceptions.SecurityWarning, append=True) # InsecurePlatformWarning's don't vary between requests, so we keep it default. -warnings.simplefilter('default', exceptions.InsecurePlatformWarning) +warnings.simplefilter('default', exceptions.InsecurePlatformWarning, + append=True) def disable_warnings(category=exceptions.HTTPWarning): """
{"golden_diff": "diff --git a/urllib3/__init__.py b/urllib3/__init__.py\n--- a/urllib3/__init__.py\n+++ b/urllib3/__init__.py\n@@ -57,9 +57,10 @@\n \n import warnings\n # SecurityWarning's always go off by default.\n-warnings.simplefilter('always', exceptions.SecurityWarning)\n+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)\n # InsecurePlatformWarning's don't vary between requests, so we keep it default.\n-warnings.simplefilter('default', exceptions.InsecurePlatformWarning)\n+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,\n+ append=True)\n \n def disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n", "issue": "Really don't spam InsecurePlatformWarning\nurllib3 should configure its warnings using append=True to avoid overriding the user's preferences as specified with python -W or PYTHONWARNINGS.\n\nIf this issue were fixed, the user could work around pypa/pip#2681 with\n\n```\nexport PYTHONWARNINGS=\"ignore:A true SSLContext object is not available\"\n```\n\nAdditionally, the urllib3 docs are very unclear about why this is considered worth warning the end user about, particularly given that adding this strange message has effectively introduced a bug in hundreds of other projects.\n\n", "before_files": [{"content": "\"\"\"\nurllib3 - Thread-safe connection pooling and re-using.\n\"\"\"\n\n__author__ = 'Andrey Petrov ([email protected])'\n__license__ = 'MIT'\n__version__ = '1.10.2'\n\n\nfrom .connectionpool import (\n HTTPConnectionPool,\n HTTPSConnectionPool,\n connection_from_url\n)\n\nfrom . import exceptions\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.url import get_host\nfrom .util.timeout import Timeout\nfrom .util.retry import Retry\n\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\ntry: # Python 2.7+\n from logging import NullHandler\nexcept ImportError:\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug('Added a stderr logging handler to logger: %s' % __name__)\n return handler\n\n# ... 
Clean up.\ndel NullHandler\n\n\nimport warnings\n# SecurityWarning's always go off by default.\nwarnings.simplefilter('always', exceptions.SecurityWarning)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter('default', exceptions.InsecurePlatformWarning)\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter('ignore', category)\n", "path": "urllib3/__init__.py"}], "after_files": [{"content": "\"\"\"\nurllib3 - Thread-safe connection pooling and re-using.\n\"\"\"\n\n__author__ = 'Andrey Petrov ([email protected])'\n__license__ = 'MIT'\n__version__ = '1.10.2'\n\n\nfrom .connectionpool import (\n HTTPConnectionPool,\n HTTPSConnectionPool,\n connection_from_url\n)\n\nfrom . import exceptions\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.url import get_host\nfrom .util.timeout import Timeout\nfrom .util.retry import Retry\n\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\ntry: # Python 2.7+\n from logging import NullHandler\nexcept ImportError:\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug('Added a stderr logging handler to logger: %s' % __name__)\n return handler\n\n# ... Clean up.\ndel NullHandler\n\n\nimport warnings\n# SecurityWarning's always go off by default.\nwarnings.simplefilter('always', exceptions.SecurityWarning, append=True)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter('default', exceptions.InsecurePlatformWarning,\n append=True)\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter('ignore', category)\n", "path": "urllib3/__init__.py"}]}
960
157
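The fix above relies on the `append` flag of the standard-library warnings machinery: without it, a filter installed by a library is inserted at the front of `warnings.filters` and shadows whatever the user configured through `python -W` or `PYTHONWARNINGS`. A small runnable sketch of that ordering effect, using a stand-in warning class rather than urllib3's own:

```python
import warnings


class DemoPlatformWarning(Warning):
    """Stand-in for a library warning category (illustrative name)."""


def emitted_count(library_appends):
    """How many warnings reach the user under a given library policy."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.resetwarnings()
        # The user's preference, e.g. exported via PYTHONWARNINGS before import.
        warnings.filterwarnings("ignore", category=DemoPlatformWarning)
        # The library configuring itself at import time.
        warnings.simplefilter("always", DemoPlatformWarning,
                              append=library_appends)
        warnings.warn("platform looks insecure", DemoPlatformWarning)
        return len(caught)


print(emitted_count(library_appends=False))  # 1: the library filter jumps the queue
print(emitted_count(library_appends=True))   # 0: the user's ignore still wins
```

With `append=True`, as in the golden diff, the library's filter becomes a fallback rather than an override, which is why the `PYTHONWARNINGS="ignore:..."` workaround from the issue starts to take effect.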
gh_patches_debug_17913
rasdani/github-patches
git_diff
Parsl__parsl-1956
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provider base and cluster provider to be added to reference **Is your feature request related to a problem? Please describe.** In the last set of doc updates where we trimmed some of the developer docs we've moved to relying more on the references to point someone to. It looks like the [provider base](https://github.com/Parsl/parsl/blob/master/parsl/providers/provider_base.py) class and [cluster provider](https://github.com/Parsl/parsl/blob/master/parsl/providers/cluster_provider.py) are missing from there. **Describe the solution you'd like** Update docs to add these to the reference. Provider base and cluster provider to be added to reference **Is your feature request related to a problem? Please describe.** In the last set of doc updates where we trimmed some of the developer docs we've moved to relying more on the references to point someone to. It looks like the [provider base](https://github.com/Parsl/parsl/blob/master/parsl/providers/provider_base.py) class and [cluster provider](https://github.com/Parsl/parsl/blob/master/parsl/providers/cluster_provider.py) are missing from there. **Describe the solution you'd like** Update docs to add these to the reference. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parsl/providers/cluster_provider.py` Content: ``` 1 import logging 2 from abc import abstractmethod 3 from string import Template 4 5 from parsl.providers.error import SchedulerMissingArgs, ScriptPathError 6 from parsl.launchers.error import BadLauncher 7 from parsl.providers.provider_base import ExecutionProvider 8 9 logger = logging.getLogger(__name__) 10 11 12 class ClusterProvider(ExecutionProvider): 13 """ This class defines behavior common to all cluster/supercompute-style scheduler systems. 14 15 Parameters 16 ---------- 17 label : str 18 Label for this provider. 19 channel : Channel 20 Channel for accessing this provider. Possible channels include 21 :class:`~parsl.channels.LocalChannel` (the default), 22 :class:`~parsl.channels.SSHChannel`, or 23 :class:`~parsl.channels.SSHInteractiveLoginChannel`. 24 walltime : str 25 Walltime requested per block in HH:MM:SS. 26 launcher : str 27 FIXME 28 cmd_timeout : int 29 Timeout for commands made to the scheduler in seconds 30 31 .. code:: python 32 33 +------------------ 34 | 35 script_string ------->| submit 36 id <--------|---+ 37 | 38 [ ids ] ------->| status 39 [statuses] <--------|----+ 40 | 41 [ ids ] ------->| cancel 42 [cancel] <--------|----+ 43 | 44 +------------------- 45 """ 46 47 def __init__(self, 48 label, 49 channel, 50 nodes_per_block, 51 init_blocks, 52 min_blocks, 53 max_blocks, 54 parallelism, 55 walltime, 56 launcher, 57 cmd_timeout=10): 58 59 self._label = label 60 self.channel = channel 61 self.nodes_per_block = nodes_per_block 62 self.init_blocks = init_blocks 63 self.min_blocks = min_blocks 64 self.max_blocks = max_blocks 65 self.parallelism = parallelism 66 self.launcher = launcher 67 self.walltime = walltime 68 self.cmd_timeout = cmd_timeout 69 if not callable(self.launcher): 70 raise(BadLauncher(self.launcher, 71 "Launcher for executor: {} is of type: {}. 
Expects a parsl.launcher.launcher.Launcher or callable".format( 72 label, type(self.launcher)))) 73 74 self.script_dir = None 75 76 # Dictionary that keeps track of jobs, keyed on job_id 77 self.resources = {} 78 79 def execute_wait(self, cmd, timeout=None): 80 t = self.cmd_timeout 81 if timeout is not None: 82 t = timeout 83 return self.channel.execute_wait(cmd, t) 84 85 def _write_submit_script(self, template, script_filename, job_name, configs): 86 """Generate submit script and write it to a file. 87 88 Args: 89 - template (string) : The template string to be used for the writing submit script 90 - script_filename (string) : Name of the submit script 91 - job_name (string) : job name 92 - configs (dict) : configs that get pushed into the template 93 94 Returns: 95 - True: on success 96 97 Raises: 98 SchedulerMissingArgs : If template is missing args 99 ScriptPathError : Unable to write submit script out 100 """ 101 102 try: 103 submit_script = Template(template).substitute(jobname=job_name, **configs) 104 # submit_script = Template(template).safe_substitute(jobname=job_name, **configs) 105 with open(script_filename, 'w') as f: 106 f.write(submit_script) 107 108 except KeyError as e: 109 logger.error("Missing keys for submit script : %s", e) 110 raise (SchedulerMissingArgs(e.args, self.label)) 111 112 except IOError as e: 113 logger.error("Failed writing to submit script: %s", script_filename) 114 raise (ScriptPathError(script_filename, e)) 115 except Exception as e: 116 print("Template : ", template) 117 print("Args : ", job_name) 118 print("Kwargs : ", configs) 119 logger.error("Uncategorized error: %s", e) 120 raise (e) 121 122 return True 123 124 @abstractmethod 125 def _status(self): 126 pass 127 128 def status(self, job_ids): 129 """ Get the status of a list of jobs identified by the job identifiers 130 returned from the submit request. 131 132 Args: 133 - job_ids (list) : A list of job identifiers 134 135 Returns: 136 - A list of JobStatus objects corresponding to each job_id in the job_ids list. 137 138 Raises: 139 - ExecutionProviderException or its subclasses 140 141 """ 142 if job_ids: 143 self._status() 144 return [self.resources[jid]['status'] for jid in job_ids] 145 146 @property 147 def label(self): 148 return self._label 149 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsl/providers/cluster_provider.py b/parsl/providers/cluster_provider.py --- a/parsl/providers/cluster_provider.py +++ b/parsl/providers/cluster_provider.py @@ -14,18 +14,18 @@ Parameters ---------- - label : str + label : str Label for this provider. - channel : Channel + channel : Channel Channel for accessing this provider. Possible channels include :class:`~parsl.channels.LocalChannel` (the default), :class:`~parsl.channels.SSHChannel`, or :class:`~parsl.channels.SSHInteractiveLoginChannel`. - walltime : str + walltime : str Walltime requested per block in HH:MM:SS. - launcher : str + launcher : str FIXME - cmd_timeout : int + cmd_timeout : int Timeout for commands made to the scheduler in seconds .. code:: python
{"golden_diff": "diff --git a/parsl/providers/cluster_provider.py b/parsl/providers/cluster_provider.py\n--- a/parsl/providers/cluster_provider.py\n+++ b/parsl/providers/cluster_provider.py\n@@ -14,18 +14,18 @@\n \n Parameters\n ----------\n- label : str\n+ label : str\n Label for this provider.\n- channel : Channel\n+ channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n- walltime : str\n+ walltime : str\n Walltime requested per block in HH:MM:SS.\n- launcher : str\n+ launcher : str\n FIXME\n- cmd_timeout : int\n+ cmd_timeout : int\n Timeout for commands made to the scheduler in seconds\n \n .. code:: python\n", "issue": "Provider base and cluster provider to be added to reference\n**Is your feature request related to a problem? Please describe.**\r\n\r\nIn the last set of doc updates where we trimmed some of the developer docs we've moved to relying more on the references to point someone to. It looks like the [provider base](https://github.com/Parsl/parsl/blob/master/parsl/providers/provider_base.py) class and [cluster provider](https://github.com/Parsl/parsl/blob/master/parsl/providers/cluster_provider.py) are missing from there. \r\n\r\n**Describe the solution you'd like**\r\nUpdate docs to add these to the reference.\r\n\nProvider base and cluster provider to be added to reference\n**Is your feature request related to a problem? Please describe.**\r\n\r\nIn the last set of doc updates where we trimmed some of the developer docs we've moved to relying more on the references to point someone to. It looks like the [provider base](https://github.com/Parsl/parsl/blob/master/parsl/providers/provider_base.py) class and [cluster provider](https://github.com/Parsl/parsl/blob/master/parsl/providers/cluster_provider.py) are missing from there. \r\n\r\n**Describe the solution you'd like**\r\nUpdate docs to add these to the reference.\r\n\n", "before_files": [{"content": "import logging\nfrom abc import abstractmethod\nfrom string import Template\n\nfrom parsl.providers.error import SchedulerMissingArgs, ScriptPathError\nfrom parsl.launchers.error import BadLauncher\nfrom parsl.providers.provider_base import ExecutionProvider\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClusterProvider(ExecutionProvider):\n \"\"\" This class defines behavior common to all cluster/supercompute-style scheduler systems.\n\n Parameters\n ----------\n label : str\n Label for this provider.\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n launcher : str\n FIXME\n cmd_timeout : int\n Timeout for commands made to the scheduler in seconds\n\n .. 
code:: python\n\n +------------------\n |\n script_string ------->| submit\n id <--------|---+\n |\n [ ids ] ------->| status\n [statuses] <--------|----+\n |\n [ ids ] ------->| cancel\n [cancel] <--------|----+\n |\n +-------------------\n \"\"\"\n\n def __init__(self,\n label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher,\n cmd_timeout=10):\n\n self._label = label\n self.channel = channel\n self.nodes_per_block = nodes_per_block\n self.init_blocks = init_blocks\n self.min_blocks = min_blocks\n self.max_blocks = max_blocks\n self.parallelism = parallelism\n self.launcher = launcher\n self.walltime = walltime\n self.cmd_timeout = cmd_timeout\n if not callable(self.launcher):\n raise(BadLauncher(self.launcher,\n \"Launcher for executor: {} is of type: {}. Expects a parsl.launcher.launcher.Launcher or callable\".format(\n label, type(self.launcher))))\n\n self.script_dir = None\n\n # Dictionary that keeps track of jobs, keyed on job_id\n self.resources = {}\n\n def execute_wait(self, cmd, timeout=None):\n t = self.cmd_timeout\n if timeout is not None:\n t = timeout\n return self.channel.execute_wait(cmd, t)\n\n def _write_submit_script(self, template, script_filename, job_name, configs):\n \"\"\"Generate submit script and write it to a file.\n\n Args:\n - template (string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n - job_name (string) : job name\n - configs (dict) : configs that get pushed into the template\n\n Returns:\n - True: on success\n\n Raises:\n SchedulerMissingArgs : If template is missing args\n ScriptPathError : Unable to write submit script out\n \"\"\"\n\n try:\n submit_script = Template(template).substitute(jobname=job_name, **configs)\n # submit_script = Template(template).safe_substitute(jobname=job_name, **configs)\n with open(script_filename, 'w') as f:\n f.write(submit_script)\n\n except KeyError as e:\n logger.error(\"Missing keys for submit script : %s\", e)\n raise (SchedulerMissingArgs(e.args, self.label))\n\n except IOError as e:\n logger.error(\"Failed writing to submit script: %s\", script_filename)\n raise (ScriptPathError(script_filename, e))\n except Exception as e:\n print(\"Template : \", template)\n print(\"Args : \", job_name)\n print(\"Kwargs : \", configs)\n logger.error(\"Uncategorized error: %s\", e)\n raise (e)\n\n return True\n\n @abstractmethod\n def _status(self):\n pass\n\n def status(self, job_ids):\n \"\"\" Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n\n Args:\n - job_ids (list) : A list of job identifiers\n\n Returns:\n - A list of JobStatus objects corresponding to each job_id in the job_ids list.\n\n Raises:\n - ExecutionProviderException or its subclasses\n\n \"\"\"\n if job_ids:\n self._status()\n return [self.resources[jid]['status'] for jid in job_ids]\n\n @property\n def label(self):\n return self._label\n", "path": "parsl/providers/cluster_provider.py"}], "after_files": [{"content": "import logging\nfrom abc import abstractmethod\nfrom string import Template\n\nfrom parsl.providers.error import SchedulerMissingArgs, ScriptPathError\nfrom parsl.launchers.error import BadLauncher\nfrom parsl.providers.provider_base import ExecutionProvider\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClusterProvider(ExecutionProvider):\n \"\"\" This class defines behavior common to all cluster/supercompute-style scheduler systems.\n\n Parameters\n 
----------\n label : str\n Label for this provider.\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n launcher : str\n FIXME\n cmd_timeout : int\n Timeout for commands made to the scheduler in seconds\n\n .. code:: python\n\n +------------------\n |\n script_string ------->| submit\n id <--------|---+\n |\n [ ids ] ------->| status\n [statuses] <--------|----+\n |\n [ ids ] ------->| cancel\n [cancel] <--------|----+\n |\n +-------------------\n \"\"\"\n\n def __init__(self,\n label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher,\n cmd_timeout=10):\n\n self._label = label\n self.channel = channel\n self.nodes_per_block = nodes_per_block\n self.init_blocks = init_blocks\n self.min_blocks = min_blocks\n self.max_blocks = max_blocks\n self.parallelism = parallelism\n self.launcher = launcher\n self.walltime = walltime\n self.cmd_timeout = cmd_timeout\n if not callable(self.launcher):\n raise(BadLauncher(self.launcher,\n \"Launcher for executor: {} is of type: {}. Expects a parsl.launcher.launcher.Launcher or callable\".format(\n label, type(self.launcher))))\n\n self.script_dir = None\n\n # Dictionary that keeps track of jobs, keyed on job_id\n self.resources = {}\n\n def execute_wait(self, cmd, timeout=None):\n t = self.cmd_timeout\n if timeout is not None:\n t = timeout\n return self.channel.execute_wait(cmd, t)\n\n def _write_submit_script(self, template, script_filename, job_name, configs):\n \"\"\"Generate submit script and write it to a file.\n\n Args:\n - template (string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n - job_name (string) : job name\n - configs (dict) : configs that get pushed into the template\n\n Returns:\n - True: on success\n\n Raises:\n SchedulerMissingArgs : If template is missing args\n ScriptPathError : Unable to write submit script out\n \"\"\"\n\n try:\n submit_script = Template(template).substitute(jobname=job_name, **configs)\n # submit_script = Template(template).safe_substitute(jobname=job_name, **configs)\n with open(script_filename, 'w') as f:\n f.write(submit_script)\n\n except KeyError as e:\n logger.error(\"Missing keys for submit script : %s\", e)\n raise (SchedulerMissingArgs(e.args, self.label))\n\n except IOError as e:\n logger.error(\"Failed writing to submit script: %s\", script_filename)\n raise (ScriptPathError(script_filename, e))\n except Exception as e:\n print(\"Template : \", template)\n print(\"Args : \", job_name)\n print(\"Kwargs : \", configs)\n logger.error(\"Uncategorized error: %s\", e)\n raise (e)\n\n return True\n\n @abstractmethod\n def _status(self):\n pass\n\n def status(self, job_ids):\n \"\"\" Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n\n Args:\n - job_ids (list) : A list of job identifiers\n\n Returns:\n - A list of JobStatus objects corresponding to each job_id in the job_ids list.\n\n Raises:\n - ExecutionProviderException or its subclasses\n\n \"\"\"\n if job_ids:\n self._status()\n return [self.resources[jid]['status'] for jid in job_ids]\n\n @property\n def label(self):\n return self._label\n", "path": "parsl/providers/cluster_provider.py"}]}
1,869
218
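The golden diff above changes nothing visible: the removed and added `Parameters` lines read identically, so the edit is evidently whitespace cleanup done while wiring `ExecutionProvider` and `ClusterProvider` into the documentation reference. A minimal sketch of the numpydoc docstring convention those classes follow, on an illustrative class rather than the real provider:

```python
class DemoProvider:
    """Toy provider used only to show the numpydoc layout.

    Parameters
    ----------
    label : str
        Label for this provider.
    cmd_timeout : int
        Timeout for scheduler commands, in seconds.
    """

    def __init__(self, label, cmd_timeout=10):
        self.label = label
        self.cmd_timeout = cmd_timeout


help(DemoProvider)  # the Parameters entries render with no stray whitespace
```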
gh_patches_debug_61171
rasdani/github-patches
git_diff
ocadotechnology__aimmo-218
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Socket error: address already in use When running AI:MMO locally, as soon as the main page is loaded, the following error appears in the console. It looks like two workers try to connect to the same port. After this error, the game is executed normally. `Traceback (most recent call last): File "service.py", line 44, in <module> run(host=sys.argv[1], port=int(sys.argv[2]), directory=sys.argv[3]) File "service.py", line 41, in run app.run(host, port) File "/home/ramon/.local/lib/python2.7/site-packages/flask/app.py", line 841, in run run_simple(host, port, self, **options) File "/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py", line 739, in run_simple inner() File "/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py", line 699, in inner fd=fd) File "/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py", line 593, in make_server passthrough_errors, ssl_context, fd=fd) File "/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py", line 504, in __init__ HTTPServer.__init__(self, (host, int(port)), handler) File "/usr/lib/python2.7/SocketServer.py", line 417, in __init__ self.server_bind() File "/usr/lib/python2.7/BaseHTTPServer.py", line 108, in server_bind SocketServer.TCPServer.server_bind(self) File "/usr/lib/python2.7/SocketServer.py", line 431, in server_bind self.socket.bind(self.server_address) File "/usr/lib/python2.7/socket.py", line 228, in meth return getattr(self._sock,name)(*args) socket.error: [Errno 98] Address already in use ` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `aimmo-game/simulation/worker_manager.py` Content: ``` 1 import atexit 2 import itertools 3 import json 4 import logging 5 import os 6 import subprocess 7 import tempfile 8 import threading 9 import time 10 11 import requests 12 from eventlet.greenpool import GreenPool 13 from eventlet.semaphore import Semaphore 14 from pykube import HTTPClient 15 from pykube import KubeConfig 16 from pykube import Pod 17 18 LOGGER = logging.getLogger(__name__) 19 20 21 class _WorkerManagerData(object): 22 """ 23 This class is thread safe 24 """ 25 26 def __init__(self, game_state, user_codes): 27 self._game_state = game_state 28 self._user_codes = user_codes 29 self._lock = Semaphore() 30 31 def _remove_avatar(self, user_id): 32 assert self._lock.locked 33 self._game_state.remove_avatar(user_id) 34 del self._user_codes[user_id] 35 36 def remove_user_if_code_is_different(self, user): 37 with self._lock: 38 existing_code = self._user_codes.get(user['id'], None) 39 if existing_code != user['code']: 40 # Remove avatar from the game, so it stops being called for turns 41 if existing_code is not None: 42 self._remove_avatar(user['id']) 43 return True 44 else: 45 return False 46 47 def add_avatar(self, user, worker_url): 48 with self._lock: 49 # Add avatar back into game 50 self._game_state.add_avatar( 51 user_id=user['id'], worker_url="%s/turn/" % worker_url) 52 53 def set_code(self, user): 54 with self._lock: 55 self._user_codes[user['id']] = user['code'] 56 57 def get_code(self, player_id): 58 with self._lock: 59 return self._user_codes[player_id] 60 61 def remove_unknown_avatars(self, known_user_ids): 62 with self._lock: 63 unknown_user_ids = set(self._user_codes) - frozenset(known_user_ids) 64 for u in unknown_user_ids: 65 self._remove_avatar(u) 66 return 
unknown_user_ids 67 68 def set_main_avatar(self, avatar_id): 69 with self._lock: 70 self._game_state.main_avatar_id = avatar_id 71 72 73 class WorkerManager(threading.Thread): 74 """ 75 Methods of this class must be thread safe unless explicitly stated. 76 """ 77 daemon = True 78 79 def __init__(self, game_state, users_url, port=5000): 80 """ 81 82 :param thread_pool: 83 """ 84 self._data = _WorkerManagerData(game_state, {}) 85 self.users_url = users_url 86 self._pool = GreenPool(size=3) 87 self.port = port 88 super(WorkerManager, self).__init__() 89 90 def get_code(self, player_id): 91 return self._data.get_code(player_id) 92 93 def get_persistent_state(self, player_id): 94 """Get the persistent state for a worker.""" 95 96 return None 97 98 def create_worker(self, player_id): 99 """Create a worker.""" 100 101 raise NotImplemented 102 103 def remove_worker(self, player_id): 104 """Remove a worker for the given player.""" 105 106 raise NotImplemented 107 108 # TODO handle failure 109 def spawn(self, user): 110 # Get persistent state from worker 111 persistent_state = self.get_persistent_state(user['id']) # noqa: F841 112 113 # Kill worker 114 LOGGER.info("Removing worker for user %s" % user['id']) 115 self.remove_worker(user['id']) 116 117 self._data.set_code(user) 118 119 # Spawn worker 120 LOGGER.info("Spawning worker for user %s" % user['id']) 121 worker_url = self.create_worker(user['id']) 122 123 # Add avatar back into game 124 self._data.add_avatar(user, worker_url) 125 LOGGER.info('Added user %s', user['id']) 126 127 def _parallel_map(self, func, iterable_args): 128 list(self._pool.imap(func, iterable_args)) 129 130 def update(self): 131 try: 132 LOGGER.info("Waking up") 133 game_data = requests.get(self.users_url).json() 134 except (requests.RequestException, ValueError) as err: 135 LOGGER.error("Failed to obtain game data : %s", err) 136 else: 137 game = game_data['main'] 138 139 # Remove users with different code 140 users_to_add = [] 141 for user in game['users']: 142 if self._data.remove_user_if_code_is_different(user): 143 users_to_add.append(user) 144 LOGGER.debug("Need to add users: %s" % [x['id'] for x in users_to_add]) 145 146 # Add missing users 147 self._parallel_map(self.spawn, users_to_add) 148 149 # Delete extra users 150 known_avatars = set(user['id'] for user in game['users']) 151 removed_user_ids = self._data.remove_unknown_avatars(known_avatars) 152 LOGGER.debug("Removing users: %s" % removed_user_ids) 153 self._parallel_map(self.remove_worker, removed_user_ids) 154 155 # Update main avatar 156 self._data.set_main_avatar(game_data['main']['main_avatar']) 157 158 def run(self): 159 while True: 160 self.update() 161 LOGGER.info("Sleeping") 162 time.sleep(10) 163 164 165 class LocalWorkerManager(WorkerManager): 166 """Relies on them already being created already.""" 167 168 host = '127.0.0.1' 169 worker_directory = os.path.join( 170 os.path.dirname(__file__), 171 '../../aimmo-game-worker/', 172 ) 173 174 def __init__(self, *args, **kwargs): 175 self.workers = {} 176 self.port_counter = itertools.count(1989) 177 super(LocalWorkerManager, self).__init__(*args, **kwargs) 178 179 def create_worker(self, player_id): 180 assert(player_id not in self.workers) 181 port = self.port_counter.next() 182 env = os.environ.copy() 183 data_dir = tempfile.mkdtemp() 184 185 LOGGER.debug('Data dir is %s', data_dir) 186 data = requests.get("http://127.0.0.1:{}/player/{}".format(self.port, player_id)).json() 187 188 options = data['options'] 189 with 
open('{}/options.json'.format(data_dir), 'w') as options_file: 190 json.dump(options, options_file) 191 192 code = data['code'] 193 with open('{}/avatar.py'.format(data_dir), 'w') as avatar_file: 194 avatar_file.write(code) 195 196 env['PYTHONPATH'] = data_dir 197 198 process = subprocess.Popen(['python', 'service.py', self.host, str(port), str(data_dir)], cwd=self.worker_directory, env=env) 199 atexit.register(process.kill) 200 self.workers[player_id] = process 201 worker_url = 'http://%s:%d' % ( 202 self.host, 203 port, 204 ) 205 LOGGER.info("Worker started for %s, listening at %s", player_id, worker_url) 206 return worker_url 207 208 def remove_worker(self, player_id): 209 if player_id in self.workers: 210 self.workers[player_id].kill() 211 del self.workers[player_id] 212 213 214 class KubernetesWorkerManager(WorkerManager): 215 """Kubernetes worker manager.""" 216 217 def __init__(self, *args, **kwargs): 218 self.api = HTTPClient(KubeConfig.from_service_account()) 219 self.game_id = os.environ['GAME_ID'] 220 self.game_url = os.environ['GAME_URL'] 221 super(KubernetesWorkerManager, self).__init__(*args, **kwargs) 222 223 def create_worker(self, player_id): 224 pod = Pod( 225 self.api, 226 { 227 'kind': 'Pod', 228 'apiVersion': 'v1', 229 'metadata': { 230 'generateName': "aimmo-%s-worker-%s-" % (self.game_id, player_id), 231 'labels': { 232 'app': 'aimmo-game-worker', 233 'game': self.game_id, 234 'player': str(player_id), 235 }, 236 }, 237 'spec': { 238 'containers': [ 239 { 240 'env': [ 241 { 242 'name': 'DATA_URL', 243 'value': "%s/player/%d" % (self.game_url, player_id), 244 }, 245 ], 246 'name': 'aimmo-game-worker', 247 'image': 'ocadotechnology/aimmo-game-worker:%s' % os.environ.get('IMAGE_SUFFIX', 'latest'), 248 'ports': [ 249 { 250 'containerPort': 5000, 251 'protocol': 'TCP' 252 } 253 ], 254 'resources': { 255 'limits': { 256 'cpu': '10m', 257 'memory': '64Mi', 258 }, 259 }, 260 }, 261 ], 262 }, 263 } 264 ) 265 pod.create() 266 iterations = 0 267 while pod.obj['status']['phase'] == 'Pending': 268 if iterations > 30: 269 raise EnvironmentError('Could not start worker %s, details %s' % (player_id, pod.obj)) 270 LOGGER.debug('Waiting for worker %s', player_id) 271 time.sleep(5) 272 pod.reload() 273 iterations += 1 274 worker_url = "http://%s:5000" % pod.obj['status']['podIP'] 275 LOGGER.info("Worker started for %s, listening at %s", player_id, worker_url) 276 return worker_url 277 278 def remove_worker(self, player_id): 279 for pod in Pod.objects(self.api).filter(selector={ 280 'app': 'aimmo-game-worker', 281 'game': self.game_id, 282 'player': str(player_id), 283 }): 284 LOGGER.debug('Removing pod %s', pod.obj['spec']) 285 pod.delete() 286 287 WORKER_MANAGERS = { 288 'local': LocalWorkerManager, 289 'kubernetes': KubernetesWorkerManager, 290 } 291 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/aimmo-game/simulation/worker_manager.py b/aimmo-game/simulation/worker_manager.py --- a/aimmo-game/simulation/worker_manager.py +++ b/aimmo-game/simulation/worker_manager.py @@ -173,7 +173,7 @@ def __init__(self, *args, **kwargs): self.workers = {} - self.port_counter = itertools.count(1989) + self.port_counter = itertools.count(self.port + 10) super(LocalWorkerManager, self).__init__(*args, **kwargs) def create_worker(self, player_id):
{"golden_diff": "diff --git a/aimmo-game/simulation/worker_manager.py b/aimmo-game/simulation/worker_manager.py\n--- a/aimmo-game/simulation/worker_manager.py\n+++ b/aimmo-game/simulation/worker_manager.py\n@@ -173,7 +173,7 @@\n \n def __init__(self, *args, **kwargs):\n self.workers = {}\n- self.port_counter = itertools.count(1989)\n+ self.port_counter = itertools.count(self.port + 10)\n super(LocalWorkerManager, self).__init__(*args, **kwargs)\n \n def create_worker(self, player_id):\n", "issue": "Socket error: address already in use\nWhen running AI:MMO locally, as soon as the main page is loaded, the following error appears in the console. It looks like two workers try to connect to the same port. After this error, the game is executed normally.\r\n\r\n`Traceback (most recent call last):\r\n File \"service.py\", line 44, in <module>\r\n run(host=sys.argv[1], port=int(sys.argv[2]), directory=sys.argv[3])\r\n File \"service.py\", line 41, in run\r\n app.run(host, port)\r\n File \"/home/ramon/.local/lib/python2.7/site-packages/flask/app.py\", line 841, in run\r\n run_simple(host, port, self, **options)\r\n File \"/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py\", line 739, in run_simple\r\n inner()\r\n File \"/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py\", line 699, in inner\r\n fd=fd)\r\n File \"/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py\", line 593, in make_server\r\n passthrough_errors, ssl_context, fd=fd)\r\n File \"/home/ramon/.local/lib/python2.7/site-packages/werkzeug/serving.py\", line 504, in __init__\r\n HTTPServer.__init__(self, (host, int(port)), handler)\r\n File \"/usr/lib/python2.7/SocketServer.py\", line 417, in __init__\r\n self.server_bind()\r\n File \"/usr/lib/python2.7/BaseHTTPServer.py\", line 108, in server_bind\r\n SocketServer.TCPServer.server_bind(self)\r\n File \"/usr/lib/python2.7/SocketServer.py\", line 431, in server_bind\r\n self.socket.bind(self.server_address)\r\n File \"/usr/lib/python2.7/socket.py\", line 228, in meth\r\n return getattr(self._sock,name)(*args)\r\nsocket.error: [Errno 98] Address already in use\r\n`\n", "before_files": [{"content": "import atexit\nimport itertools\nimport json\nimport logging\nimport os\nimport subprocess\nimport tempfile\nimport threading\nimport time\n\nimport requests\nfrom eventlet.greenpool import GreenPool\nfrom eventlet.semaphore import Semaphore\nfrom pykube import HTTPClient\nfrom pykube import KubeConfig\nfrom pykube import Pod\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass _WorkerManagerData(object):\n \"\"\"\n This class is thread safe\n \"\"\"\n\n def __init__(self, game_state, user_codes):\n self._game_state = game_state\n self._user_codes = user_codes\n self._lock = Semaphore()\n\n def _remove_avatar(self, user_id):\n assert self._lock.locked\n self._game_state.remove_avatar(user_id)\n del self._user_codes[user_id]\n\n def remove_user_if_code_is_different(self, user):\n with self._lock:\n existing_code = self._user_codes.get(user['id'], None)\n if existing_code != user['code']:\n # Remove avatar from the game, so it stops being called for turns\n if existing_code is not None:\n self._remove_avatar(user['id'])\n return True\n else:\n return False\n\n def add_avatar(self, user, worker_url):\n with self._lock:\n # Add avatar back into game\n self._game_state.add_avatar(\n user_id=user['id'], worker_url=\"%s/turn/\" % worker_url)\n\n def set_code(self, user):\n with self._lock:\n self._user_codes[user['id']] = user['code']\n\n def get_code(self, 
player_id):\n with self._lock:\n return self._user_codes[player_id]\n\n def remove_unknown_avatars(self, known_user_ids):\n with self._lock:\n unknown_user_ids = set(self._user_codes) - frozenset(known_user_ids)\n for u in unknown_user_ids:\n self._remove_avatar(u)\n return unknown_user_ids\n\n def set_main_avatar(self, avatar_id):\n with self._lock:\n self._game_state.main_avatar_id = avatar_id\n\n\nclass WorkerManager(threading.Thread):\n \"\"\"\n Methods of this class must be thread safe unless explicitly stated.\n \"\"\"\n daemon = True\n\n def __init__(self, game_state, users_url, port=5000):\n \"\"\"\n\n :param thread_pool:\n \"\"\"\n self._data = _WorkerManagerData(game_state, {})\n self.users_url = users_url\n self._pool = GreenPool(size=3)\n self.port = port\n super(WorkerManager, self).__init__()\n\n def get_code(self, player_id):\n return self._data.get_code(player_id)\n\n def get_persistent_state(self, player_id):\n \"\"\"Get the persistent state for a worker.\"\"\"\n\n return None\n\n def create_worker(self, player_id):\n \"\"\"Create a worker.\"\"\"\n\n raise NotImplemented\n\n def remove_worker(self, player_id):\n \"\"\"Remove a worker for the given player.\"\"\"\n\n raise NotImplemented\n\n # TODO handle failure\n def spawn(self, user):\n # Get persistent state from worker\n persistent_state = self.get_persistent_state(user['id']) # noqa: F841\n\n # Kill worker\n LOGGER.info(\"Removing worker for user %s\" % user['id'])\n self.remove_worker(user['id'])\n\n self._data.set_code(user)\n\n # Spawn worker\n LOGGER.info(\"Spawning worker for user %s\" % user['id'])\n worker_url = self.create_worker(user['id'])\n\n # Add avatar back into game\n self._data.add_avatar(user, worker_url)\n LOGGER.info('Added user %s', user['id'])\n\n def _parallel_map(self, func, iterable_args):\n list(self._pool.imap(func, iterable_args))\n\n def update(self):\n try:\n LOGGER.info(\"Waking up\")\n game_data = requests.get(self.users_url).json()\n except (requests.RequestException, ValueError) as err:\n LOGGER.error(\"Failed to obtain game data : %s\", err)\n else:\n game = game_data['main']\n\n # Remove users with different code\n users_to_add = []\n for user in game['users']:\n if self._data.remove_user_if_code_is_different(user):\n users_to_add.append(user)\n LOGGER.debug(\"Need to add users: %s\" % [x['id'] for x in users_to_add])\n\n # Add missing users\n self._parallel_map(self.spawn, users_to_add)\n\n # Delete extra users\n known_avatars = set(user['id'] for user in game['users'])\n removed_user_ids = self._data.remove_unknown_avatars(known_avatars)\n LOGGER.debug(\"Removing users: %s\" % removed_user_ids)\n self._parallel_map(self.remove_worker, removed_user_ids)\n\n # Update main avatar\n self._data.set_main_avatar(game_data['main']['main_avatar'])\n\n def run(self):\n while True:\n self.update()\n LOGGER.info(\"Sleeping\")\n time.sleep(10)\n\n\nclass LocalWorkerManager(WorkerManager):\n \"\"\"Relies on them already being created already.\"\"\"\n\n host = '127.0.0.1'\n worker_directory = os.path.join(\n os.path.dirname(__file__),\n '../../aimmo-game-worker/',\n )\n\n def __init__(self, *args, **kwargs):\n self.workers = {}\n self.port_counter = itertools.count(1989)\n super(LocalWorkerManager, self).__init__(*args, **kwargs)\n\n def create_worker(self, player_id):\n assert(player_id not in self.workers)\n port = self.port_counter.next()\n env = os.environ.copy()\n data_dir = tempfile.mkdtemp()\n\n LOGGER.debug('Data dir is %s', data_dir)\n data = 
requests.get(\"http://127.0.0.1:{}/player/{}\".format(self.port, player_id)).json()\n\n options = data['options']\n with open('{}/options.json'.format(data_dir), 'w') as options_file:\n json.dump(options, options_file)\n\n code = data['code']\n with open('{}/avatar.py'.format(data_dir), 'w') as avatar_file:\n avatar_file.write(code)\n\n env['PYTHONPATH'] = data_dir\n\n process = subprocess.Popen(['python', 'service.py', self.host, str(port), str(data_dir)], cwd=self.worker_directory, env=env)\n atexit.register(process.kill)\n self.workers[player_id] = process\n worker_url = 'http://%s:%d' % (\n self.host,\n port,\n )\n LOGGER.info(\"Worker started for %s, listening at %s\", player_id, worker_url)\n return worker_url\n\n def remove_worker(self, player_id):\n if player_id in self.workers:\n self.workers[player_id].kill()\n del self.workers[player_id]\n\n\nclass KubernetesWorkerManager(WorkerManager):\n \"\"\"Kubernetes worker manager.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.api = HTTPClient(KubeConfig.from_service_account())\n self.game_id = os.environ['GAME_ID']\n self.game_url = os.environ['GAME_URL']\n super(KubernetesWorkerManager, self).__init__(*args, **kwargs)\n\n def create_worker(self, player_id):\n pod = Pod(\n self.api,\n {\n 'kind': 'Pod',\n 'apiVersion': 'v1',\n 'metadata': {\n 'generateName': \"aimmo-%s-worker-%s-\" % (self.game_id, player_id),\n 'labels': {\n 'app': 'aimmo-game-worker',\n 'game': self.game_id,\n 'player': str(player_id),\n },\n },\n 'spec': {\n 'containers': [\n {\n 'env': [\n {\n 'name': 'DATA_URL',\n 'value': \"%s/player/%d\" % (self.game_url, player_id),\n },\n ],\n 'name': 'aimmo-game-worker',\n 'image': 'ocadotechnology/aimmo-game-worker:%s' % os.environ.get('IMAGE_SUFFIX', 'latest'),\n 'ports': [\n {\n 'containerPort': 5000,\n 'protocol': 'TCP'\n }\n ],\n 'resources': {\n 'limits': {\n 'cpu': '10m',\n 'memory': '64Mi',\n },\n },\n },\n ],\n },\n }\n )\n pod.create()\n iterations = 0\n while pod.obj['status']['phase'] == 'Pending':\n if iterations > 30:\n raise EnvironmentError('Could not start worker %s, details %s' % (player_id, pod.obj))\n LOGGER.debug('Waiting for worker %s', player_id)\n time.sleep(5)\n pod.reload()\n iterations += 1\n worker_url = \"http://%s:5000\" % pod.obj['status']['podIP']\n LOGGER.info(\"Worker started for %s, listening at %s\", player_id, worker_url)\n return worker_url\n\n def remove_worker(self, player_id):\n for pod in Pod.objects(self.api).filter(selector={\n 'app': 'aimmo-game-worker',\n 'game': self.game_id,\n 'player': str(player_id),\n }):\n LOGGER.debug('Removing pod %s', pod.obj['spec'])\n pod.delete()\n\nWORKER_MANAGERS = {\n 'local': LocalWorkerManager,\n 'kubernetes': KubernetesWorkerManager,\n}\n", "path": "aimmo-game/simulation/worker_manager.py"}], "after_files": [{"content": "import atexit\nimport itertools\nimport json\nimport logging\nimport os\nimport subprocess\nimport tempfile\nimport threading\nimport time\n\nimport requests\nfrom eventlet.greenpool import GreenPool\nfrom eventlet.semaphore import Semaphore\nfrom pykube import HTTPClient\nfrom pykube import KubeConfig\nfrom pykube import Pod\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass _WorkerManagerData(object):\n \"\"\"\n This class is thread safe\n \"\"\"\n\n def __init__(self, game_state, user_codes):\n self._game_state = game_state\n self._user_codes = user_codes\n self._lock = Semaphore()\n\n def _remove_avatar(self, user_id):\n assert self._lock.locked\n self._game_state.remove_avatar(user_id)\n del 
self._user_codes[user_id]\n\n def remove_user_if_code_is_different(self, user):\n with self._lock:\n existing_code = self._user_codes.get(user['id'], None)\n if existing_code != user['code']:\n # Remove avatar from the game, so it stops being called for turns\n if existing_code is not None:\n self._remove_avatar(user['id'])\n return True\n else:\n return False\n\n def add_avatar(self, user, worker_url):\n with self._lock:\n # Add avatar back into game\n self._game_state.add_avatar(\n user_id=user['id'], worker_url=\"%s/turn/\" % worker_url)\n\n def set_code(self, user):\n with self._lock:\n self._user_codes[user['id']] = user['code']\n\n def get_code(self, player_id):\n with self._lock:\n return self._user_codes[player_id]\n\n def remove_unknown_avatars(self, known_user_ids):\n with self._lock:\n unknown_user_ids = set(self._user_codes) - frozenset(known_user_ids)\n for u in unknown_user_ids:\n self._remove_avatar(u)\n return unknown_user_ids\n\n def set_main_avatar(self, avatar_id):\n with self._lock:\n self._game_state.main_avatar_id = avatar_id\n\n\nclass WorkerManager(threading.Thread):\n \"\"\"\n Methods of this class must be thread safe unless explicitly stated.\n \"\"\"\n daemon = True\n\n def __init__(self, game_state, users_url, port=5000):\n \"\"\"\n\n :param thread_pool:\n \"\"\"\n self._data = _WorkerManagerData(game_state, {})\n self.users_url = users_url\n self._pool = GreenPool(size=3)\n self.port = port\n super(WorkerManager, self).__init__()\n\n def get_code(self, player_id):\n return self._data.get_code(player_id)\n\n def get_persistent_state(self, player_id):\n \"\"\"Get the persistent state for a worker.\"\"\"\n\n return None\n\n def create_worker(self, player_id):\n \"\"\"Create a worker.\"\"\"\n\n raise NotImplemented\n\n def remove_worker(self, player_id):\n \"\"\"Remove a worker for the given player.\"\"\"\n\n raise NotImplemented\n\n # TODO handle failure\n def spawn(self, user):\n # Get persistent state from worker\n persistent_state = self.get_persistent_state(user['id']) # noqa: F841\n\n # Kill worker\n LOGGER.info(\"Removing worker for user %s\" % user['id'])\n self.remove_worker(user['id'])\n\n self._data.set_code(user)\n\n # Spawn worker\n LOGGER.info(\"Spawning worker for user %s\" % user['id'])\n worker_url = self.create_worker(user['id'])\n\n # Add avatar back into game\n self._data.add_avatar(user, worker_url)\n LOGGER.info('Added user %s', user['id'])\n\n def _parallel_map(self, func, iterable_args):\n list(self._pool.imap(func, iterable_args))\n\n def update(self):\n try:\n LOGGER.info(\"Waking up\")\n game_data = requests.get(self.users_url).json()\n except (requests.RequestException, ValueError) as err:\n LOGGER.error(\"Failed to obtain game data : %s\", err)\n else:\n game = game_data['main']\n\n # Remove users with different code\n users_to_add = []\n for user in game['users']:\n if self._data.remove_user_if_code_is_different(user):\n users_to_add.append(user)\n LOGGER.debug(\"Need to add users: %s\" % [x['id'] for x in users_to_add])\n\n # Add missing users\n self._parallel_map(self.spawn, users_to_add)\n\n # Delete extra users\n known_avatars = set(user['id'] for user in game['users'])\n removed_user_ids = self._data.remove_unknown_avatars(known_avatars)\n LOGGER.debug(\"Removing users: %s\" % removed_user_ids)\n self._parallel_map(self.remove_worker, removed_user_ids)\n\n # Update main avatar\n self._data.set_main_avatar(game_data['main']['main_avatar'])\n\n def run(self):\n while True:\n self.update()\n LOGGER.info(\"Sleeping\")\n 
time.sleep(10)\n\n\nclass LocalWorkerManager(WorkerManager):\n \"\"\"Relies on them already being created already.\"\"\"\n\n host = '127.0.0.1'\n worker_directory = os.path.join(\n os.path.dirname(__file__),\n '../../aimmo-game-worker/',\n )\n\n def __init__(self, *args, **kwargs):\n self.workers = {}\n self.port_counter = itertools.count(self.port + 10)\n super(LocalWorkerManager, self).__init__(*args, **kwargs)\n\n def create_worker(self, player_id):\n assert(player_id not in self.workers)\n port = self.port_counter.next()\n env = os.environ.copy()\n data_dir = tempfile.mkdtemp()\n\n LOGGER.debug('Data dir is %s', data_dir)\n data = requests.get(\"http://127.0.0.1:{}/player/{}\".format(self.port, player_id)).json()\n\n options = data['options']\n with open('{}/options.json'.format(data_dir), 'w') as options_file:\n json.dump(options, options_file)\n\n code = data['code']\n with open('{}/avatar.py'.format(data_dir), 'w') as avatar_file:\n avatar_file.write(code)\n\n env['PYTHONPATH'] = data_dir\n\n process = subprocess.Popen(['python', 'service.py', self.host, str(port), str(data_dir)], cwd=self.worker_directory, env=env)\n atexit.register(process.kill)\n self.workers[player_id] = process\n worker_url = 'http://%s:%d' % (\n self.host,\n port,\n )\n LOGGER.info(\"Worker started for %s, listening at %s\", player_id, worker_url)\n return worker_url\n\n def remove_worker(self, player_id):\n if player_id in self.workers:\n self.workers[player_id].kill()\n del self.workers[player_id]\n\n\nclass KubernetesWorkerManager(WorkerManager):\n \"\"\"Kubernetes worker manager.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.api = HTTPClient(KubeConfig.from_service_account())\n self.game_id = os.environ['GAME_ID']\n self.game_url = os.environ['GAME_URL']\n super(KubernetesWorkerManager, self).__init__(*args, **kwargs)\n\n def create_worker(self, player_id):\n pod = Pod(\n self.api,\n {\n 'kind': 'Pod',\n 'apiVersion': 'v1',\n 'metadata': {\n 'generateName': \"aimmo-%s-worker-%s-\" % (self.game_id, player_id),\n 'labels': {\n 'app': 'aimmo-game-worker',\n 'game': self.game_id,\n 'player': str(player_id),\n },\n },\n 'spec': {\n 'containers': [\n {\n 'env': [\n {\n 'name': 'DATA_URL',\n 'value': \"%s/player/%d\" % (self.game_url, player_id),\n },\n ],\n 'name': 'aimmo-game-worker',\n 'image': 'ocadotechnology/aimmo-game-worker:%s' % os.environ.get('IMAGE_SUFFIX', 'latest'),\n 'ports': [\n {\n 'containerPort': 5000,\n 'protocol': 'TCP'\n }\n ],\n 'resources': {\n 'limits': {\n 'cpu': '10m',\n 'memory': '64Mi',\n },\n },\n },\n ],\n },\n }\n )\n pod.create()\n iterations = 0\n while pod.obj['status']['phase'] == 'Pending':\n if iterations > 30:\n raise EnvironmentError('Could not start worker %s, details %s' % (player_id, pod.obj))\n LOGGER.debug('Waiting for worker %s', player_id)\n time.sleep(5)\n pod.reload()\n iterations += 1\n worker_url = \"http://%s:5000\" % pod.obj['status']['podIP']\n LOGGER.info(\"Worker started for %s, listening at %s\", player_id, worker_url)\n return worker_url\n\n def remove_worker(self, player_id):\n for pod in Pod.objects(self.api).filter(selector={\n 'app': 'aimmo-game-worker',\n 'game': self.game_id,\n 'player': str(player_id),\n }):\n LOGGER.debug('Removing pod %s', pod.obj['spec'])\n pod.delete()\n\nWORKER_MANAGERS = {\n 'local': LocalWorkerManager,\n 'kubernetes': KubernetesWorkerManager,\n}\n", "path": "aimmo-game/simulation/worker_manager.py"}]}
3,589
140
gh_patches_debug_939
rasdani/github-patches
git_diff
apache__airflow-28730
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CSRF token should be expire with session ### Apache Airflow version 2.5.0 ### What happened In the default configuration, the CSRF token [expires in one hour](https://pythonhosted.org/Flask-WTF/config.html#forms-and-csrf). This setting leads to frequent errors in the UI – for no good reason. ### What you think should happen instead A short expiration date for the CSRF token is not the right value in my view and I [agree with this answer](https://security.stackexchange.com/a/56520/22108) that the CSRF token should basically never expire, instead pegging itself to the current session. That is, the CSRF token should last as long as the current session. The easiest way to accomplish this is by generating the CSRF token from the session id. ### How to reproduce _No response_ ### Operating System Linux ### Versions of Apache Airflow Providers _No response_ ### Deployment Official Apache Airflow Helm Chart ### Deployment details _No response_ ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/config_templates/default_webserver_config.py` Content: ``` 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 18 """Default configuration for the Airflow webserver.""" 19 from __future__ import annotations 20 21 import os 22 23 from airflow.www.fab_security.manager import AUTH_DB 24 25 # from airflow.www.fab_security.manager import AUTH_LDAP 26 # from airflow.www.fab_security.manager import AUTH_OAUTH 27 # from airflow.www.fab_security.manager import AUTH_OID 28 # from airflow.www.fab_security.manager import AUTH_REMOTE_USER 29 30 31 basedir = os.path.abspath(os.path.dirname(__file__)) 32 33 # Flask-WTF flag for CSRF 34 WTF_CSRF_ENABLED = True 35 36 # ---------------------------------------------------- 37 # AUTHENTICATION CONFIG 38 # ---------------------------------------------------- 39 # For details on how to set up each of the following authentication, see 40 # http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods 41 # for details. 
42 43 # The authentication type 44 # AUTH_OID : Is for OpenID 45 # AUTH_DB : Is for database 46 # AUTH_LDAP : Is for LDAP 47 # AUTH_REMOTE_USER : Is for using REMOTE_USER from web server 48 # AUTH_OAUTH : Is for OAuth 49 AUTH_TYPE = AUTH_DB 50 51 # Uncomment to setup Full admin role name 52 # AUTH_ROLE_ADMIN = 'Admin' 53 54 # Uncomment and set to desired role to enable access without authentication 55 # AUTH_ROLE_PUBLIC = 'Viewer' 56 57 # Will allow user self registration 58 # AUTH_USER_REGISTRATION = True 59 60 # The recaptcha it's automatically enabled for user self registration is active and the keys are necessary 61 # RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY 62 # RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY 63 64 # Config for Flask-Mail necessary for user self registration 65 # MAIL_SERVER = 'smtp.gmail.com' 66 # MAIL_USE_TLS = True 67 # MAIL_USERNAME = '[email protected]' 68 # MAIL_PASSWORD = 'passwordformail' 69 # MAIL_DEFAULT_SENDER = '[email protected]' 70 71 # The default user self registration role 72 # AUTH_USER_REGISTRATION_ROLE = "Public" 73 74 # When using OAuth Auth, uncomment to setup provider(s) info 75 # Google OAuth example: 76 # OAUTH_PROVIDERS = [{ 77 # 'name':'google', 78 # 'token_key':'access_token', 79 # 'icon':'fa-google', 80 # 'remote_app': { 81 # 'api_base_url':'https://www.googleapis.com/oauth2/v2/', 82 # 'client_kwargs':{ 83 # 'scope': 'email profile' 84 # }, 85 # 'access_token_url':'https://accounts.google.com/o/oauth2/token', 86 # 'authorize_url':'https://accounts.google.com/o/oauth2/auth', 87 # 'request_token_url': None, 88 # 'client_id': GOOGLE_KEY, 89 # 'client_secret': GOOGLE_SECRET_KEY, 90 # } 91 # }] 92 93 # When using LDAP Auth, setup the ldap server 94 # AUTH_LDAP_SERVER = "ldap://ldapserver.new" 95 96 # When using OpenID Auth, uncomment to setup OpenID providers. 97 # example for OpenID authentication 98 # OPENID_PROVIDERS = [ 99 # { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' }, 100 # { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' }, 101 # { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' }, 102 # { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }] 103 104 # ---------------------------------------------------- 105 # Theme CONFIG 106 # ---------------------------------------------------- 107 # Flask App Builder comes up with a number of predefined themes 108 # that you can use for Apache Airflow. 109 # http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes 110 # Please make sure to remove "navbar_color" configuration from airflow.cfg 111 # in order to fully utilize the theme. (or use that property in conjunction with theme) 112 # APP_THEME = "bootstrap-theme.css" # default bootstrap 113 # APP_THEME = "amelia.css" 114 # APP_THEME = "cerulean.css" 115 # APP_THEME = "cosmo.css" 116 # APP_THEME = "cyborg.css" 117 # APP_THEME = "darkly.css" 118 # APP_THEME = "flatly.css" 119 # APP_THEME = "journal.css" 120 # APP_THEME = "lumen.css" 121 # APP_THEME = "paper.css" 122 # APP_THEME = "readable.css" 123 # APP_THEME = "sandstone.css" 124 # APP_THEME = "simplex.css" 125 # APP_THEME = "slate.css" 126 # APP_THEME = "solar.css" 127 # APP_THEME = "spacelab.css" 128 # APP_THEME = "superhero.css" 129 # APP_THEME = "united.css" 130 # APP_THEME = "yeti.css" 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/config_templates/default_webserver_config.py b/airflow/config_templates/default_webserver_config.py --- a/airflow/config_templates/default_webserver_config.py +++ b/airflow/config_templates/default_webserver_config.py @@ -32,6 +32,7 @@ # Flask-WTF flag for CSRF WTF_CSRF_ENABLED = True +WTF_CSRF_TIME_LIMIT = None # ---------------------------------------------------- # AUTHENTICATION CONFIG
{"golden_diff": "diff --git a/airflow/config_templates/default_webserver_config.py b/airflow/config_templates/default_webserver_config.py\n--- a/airflow/config_templates/default_webserver_config.py\n+++ b/airflow/config_templates/default_webserver_config.py\n@@ -32,6 +32,7 @@\n \n # Flask-WTF flag for CSRF\n WTF_CSRF_ENABLED = True\n+WTF_CSRF_TIME_LIMIT = None\n \n # ----------------------------------------------------\n # AUTHENTICATION CONFIG\n", "issue": "CSRF token should be expire with session\n### Apache Airflow version\n\n2.5.0\n\n### What happened\n\nIn the default configuration, the CSRF token [expires in one hour](https://pythonhosted.org/Flask-WTF/config.html#forms-and-csrf). This setting leads to frequent errors in the UI \u2013 for no good reason.\r\n\n\n### What you think should happen instead\n\nA short expiration date for the CSRF token is not the right value in my view and I [agree with this answer](https://security.stackexchange.com/a/56520/22108) that the CSRF token should basically never expire, instead pegging itself to the current session.\r\n\r\nThat is, the CSRF token should last as long as the current session. The easiest way to accomplish this is by generating the CSRF token from the session id.\r\n\r\n\n\n### How to reproduce\n\n_No response_\n\n### Operating System\n\nLinux\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nOfficial Apache Airflow Helm Chart\n\n### Deployment details\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default configuration for the Airflow webserver.\"\"\"\nfrom __future__ import annotations\n\nimport os\n\nfrom airflow.www.fab_security.manager import AUTH_DB\n\n# from airflow.www.fab_security.manager import AUTH_LDAP\n# from airflow.www.fab_security.manager import AUTH_OAUTH\n# from airflow.www.fab_security.manager import AUTH_OID\n# from airflow.www.fab_security.manager import AUTH_REMOTE_USER\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Flask-WTF flag for CSRF\nWTF_CSRF_ENABLED = True\n\n# ----------------------------------------------------\n# AUTHENTICATION CONFIG\n# ----------------------------------------------------\n# For details on how to set up each of the following authentication, see\n# http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods\n# for details.\n\n# The authentication type\n# AUTH_OID : Is for OpenID\n# AUTH_DB : Is for database\n# AUTH_LDAP : Is for LDAP\n# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server\n# AUTH_OAUTH : Is for OAuth\nAUTH_TYPE = AUTH_DB\n\n# Uncomment to setup Full admin role name\n# AUTH_ROLE_ADMIN = 'Admin'\n\n# Uncomment and set to desired role to enable access without authentication\n# AUTH_ROLE_PUBLIC = 'Viewer'\n\n# Will allow user self registration\n# AUTH_USER_REGISTRATION = True\n\n# The recaptcha it's automatically enabled for user self registration is active and the keys are necessary\n# RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY\n# RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY\n\n# Config for Flask-Mail necessary for user self registration\n# MAIL_SERVER = 'smtp.gmail.com'\n# MAIL_USE_TLS = True\n# MAIL_USERNAME = '[email protected]'\n# MAIL_PASSWORD = 'passwordformail'\n# MAIL_DEFAULT_SENDER = '[email protected]'\n\n# The default user self registration role\n# AUTH_USER_REGISTRATION_ROLE = \"Public\"\n\n# When using OAuth Auth, uncomment to setup provider(s) info\n# Google OAuth example:\n# OAUTH_PROVIDERS = [{\n# 'name':'google',\n# 'token_key':'access_token',\n# 'icon':'fa-google',\n# 'remote_app': {\n# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',\n# 'client_kwargs':{\n# 'scope': 'email profile'\n# },\n# 'access_token_url':'https://accounts.google.com/o/oauth2/token',\n# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',\n# 'request_token_url': None,\n# 'client_id': GOOGLE_KEY,\n# 'client_secret': GOOGLE_SECRET_KEY,\n# }\n# }]\n\n# When using LDAP Auth, setup the ldap server\n# AUTH_LDAP_SERVER = \"ldap://ldapserver.new\"\n\n# When using OpenID Auth, uncomment to setup OpenID providers.\n# example for OpenID authentication\n# OPENID_PROVIDERS = [\n# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },\n# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },\n# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },\n# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]\n\n# ----------------------------------------------------\n# Theme CONFIG\n# ----------------------------------------------------\n# Flask App Builder comes up with a number of predefined themes\n# that you can use for Apache Airflow.\n# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes\n# Please make sure to remove \"navbar_color\" configuration from airflow.cfg\n# in order to fully utilize the theme. 
(or use that property in conjunction with theme)\n# APP_THEME = \"bootstrap-theme.css\" # default bootstrap\n# APP_THEME = \"amelia.css\"\n# APP_THEME = \"cerulean.css\"\n# APP_THEME = \"cosmo.css\"\n# APP_THEME = \"cyborg.css\"\n# APP_THEME = \"darkly.css\"\n# APP_THEME = \"flatly.css\"\n# APP_THEME = \"journal.css\"\n# APP_THEME = \"lumen.css\"\n# APP_THEME = \"paper.css\"\n# APP_THEME = \"readable.css\"\n# APP_THEME = \"sandstone.css\"\n# APP_THEME = \"simplex.css\"\n# APP_THEME = \"slate.css\"\n# APP_THEME = \"solar.css\"\n# APP_THEME = \"spacelab.css\"\n# APP_THEME = \"superhero.css\"\n# APP_THEME = \"united.css\"\n# APP_THEME = \"yeti.css\"\n", "path": "airflow/config_templates/default_webserver_config.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default configuration for the Airflow webserver.\"\"\"\nfrom __future__ import annotations\n\nimport os\n\nfrom airflow.www.fab_security.manager import AUTH_DB\n\n# from airflow.www.fab_security.manager import AUTH_LDAP\n# from airflow.www.fab_security.manager import AUTH_OAUTH\n# from airflow.www.fab_security.manager import AUTH_OID\n# from airflow.www.fab_security.manager import AUTH_REMOTE_USER\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Flask-WTF flag for CSRF\nWTF_CSRF_ENABLED = True\nWTF_CSRF_TIME_LIMIT = None\n\n# ----------------------------------------------------\n# AUTHENTICATION CONFIG\n# ----------------------------------------------------\n# For details on how to set up each of the following authentication, see\n# http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods\n# for details.\n\n# The authentication type\n# AUTH_OID : Is for OpenID\n# AUTH_DB : Is for database\n# AUTH_LDAP : Is for LDAP\n# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server\n# AUTH_OAUTH : Is for OAuth\nAUTH_TYPE = AUTH_DB\n\n# Uncomment to setup Full admin role name\n# AUTH_ROLE_ADMIN = 'Admin'\n\n# Uncomment and set to desired role to enable access without authentication\n# AUTH_ROLE_PUBLIC = 'Viewer'\n\n# Will allow user self registration\n# AUTH_USER_REGISTRATION = True\n\n# The recaptcha it's automatically enabled for user self registration is active and the keys are necessary\n# RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY\n# RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY\n\n# Config for Flask-Mail necessary for user self registration\n# MAIL_SERVER = 'smtp.gmail.com'\n# MAIL_USE_TLS = True\n# MAIL_USERNAME = '[email protected]'\n# MAIL_PASSWORD = 'passwordformail'\n# MAIL_DEFAULT_SENDER = '[email protected]'\n\n# The default user self registration role\n# AUTH_USER_REGISTRATION_ROLE = \"Public\"\n\n# When using OAuth Auth, uncomment to setup provider(s) info\n# Google OAuth example:\n# OAUTH_PROVIDERS = [{\n# 
'name':'google',\n# 'token_key':'access_token',\n# 'icon':'fa-google',\n# 'remote_app': {\n# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',\n# 'client_kwargs':{\n# 'scope': 'email profile'\n# },\n# 'access_token_url':'https://accounts.google.com/o/oauth2/token',\n# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',\n# 'request_token_url': None,\n# 'client_id': GOOGLE_KEY,\n# 'client_secret': GOOGLE_SECRET_KEY,\n# }\n# }]\n\n# When using LDAP Auth, setup the ldap server\n# AUTH_LDAP_SERVER = \"ldap://ldapserver.new\"\n\n# When using OpenID Auth, uncomment to setup OpenID providers.\n# example for OpenID authentication\n# OPENID_PROVIDERS = [\n# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },\n# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },\n# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },\n# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]\n\n# ----------------------------------------------------\n# Theme CONFIG\n# ----------------------------------------------------\n# Flask App Builder comes up with a number of predefined themes\n# that you can use for Apache Airflow.\n# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes\n# Please make sure to remove \"navbar_color\" configuration from airflow.cfg\n# in order to fully utilize the theme. (or use that property in conjunction with theme)\n# APP_THEME = \"bootstrap-theme.css\" # default bootstrap\n# APP_THEME = \"amelia.css\"\n# APP_THEME = \"cerulean.css\"\n# APP_THEME = \"cosmo.css\"\n# APP_THEME = \"cyborg.css\"\n# APP_THEME = \"darkly.css\"\n# APP_THEME = \"flatly.css\"\n# APP_THEME = \"journal.css\"\n# APP_THEME = \"lumen.css\"\n# APP_THEME = \"paper.css\"\n# APP_THEME = \"readable.css\"\n# APP_THEME = \"sandstone.css\"\n# APP_THEME = \"simplex.css\"\n# APP_THEME = \"slate.css\"\n# APP_THEME = \"solar.css\"\n# APP_THEME = \"spacelab.css\"\n# APP_THEME = \"superhero.css\"\n# APP_THEME = \"united.css\"\n# APP_THEME = \"yeti.css\"\n", "path": "airflow/config_templates/default_webserver_config.py"}]}
num_tokens: 1,985
num_tokens_diff: 97
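The Apache Airflow record above fixes the CSRF expiry by adding `WTF_CSRF_TIME_LIMIT = None`, which ties the token's lifetime to the session instead of the default 3600-second window. A minimal sketch of the same setting in a bare Flask app, assuming Flask and Flask-WTF are installed; the app, secret key, and route below are illustrative placeholders rather than Airflow code:

```python
from flask import Flask
from flask_wtf import CSRFProtect

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"      # placeholder key used to sign CSRF tokens
app.config["WTF_CSRF_ENABLED"] = True       # same flag the webserver config sets
app.config["WTF_CSRF_TIME_LIMIT"] = None    # token no longer expires after the default
                                            # 3600 seconds; it is valid for the session
csrf = CSRFProtect(app)

@app.route("/", methods=["GET", "POST"])
def index():
    # POSTs to this view now fail only on a missing or invalid token, not an expired one.
    return "ok"
```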
problem_id: gh_patches_debug_17919
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__google-cloud-python-4061
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spelling Error (_medical_likeliehood) _medical_likeliehood -> _medical_likelihood https://github.com/GoogleCloudPlatform/google-cloud-python/blob/b28a4eb667ae08c3f4dcf9af891ed4931884989c/vision/google/cloud/vision/safe_search.py#L43 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vision/google/cloud/vision/safe_search.py` Content: ``` 1 # Copyright 2017 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Safe search class for information returned from annotating an image.""" 16 17 from google.cloud.vision.likelihood import _get_pb_likelihood 18 from google.cloud.vision.likelihood import Likelihood 19 20 21 class SafeSearchAnnotation(object): 22 """Representation of a SafeSearchAnnotation. 23 24 :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood` 25 :param adult_likelihood: Likelihood that image contains adult material. 26 27 :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood` 28 :param spoof_likelihood: Likelihood that image is a spoof. 29 30 :type medical_likelihood: 31 :class:`~google.cloud.vision.likelihood.Likelihood` 32 :param medical_likelihood: Likelihood that image contains medical material. 33 34 :type violence_likelihood: 35 :class:`~google.cloud.vision.likelihood.Likelihood` 36 :param violence_likelihood: Likelihood that image contains violence. 37 """ 38 39 def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood, 40 violence_likelihood): 41 self._adult_likelihood = adult_likelihood 42 self._spoof_likelihood = spoof_likelihood 43 self._medical_likeliehood = medical_likelihood 44 self._violence_likelihood = violence_likelihood 45 46 @classmethod 47 def from_api_repr(cls, response): 48 """Factory: construct SafeSearchAnnotation from Vision API response. 49 50 :type response: dict 51 :param response: Dictionary response from Vision API with safe search 52 data. 53 54 :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation` 55 :returns: Instance of ``SafeSearchAnnotation``. 56 """ 57 adult_likelihood = Likelihood[response['adult']] 58 spoof_likelihood = Likelihood[response['spoof']] 59 medical_likelihood = Likelihood[response['medical']] 60 violence_likelihood = Likelihood[response['violence']] 61 62 return cls(adult_likelihood, spoof_likelihood, medical_likelihood, 63 violence_likelihood) 64 65 @classmethod 66 def from_pb(cls, image): 67 """Factory: construct SafeSearchAnnotation from Vision API response. 68 69 :type image: :class:`~google.cloud.vision_v1.proto.\ 70 image_annotator_pb2.SafeSearchAnnotation` 71 :param image: Protobuf response from Vision API with safe search data. 72 73 :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation` 74 :returns: Instance of ``SafeSearchAnnotation``. 
75 """ 76 values = [image.adult, image.spoof, image.medical, image.violence] 77 classifications = map(_get_pb_likelihood, values) 78 return cls(*classifications) 79 80 @property 81 def adult(self): 82 """Represents the adult contents likelihood for the image. 83 84 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` 85 :returns: ``Likelihood`` of the image containing adult content. 86 """ 87 return self._adult_likelihood 88 89 @property 90 def spoof(self): 91 """The likelihood that an obvious modification was made to the image. 92 93 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` 94 :returns: The ``Likelihood`` that an obvious modification was made to 95 the image's canonical version to make it appear funny or 96 offensive. 97 """ 98 return self._spoof_likelihood 99 100 @property 101 def medical(self): 102 """Likelihood this is a medical image. 103 104 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` 105 :returns: The ``Likelihood`` that the image is medical in origin. 106 """ 107 return self._medical_likeliehood 108 109 @property 110 def violence(self): 111 """Likeliehood that this image contains violence. 112 113 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` 114 :returns: The ``Likelihood`` that the image contains violence. 115 """ 116 return self._violence_likelihood 117 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vision/google/cloud/vision/safe_search.py b/vision/google/cloud/vision/safe_search.py --- a/vision/google/cloud/vision/safe_search.py +++ b/vision/google/cloud/vision/safe_search.py @@ -40,7 +40,7 @@ violence_likelihood): self._adult_likelihood = adult_likelihood self._spoof_likelihood = spoof_likelihood - self._medical_likeliehood = medical_likelihood + self._medical_likelihood = medical_likelihood self._violence_likelihood = violence_likelihood @classmethod @@ -104,7 +104,7 @@ :rtype: :class:`~google.cloud.vision.likelihood.Likelihood` :returns: The ``Likelihood`` that the image is medical in origin. """ - return self._medical_likeliehood + return self._medical_likelihood @property def violence(self):
{"golden_diff": "diff --git a/vision/google/cloud/vision/safe_search.py b/vision/google/cloud/vision/safe_search.py\n--- a/vision/google/cloud/vision/safe_search.py\n+++ b/vision/google/cloud/vision/safe_search.py\n@@ -40,7 +40,7 @@\n violence_likelihood):\n self._adult_likelihood = adult_likelihood\n self._spoof_likelihood = spoof_likelihood\n- self._medical_likeliehood = medical_likelihood\n+ self._medical_likelihood = medical_likelihood\n self._violence_likelihood = violence_likelihood\n \n @classmethod\n@@ -104,7 +104,7 @@\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image is medical in origin.\n \"\"\"\n- return self._medical_likeliehood\n+ return self._medical_likelihood\n \n @property\n def violence(self):\n", "issue": "Spelling Error (_medical_likeliehood)\n_medical_likeliehood -> _medical_likelihood\r\n\r\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/b28a4eb667ae08c3f4dcf9af891ed4931884989c/vision/google/cloud/vision/safe_search.py#L43\n", "before_files": [{"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Safe search class for information returned from annotating an image.\"\"\"\n\nfrom google.cloud.vision.likelihood import _get_pb_likelihood\nfrom google.cloud.vision.likelihood import Likelihood\n\n\nclass SafeSearchAnnotation(object):\n \"\"\"Representation of a SafeSearchAnnotation.\n\n :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`\n :param adult_likelihood: Likelihood that image contains adult material.\n\n :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`\n :param spoof_likelihood: Likelihood that image is a spoof.\n\n :type medical_likelihood:\n :class:`~google.cloud.vision.likelihood.Likelihood`\n :param medical_likelihood: Likelihood that image contains medical material.\n\n :type violence_likelihood:\n :class:`~google.cloud.vision.likelihood.Likelihood`\n :param violence_likelihood: Likelihood that image contains violence.\n \"\"\"\n\n def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood,\n violence_likelihood):\n self._adult_likelihood = adult_likelihood\n self._spoof_likelihood = spoof_likelihood\n self._medical_likeliehood = medical_likelihood\n self._violence_likelihood = violence_likelihood\n\n @classmethod\n def from_api_repr(cls, response):\n \"\"\"Factory: construct SafeSearchAnnotation from Vision API response.\n\n :type response: dict\n :param response: Dictionary response from Vision API with safe search\n data.\n\n :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`\n :returns: Instance of ``SafeSearchAnnotation``.\n \"\"\"\n adult_likelihood = Likelihood[response['adult']]\n spoof_likelihood = Likelihood[response['spoof']]\n medical_likelihood = Likelihood[response['medical']]\n violence_likelihood = Likelihood[response['violence']]\n\n return cls(adult_likelihood, spoof_likelihood, medical_likelihood,\n violence_likelihood)\n\n 
@classmethod\n def from_pb(cls, image):\n \"\"\"Factory: construct SafeSearchAnnotation from Vision API response.\n\n :type image: :class:`~google.cloud.vision_v1.proto.\\\n image_annotator_pb2.SafeSearchAnnotation`\n :param image: Protobuf response from Vision API with safe search data.\n\n :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`\n :returns: Instance of ``SafeSearchAnnotation``.\n \"\"\"\n values = [image.adult, image.spoof, image.medical, image.violence]\n classifications = map(_get_pb_likelihood, values)\n return cls(*classifications)\n\n @property\n def adult(self):\n \"\"\"Represents the adult contents likelihood for the image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: ``Likelihood`` of the image containing adult content.\n \"\"\"\n return self._adult_likelihood\n\n @property\n def spoof(self):\n \"\"\"The likelihood that an obvious modification was made to the image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that an obvious modification was made to\n the image's canonical version to make it appear funny or\n offensive.\n \"\"\"\n return self._spoof_likelihood\n\n @property\n def medical(self):\n \"\"\"Likelihood this is a medical image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image is medical in origin.\n \"\"\"\n return self._medical_likeliehood\n\n @property\n def violence(self):\n \"\"\"Likeliehood that this image contains violence.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image contains violence.\n \"\"\"\n return self._violence_likelihood\n", "path": "vision/google/cloud/vision/safe_search.py"}], "after_files": [{"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Safe search class for information returned from annotating an image.\"\"\"\n\nfrom google.cloud.vision.likelihood import _get_pb_likelihood\nfrom google.cloud.vision.likelihood import Likelihood\n\n\nclass SafeSearchAnnotation(object):\n \"\"\"Representation of a SafeSearchAnnotation.\n\n :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`\n :param adult_likelihood: Likelihood that image contains adult material.\n\n :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`\n :param spoof_likelihood: Likelihood that image is a spoof.\n\n :type medical_likelihood:\n :class:`~google.cloud.vision.likelihood.Likelihood`\n :param medical_likelihood: Likelihood that image contains medical material.\n\n :type violence_likelihood:\n :class:`~google.cloud.vision.likelihood.Likelihood`\n :param violence_likelihood: Likelihood that image contains violence.\n \"\"\"\n\n def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood,\n violence_likelihood):\n self._adult_likelihood = adult_likelihood\n self._spoof_likelihood = spoof_likelihood\n self._medical_likelihood = medical_likelihood\n 
self._violence_likelihood = violence_likelihood\n\n @classmethod\n def from_api_repr(cls, response):\n \"\"\"Factory: construct SafeSearchAnnotation from Vision API response.\n\n :type response: dict\n :param response: Dictionary response from Vision API with safe search\n data.\n\n :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`\n :returns: Instance of ``SafeSearchAnnotation``.\n \"\"\"\n adult_likelihood = Likelihood[response['adult']]\n spoof_likelihood = Likelihood[response['spoof']]\n medical_likelihood = Likelihood[response['medical']]\n violence_likelihood = Likelihood[response['violence']]\n\n return cls(adult_likelihood, spoof_likelihood, medical_likelihood,\n violence_likelihood)\n\n @classmethod\n def from_pb(cls, image):\n \"\"\"Factory: construct SafeSearchAnnotation from Vision API response.\n\n :type image: :class:`~google.cloud.vision_v1.proto.\\\n image_annotator_pb2.SafeSearchAnnotation`\n :param image: Protobuf response from Vision API with safe search data.\n\n :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`\n :returns: Instance of ``SafeSearchAnnotation``.\n \"\"\"\n values = [image.adult, image.spoof, image.medical, image.violence]\n classifications = map(_get_pb_likelihood, values)\n return cls(*classifications)\n\n @property\n def adult(self):\n \"\"\"Represents the adult contents likelihood for the image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: ``Likelihood`` of the image containing adult content.\n \"\"\"\n return self._adult_likelihood\n\n @property\n def spoof(self):\n \"\"\"The likelihood that an obvious modification was made to the image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that an obvious modification was made to\n the image's canonical version to make it appear funny or\n offensive.\n \"\"\"\n return self._spoof_likelihood\n\n @property\n def medical(self):\n \"\"\"Likelihood this is a medical image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image is medical in origin.\n \"\"\"\n return self._medical_likelihood\n\n @property\n def violence(self):\n \"\"\"Likeliehood that this image contains violence.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image contains violence.\n \"\"\"\n return self._violence_likelihood\n", "path": "vision/google/cloud/vision/safe_search.py"}]}
num_tokens: 1,552
num_tokens_diff: 204
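The fix in the record above is a one-word rename, `_medical_likeliehood` to `_medical_likelihood`, so the attribute set in `__init__` matches the one read by the `medical` property. A small, self-contained regression check for that kind of slip, assuming only the standard library; the `Likelihood` enum and annotation class here are simplified stand-ins for the real `google.cloud.vision` types:

```python
from enum import Enum

class Likelihood(Enum):          # stand-in for google.cloud.vision.likelihood.Likelihood
    UNKNOWN = 0
    VERY_LIKELY = 5

class SafeSearchAnnotation:
    """Simplified annotation keeping the attribute name spelled consistently."""

    def __init__(self, adult, spoof, medical, violence):
        self._adult_likelihood = adult
        self._spoof_likelihood = spoof
        self._medical_likelihood = medical      # corrected spelling
        self._violence_likelihood = violence

    @property
    def medical(self):
        return self._medical_likelihood         # must match the name set in __init__

annotation = SafeSearchAnnotation(
    Likelihood.UNKNOWN, Likelihood.UNKNOWN, Likelihood.VERY_LIKELY, Likelihood.UNKNOWN
)
assert annotation.medical is Likelihood.VERY_LIKELY
```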
problem_id: gh_patches_debug_64529
source: rasdani/github-patches
task_type: git_diff
in_source_id: kartoza__prj.app-293
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- We need to support animated GIF's Using licecap or silentcast it is easy to make animated GIF's. When images are uploaded to django though they are resized and converted to PNG. We need to update the logic so thumbs etc. can be created for animate GIF's without losing the animation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django_project/base/templatetags/custom_markup.py` Content: ``` 1 import markdown 2 from django import template 3 from django.template.defaultfilters import stringfilter 4 from django.utils.encoding import force_unicode 5 from django.utils.safestring import mark_safe 6 7 register = template.Library() 8 9 10 @register.filter(name='base_markdown', is_safe=True) 11 @stringfilter 12 def base_markdown(value): 13 extensions = ["nl2br", ] 14 15 return mark_safe(markdown.markdown(force_unicode(value), 16 extensions, 17 safe_mode=True, 18 enable_attributes=False)) 19 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py --- a/django_project/base/templatetags/custom_markup.py +++ b/django_project/base/templatetags/custom_markup.py @@ -16,3 +16,9 @@ extensions, safe_mode=True, enable_attributes=False)) + + [email protected](name='is_gif', is_safe=True) +@stringfilter +def is_gif(value): + return value[-4:] == '.gif'
{"golden_diff": "diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py\n--- a/django_project/base/templatetags/custom_markup.py\n+++ b/django_project/base/templatetags/custom_markup.py\n@@ -16,3 +16,9 @@\n extensions,\n safe_mode=True,\n enable_attributes=False))\n+\n+\[email protected](name='is_gif', is_safe=True)\n+@stringfilter\n+def is_gif(value):\n+ return value[-4:] == '.gif'\n", "issue": "We need to support animated GIF's\nUsing licecap or silentcast it is easy to make animated GIF's. When images are uploaded to django though they are resized and converted to PNG. We need to update the logic so thumbs etc. can be created for animate GIF's without losing the animation. \n\n", "before_files": [{"content": "import markdown\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.encoding import force_unicode\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='base_markdown', is_safe=True)\n@stringfilter\ndef base_markdown(value):\n extensions = [\"nl2br\", ]\n\n return mark_safe(markdown.markdown(force_unicode(value),\n extensions,\n safe_mode=True,\n enable_attributes=False))\n", "path": "django_project/base/templatetags/custom_markup.py"}], "after_files": [{"content": "import markdown\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.encoding import force_unicode\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='base_markdown', is_safe=True)\n@stringfilter\ndef base_markdown(value):\n extensions = [\"nl2br\", ]\n\n return mark_safe(markdown.markdown(force_unicode(value),\n extensions,\n safe_mode=True,\n enable_attributes=False))\n\n\[email protected](name='is_gif', is_safe=True)\n@stringfilter\ndef is_gif(value):\n return value[-4:] == '.gif'\n", "path": "django_project/base/templatetags/custom_markup.py"}]}
num_tokens: 457
num_tokens_diff: 124
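The golden diff above adds an `is_gif` template filter that checks the file extension so animated GIFs can be handled separately from resized PNG thumbnails. A standalone sketch of that check, plus a case-insensitive variant as one possible design alternative; plain functions here stand in for the Django template filter registration:

```python
def is_gif(value: str) -> bool:
    # Same comparison the new template filter performs.
    return value[-4:] == ".gif"

def is_gif_relaxed(value: str) -> bool:
    # A more forgiving variant that also accepts upper-case extensions.
    return value.lower().endswith(".gif")

assert is_gif("screen-capture.gif") is True
assert is_gif("screen-capture.GIF") is False      # the strict check misses this case
assert is_gif_relaxed("screen-capture.GIF") is True
```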
problem_id: gh_patches_debug_43425
source: rasdani/github-patches
task_type: git_diff
in_source_id: cisagov__manage.get.gov-1759
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Increase reliability on OIDC connection ### Issue description While investigating #1726 we realized there is some areas for improvement in our handling of the connection to OIDC so that all scenarios of failure show a properly formatted 500 error page ### Acceptance criteria - [ ] Make sure all login.gov/identity sandbox connection issues result in the usual 500 error - [ ] refactor the connection set up as needed ### Additional context _No response_ ### Links to other issues relates to: #1726 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/djangooidc/views.py` Content: ``` 1 # coding: utf-8 2 3 import logging 4 5 from django.conf import settings 6 from django.contrib.auth import logout as auth_logout 7 from django.contrib.auth import authenticate, login 8 from django.http import HttpResponseRedirect 9 from django.shortcuts import redirect, render 10 from urllib.parse import parse_qs, urlencode 11 12 from djangooidc.oidc import Client 13 from djangooidc import exceptions as o_e 14 from registrar.models import User 15 16 logger = logging.getLogger(__name__) 17 18 try: 19 # Initialize provider using pyOICD 20 OP = getattr(settings, "OIDC_ACTIVE_PROVIDER") 21 CLIENT = Client(OP) 22 logger.debug("client initialized %s" % CLIENT) 23 except Exception as err: 24 CLIENT = None # type: ignore 25 logger.warning(err) 26 logger.warning("Unable to configure OpenID Connect provider. Users cannot log in.") 27 28 29 def error_page(request, error): 30 """Display a sensible message and log the error.""" 31 logger.error(error) 32 if isinstance(error, o_e.AuthenticationFailed): 33 return render( 34 request, 35 "401.html", 36 context={ 37 "friendly_message": error.friendly_message, 38 "log_identifier": error.locator, 39 }, 40 status=401, 41 ) 42 if isinstance(error, o_e.InternalError): 43 return render( 44 request, 45 "500.html", 46 context={ 47 "friendly_message": error.friendly_message, 48 "log_identifier": error.locator, 49 }, 50 status=500, 51 ) 52 if isinstance(error, Exception): 53 return render(request, "500.html", status=500) 54 55 56 def openid(request): 57 """Redirect the user to an authentication provider (OP).""" 58 # If the session reset because of a server restart, attempt to login again 59 request.session["acr_value"] = CLIENT.get_default_acr_value() 60 61 request.session["next"] = request.GET.get("next", "/") 62 63 try: 64 return CLIENT.create_authn_request(request.session) 65 except Exception as err: 66 return error_page(request, err) 67 68 69 def login_callback(request): 70 """Analyze the token returned by the authentication provider (OP).""" 71 try: 72 query = parse_qs(request.GET.urlencode()) 73 userinfo = CLIENT.callback(query, request.session) 74 # test for need for identity verification and if it is satisfied 75 # if not satisfied, redirect user to login with stepped up acr_value 76 if requires_step_up_auth(userinfo): 77 # add acr_value to request.session 78 request.session["acr_value"] = CLIENT.get_step_up_acr_value() 79 return CLIENT.create_authn_request(request.session) 80 user = authenticate(request=request, **userinfo) 81 if user: 82 login(request, user) 83 logger.info("Successfully logged in user %s" % user) 84 # Double login bug (1507)? 
85 return redirect(request.session.get("next", "/")) 86 else: 87 raise o_e.BannedUser() 88 except o_e.NoStateDefined as nsd_err: 89 logger.warning(f"No State Defined: {nsd_err}") 90 return redirect(request.session.get("next", "/")) 91 except Exception as err: 92 return error_page(request, err) 93 94 95 def requires_step_up_auth(userinfo): 96 """if User.needs_identity_verification and step_up_acr_value not in 97 ial returned from callback, return True""" 98 step_up_acr_value = CLIENT.get_step_up_acr_value() 99 acr_value = userinfo.get("ial", "") 100 uuid = userinfo.get("sub", "") 101 email = userinfo.get("email", "") 102 if acr_value != step_up_acr_value: 103 # The acr of this attempt is not at the highest level 104 # so check if the user needs the higher level 105 return User.needs_identity_verification(email, uuid) 106 else: 107 # This attempt already came back at the highest level 108 # so does not require step up 109 return False 110 111 112 def logout(request, next_page=None): 113 """Redirect the user to the authentication provider (OP) logout page.""" 114 try: 115 user = request.user 116 request_args = { 117 "client_id": CLIENT.client_id, 118 "state": request.session["state"], 119 } 120 if ( 121 "post_logout_redirect_uris" in CLIENT.registration_response.keys() 122 and len(CLIENT.registration_response["post_logout_redirect_uris"]) > 0 123 ): 124 request_args.update( 125 {"post_logout_redirect_uri": CLIENT.registration_response["post_logout_redirect_uris"][0]} 126 ) 127 url = CLIENT.provider_info["end_session_endpoint"] 128 url += "?" + urlencode(request_args) 129 return HttpResponseRedirect(url) 130 except Exception as err: 131 return error_page(request, err) 132 finally: 133 # Always remove Django session stuff - even if not logged out from OP. 134 # Don't wait for the callback as it may never come. 135 auth_logout(request) 136 logger.info("Successfully logged out user %s" % user) 137 next_page = getattr(settings, "LOGOUT_REDIRECT_URL", None) 138 if next_page: 139 request.session["next"] = next_page 140 141 142 def logout_callback(request): 143 """Simple redirection view: after logout, redirect to `next`.""" 144 next = request.session.get("next", "/") 145 return redirect(next) 146 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py --- a/src/djangooidc/views.py +++ b/src/djangooidc/views.py @@ -15,15 +15,34 @@ logger = logging.getLogger(__name__) -try: +CLIENT = None + + +def _initialize_client(): + """Initialize the OIDC client. Exceptions are allowed to raise + and will need to be caught.""" + global CLIENT # Initialize provider using pyOICD OP = getattr(settings, "OIDC_ACTIVE_PROVIDER") CLIENT = Client(OP) - logger.debug("client initialized %s" % CLIENT) + logger.debug("Client initialized: %s" % CLIENT) + + +def _client_is_none(): + """Return if the CLIENT is currently None.""" + global CLIENT + return CLIENT is None + + +# Initialize CLIENT +try: + _initialize_client() except Exception as err: - CLIENT = None # type: ignore - logger.warning(err) - logger.warning("Unable to configure OpenID Connect provider. Users cannot log in.") + # In the event of an exception, log the error and allow the app load to continue + # without the OIDC Client. Subsequent login attempts will attempt to initialize + # again if Client is None + logger.error(err) + logger.error("Unable to configure OpenID Connect provider. Users cannot log in.") def error_page(request, error): @@ -55,12 +74,15 @@ def openid(request): """Redirect the user to an authentication provider (OP).""" - # If the session reset because of a server restart, attempt to login again - request.session["acr_value"] = CLIENT.get_default_acr_value() - - request.session["next"] = request.GET.get("next", "/") - + global CLIENT try: + # If the CLIENT is none, attempt to reinitialize before handling the request + if _client_is_none(): + logger.debug("OIDC client is None, attempting to initialize") + _initialize_client() + request.session["acr_value"] = CLIENT.get_default_acr_value() + request.session["next"] = request.GET.get("next", "/") + # Create the authentication request return CLIENT.create_authn_request(request.session) except Exception as err: return error_page(request, err) @@ -68,12 +90,17 @@ def login_callback(request): """Analyze the token returned by the authentication provider (OP).""" + global CLIENT try: + # If the CLIENT is none, attempt to reinitialize before handling the request + if _client_is_none(): + logger.debug("OIDC client is None, attempting to initialize") + _initialize_client() query = parse_qs(request.GET.urlencode()) userinfo = CLIENT.callback(query, request.session) # test for need for identity verification and if it is satisfied # if not satisfied, redirect user to login with stepped up acr_value - if requires_step_up_auth(userinfo): + if _requires_step_up_auth(userinfo): # add acr_value to request.session request.session["acr_value"] = CLIENT.get_step_up_acr_value() return CLIENT.create_authn_request(request.session) @@ -86,13 +113,16 @@ else: raise o_e.BannedUser() except o_e.NoStateDefined as nsd_err: + # In the event that a user is in the middle of a login when the app is restarted, + # their session state will no longer be available, so redirect the user to the + # beginning of login process without raising an error to the user. logger.warning(f"No State Defined: {nsd_err}") return redirect(request.session.get("next", "/")) except Exception as err: return error_page(request, err) -def requires_step_up_auth(userinfo): +def _requires_step_up_auth(userinfo): """if User.needs_identity_verification and step_up_acr_value not in ial returned from callback, return True""" step_up_acr_value = CLIENT.get_step_up_acr_value()
{"golden_diff": "diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py\n--- a/src/djangooidc/views.py\n+++ b/src/djangooidc/views.py\n@@ -15,15 +15,34 @@\n \n logger = logging.getLogger(__name__)\n \n-try:\n+CLIENT = None\n+\n+\n+def _initialize_client():\n+ \"\"\"Initialize the OIDC client. Exceptions are allowed to raise\n+ and will need to be caught.\"\"\"\n+ global CLIENT\n # Initialize provider using pyOICD\n OP = getattr(settings, \"OIDC_ACTIVE_PROVIDER\")\n CLIENT = Client(OP)\n- logger.debug(\"client initialized %s\" % CLIENT)\n+ logger.debug(\"Client initialized: %s\" % CLIENT)\n+\n+\n+def _client_is_none():\n+ \"\"\"Return if the CLIENT is currently None.\"\"\"\n+ global CLIENT\n+ return CLIENT is None\n+\n+\n+# Initialize CLIENT\n+try:\n+ _initialize_client()\n except Exception as err:\n- CLIENT = None # type: ignore\n- logger.warning(err)\n- logger.warning(\"Unable to configure OpenID Connect provider. Users cannot log in.\")\n+ # In the event of an exception, log the error and allow the app load to continue\n+ # without the OIDC Client. Subsequent login attempts will attempt to initialize\n+ # again if Client is None\n+ logger.error(err)\n+ logger.error(\"Unable to configure OpenID Connect provider. Users cannot log in.\")\n \n \n def error_page(request, error):\n@@ -55,12 +74,15 @@\n \n def openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n- # If the session reset because of a server restart, attempt to login again\n- request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n-\n- request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n-\n+ global CLIENT\n try:\n+ # If the CLIENT is none, attempt to reinitialize before handling the request\n+ if _client_is_none():\n+ logger.debug(\"OIDC client is None, attempting to initialize\")\n+ _initialize_client()\n+ request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n+ request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n+ # Create the authentication request\n return CLIENT.create_authn_request(request.session)\n except Exception as err:\n return error_page(request, err)\n@@ -68,12 +90,17 @@\n \n def login_callback(request):\n \"\"\"Analyze the token returned by the authentication provider (OP).\"\"\"\n+ global CLIENT\n try:\n+ # If the CLIENT is none, attempt to reinitialize before handling the request\n+ if _client_is_none():\n+ logger.debug(\"OIDC client is None, attempting to initialize\")\n+ _initialize_client()\n query = parse_qs(request.GET.urlencode())\n userinfo = CLIENT.callback(query, request.session)\n # test for need for identity verification and if it is satisfied\n # if not satisfied, redirect user to login with stepped up acr_value\n- if requires_step_up_auth(userinfo):\n+ if _requires_step_up_auth(userinfo):\n # add acr_value to request.session\n request.session[\"acr_value\"] = CLIENT.get_step_up_acr_value()\n return CLIENT.create_authn_request(request.session)\n@@ -86,13 +113,16 @@\n else:\n raise o_e.BannedUser()\n except o_e.NoStateDefined as nsd_err:\n+ # In the event that a user is in the middle of a login when the app is restarted,\n+ # their session state will no longer be available, so redirect the user to the\n+ # beginning of login process without raising an error to the user.\n logger.warning(f\"No State Defined: {nsd_err}\")\n return redirect(request.session.get(\"next\", \"/\"))\n except Exception as err:\n return error_page(request, err)\n \n \n-def requires_step_up_auth(userinfo):\n+def _requires_step_up_auth(userinfo):\n 
\"\"\"if User.needs_identity_verification and step_up_acr_value not in\n ial returned from callback, return True\"\"\"\n step_up_acr_value = CLIENT.get_step_up_acr_value()\n", "issue": "Increase reliability on OIDC connection\n### Issue description\n\nWhile investigating #1726 we realized there is some areas for improvement in our handling of the connection to OIDC so that all scenarios of failure show a properly formatted 500 error page\n\n### Acceptance criteria\n\n- [ ] Make sure all login.gov/identity sandbox connection issues result in the usual 500 error \r\n- [ ] refactor the connection set up as needed\n\n### Additional context\n\n_No response_\n\n### Links to other issues\n\nrelates to: #1726\n", "before_files": [{"content": "# coding: utf-8\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom urllib.parse import parse_qs, urlencode\n\nfrom djangooidc.oidc import Client\nfrom djangooidc import exceptions as o_e\nfrom registrar.models import User\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Initialize provider using pyOICD\n OP = getattr(settings, \"OIDC_ACTIVE_PROVIDER\")\n CLIENT = Client(OP)\n logger.debug(\"client initialized %s\" % CLIENT)\nexcept Exception as err:\n CLIENT = None # type: ignore\n logger.warning(err)\n logger.warning(\"Unable to configure OpenID Connect provider. Users cannot log in.\")\n\n\ndef error_page(request, error):\n \"\"\"Display a sensible message and log the error.\"\"\"\n logger.error(error)\n if isinstance(error, o_e.AuthenticationFailed):\n return render(\n request,\n \"401.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=401,\n )\n if isinstance(error, o_e.InternalError):\n return render(\n request,\n \"500.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=500,\n )\n if isinstance(error, Exception):\n return render(request, \"500.html\", status=500)\n\n\ndef openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n # If the session reset because of a server restart, attempt to login again\n request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n\n request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n\n try:\n return CLIENT.create_authn_request(request.session)\n except Exception as err:\n return error_page(request, err)\n\n\ndef login_callback(request):\n \"\"\"Analyze the token returned by the authentication provider (OP).\"\"\"\n try:\n query = parse_qs(request.GET.urlencode())\n userinfo = CLIENT.callback(query, request.session)\n # test for need for identity verification and if it is satisfied\n # if not satisfied, redirect user to login with stepped up acr_value\n if requires_step_up_auth(userinfo):\n # add acr_value to request.session\n request.session[\"acr_value\"] = CLIENT.get_step_up_acr_value()\n return CLIENT.create_authn_request(request.session)\n user = authenticate(request=request, **userinfo)\n if user:\n login(request, user)\n logger.info(\"Successfully logged in user %s\" % user)\n # Double login bug (1507)?\n return redirect(request.session.get(\"next\", \"/\"))\n else:\n raise o_e.BannedUser()\n except o_e.NoStateDefined as nsd_err:\n logger.warning(f\"No State Defined: {nsd_err}\")\n return redirect(request.session.get(\"next\", 
\"/\"))\n except Exception as err:\n return error_page(request, err)\n\n\ndef requires_step_up_auth(userinfo):\n \"\"\"if User.needs_identity_verification and step_up_acr_value not in\n ial returned from callback, return True\"\"\"\n step_up_acr_value = CLIENT.get_step_up_acr_value()\n acr_value = userinfo.get(\"ial\", \"\")\n uuid = userinfo.get(\"sub\", \"\")\n email = userinfo.get(\"email\", \"\")\n if acr_value != step_up_acr_value:\n # The acr of this attempt is not at the highest level\n # so check if the user needs the higher level\n return User.needs_identity_verification(email, uuid)\n else:\n # This attempt already came back at the highest level\n # so does not require step up\n return False\n\n\ndef logout(request, next_page=None):\n \"\"\"Redirect the user to the authentication provider (OP) logout page.\"\"\"\n try:\n user = request.user\n request_args = {\n \"client_id\": CLIENT.client_id,\n \"state\": request.session[\"state\"],\n }\n if (\n \"post_logout_redirect_uris\" in CLIENT.registration_response.keys()\n and len(CLIENT.registration_response[\"post_logout_redirect_uris\"]) > 0\n ):\n request_args.update(\n {\"post_logout_redirect_uri\": CLIENT.registration_response[\"post_logout_redirect_uris\"][0]}\n )\n url = CLIENT.provider_info[\"end_session_endpoint\"]\n url += \"?\" + urlencode(request_args)\n return HttpResponseRedirect(url)\n except Exception as err:\n return error_page(request, err)\n finally:\n # Always remove Django session stuff - even if not logged out from OP.\n # Don't wait for the callback as it may never come.\n auth_logout(request)\n logger.info(\"Successfully logged out user %s\" % user)\n next_page = getattr(settings, \"LOGOUT_REDIRECT_URL\", None)\n if next_page:\n request.session[\"next\"] = next_page\n\n\ndef logout_callback(request):\n \"\"\"Simple redirection view: after logout, redirect to `next`.\"\"\"\n next = request.session.get(\"next\", \"/\")\n return redirect(next)\n", "path": "src/djangooidc/views.py"}], "after_files": [{"content": "# coding: utf-8\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom urllib.parse import parse_qs, urlencode\n\nfrom djangooidc.oidc import Client\nfrom djangooidc import exceptions as o_e\nfrom registrar.models import User\n\nlogger = logging.getLogger(__name__)\n\nCLIENT = None\n\n\ndef _initialize_client():\n \"\"\"Initialize the OIDC client. Exceptions are allowed to raise\n and will need to be caught.\"\"\"\n global CLIENT\n # Initialize provider using pyOICD\n OP = getattr(settings, \"OIDC_ACTIVE_PROVIDER\")\n CLIENT = Client(OP)\n logger.debug(\"Client initialized: %s\" % CLIENT)\n\n\ndef _client_is_none():\n \"\"\"Return if the CLIENT is currently None.\"\"\"\n global CLIENT\n return CLIENT is None\n\n\n# Initialize CLIENT\ntry:\n _initialize_client()\nexcept Exception as err:\n # In the event of an exception, log the error and allow the app load to continue\n # without the OIDC Client. Subsequent login attempts will attempt to initialize\n # again if Client is None\n logger.error(err)\n logger.error(\"Unable to configure OpenID Connect provider. 
Users cannot log in.\")\n\n\ndef error_page(request, error):\n \"\"\"Display a sensible message and log the error.\"\"\"\n logger.error(error)\n if isinstance(error, o_e.AuthenticationFailed):\n return render(\n request,\n \"401.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=401,\n )\n if isinstance(error, o_e.InternalError):\n return render(\n request,\n \"500.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=500,\n )\n if isinstance(error, Exception):\n return render(request, \"500.html\", status=500)\n\n\ndef openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n global CLIENT\n try:\n # If the CLIENT is none, attempt to reinitialize before handling the request\n if _client_is_none():\n logger.debug(\"OIDC client is None, attempting to initialize\")\n _initialize_client()\n request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n # Create the authentication request\n return CLIENT.create_authn_request(request.session)\n except Exception as err:\n return error_page(request, err)\n\n\ndef login_callback(request):\n \"\"\"Analyze the token returned by the authentication provider (OP).\"\"\"\n global CLIENT\n try:\n # If the CLIENT is none, attempt to reinitialize before handling the request\n if _client_is_none():\n logger.debug(\"OIDC client is None, attempting to initialize\")\n _initialize_client()\n query = parse_qs(request.GET.urlencode())\n userinfo = CLIENT.callback(query, request.session)\n # test for need for identity verification and if it is satisfied\n # if not satisfied, redirect user to login with stepped up acr_value\n if _requires_step_up_auth(userinfo):\n # add acr_value to request.session\n request.session[\"acr_value\"] = CLIENT.get_step_up_acr_value()\n return CLIENT.create_authn_request(request.session)\n user = authenticate(request=request, **userinfo)\n if user:\n login(request, user)\n logger.info(\"Successfully logged in user %s\" % user)\n # Double login bug (1507)?\n return redirect(request.session.get(\"next\", \"/\"))\n else:\n raise o_e.BannedUser()\n except o_e.NoStateDefined as nsd_err:\n # In the event that a user is in the middle of a login when the app is restarted,\n # their session state will no longer be available, so redirect the user to the\n # beginning of login process without raising an error to the user.\n logger.warning(f\"No State Defined: {nsd_err}\")\n return redirect(request.session.get(\"next\", \"/\"))\n except Exception as err:\n return error_page(request, err)\n\n\ndef _requires_step_up_auth(userinfo):\n \"\"\"if User.needs_identity_verification and step_up_acr_value not in\n ial returned from callback, return True\"\"\"\n step_up_acr_value = CLIENT.get_step_up_acr_value()\n acr_value = userinfo.get(\"ial\", \"\")\n uuid = userinfo.get(\"sub\", \"\")\n email = userinfo.get(\"email\", \"\")\n if acr_value != step_up_acr_value:\n # The acr of this attempt is not at the highest level\n # so check if the user needs the higher level\n return User.needs_identity_verification(email, uuid)\n else:\n # This attempt already came back at the highest level\n # so does not require step up\n return False\n\n\ndef logout(request, next_page=None):\n \"\"\"Redirect the user to the authentication provider (OP) logout page.\"\"\"\n try:\n user = request.user\n request_args = {\n \"client_id\": CLIENT.client_id,\n 
\"state\": request.session[\"state\"],\n }\n if (\n \"post_logout_redirect_uris\" in CLIENT.registration_response.keys()\n and len(CLIENT.registration_response[\"post_logout_redirect_uris\"]) > 0\n ):\n request_args.update(\n {\"post_logout_redirect_uri\": CLIENT.registration_response[\"post_logout_redirect_uris\"][0]}\n )\n url = CLIENT.provider_info[\"end_session_endpoint\"]\n url += \"?\" + urlencode(request_args)\n return HttpResponseRedirect(url)\n except Exception as err:\n return error_page(request, err)\n finally:\n # Always remove Django session stuff - even if not logged out from OP.\n # Don't wait for the callback as it may never come.\n auth_logout(request)\n logger.info(\"Successfully logged out user %s\" % user)\n next_page = getattr(settings, \"LOGOUT_REDIRECT_URL\", None)\n if next_page:\n request.session[\"next\"] = next_page\n\n\ndef logout_callback(request):\n \"\"\"Simple redirection view: after logout, redirect to `next`.\"\"\"\n next = request.session.get(\"next\", \"/\")\n return redirect(next)\n", "path": "src/djangooidc/views.py"}]}
1,817
924
gh_patches_debug_13360
rasdani/github-patches
git_diff
urllib3__urllib3-60
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- nosetests crashes under IPv4 (error: getsockaddrarg: bad family) Turns out tornado is really eager to use IPv6. Unless you expressly hand the server the address, it doesn't even check for socket IPv6 support. I'll submit a pull request for the one-line fix in dummyserver/server.py momentarily. Source: https://groups.google.com/group/python-tornado/browse_thread/thread/3ec04536e57a2833?pli=1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dummyserver/server.py` Content: ``` 1 #!/usr/bin/env python 2 3 """ 4 Dummy server used for unit testing. 5 """ 6 from __future__ import print_function 7 8 import logging 9 import os 10 import sys 11 import threading 12 import socket 13 14 import tornado.wsgi 15 import tornado.httpserver 16 import tornado.ioloop 17 18 from dummyserver.handlers import TestingApp 19 20 21 log = logging.getLogger(__name__) 22 23 CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs') 24 DEFAULT_CERTS = { 25 'certfile': os.path.join(CERTS_PATH, 'server.crt'), 26 'keyfile': os.path.join(CERTS_PATH, 'server.key'), 27 } 28 DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem') 29 DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem') 30 31 32 # Different types of servers we have: 33 34 35 class SocketServerThread(threading.Thread): 36 """ 37 :param socket_handler: Callable which receives a socket argument for one 38 request. 39 :param ready_lock: Lock which gets released when the socket handler is 40 ready to receive requests. 41 """ 42 def __init__(self, socket_handler, host='localhost', port=8081, 43 ready_lock=None): 44 threading.Thread.__init__(self) 45 46 self.socket_handler = socket_handler 47 self.host = host 48 self.port = port 49 self.ready_lock = ready_lock 50 51 def _start_server(self): 52 sock = socket.socket() 53 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 54 sock.bind((self.host, self.port)) 55 56 # Once listen() returns, the server socket is ready 57 sock.listen(1) 58 59 if self.ready_lock: 60 self.ready_lock.release() 61 62 self.socket_handler(sock) 63 64 def run(self): 65 self.server = self._start_server() 66 67 68 class TornadoServerThread(threading.Thread): 69 def __init__(self, host='localhost', port=8081, scheme='http', certs=None): 70 threading.Thread.__init__(self) 71 72 self.host = host 73 self.port = port 74 self.scheme = scheme 75 self.certs = certs 76 77 def _start_server(self): 78 container = tornado.wsgi.WSGIContainer(TestingApp()) 79 80 if self.scheme == 'https': 81 http_server = tornado.httpserver.HTTPServer(container, 82 ssl_options=self.certs) 83 else: 84 http_server = tornado.httpserver.HTTPServer(container) 85 86 http_server.listen(self.port) 87 return http_server 88 89 def run(self): 90 self.server = self._start_server() 91 self.ioloop = tornado.ioloop.IOLoop.instance() 92 self.ioloop.start() 93 94 def stop(self): 95 self.server.stop() 96 self.ioloop.stop() 97 98 99 if __name__ == '__main__': 100 log.setLevel(logging.DEBUG) 101 log.addHandler(logging.StreamHandler(sys.stderr)) 102 103 from urllib3 import get_host 104 105 url = "http://localhost:8081" 106 if len(sys.argv) > 1: 107 url = sys.argv[1] 108 109 print("Starting WGI server at: %s" % url) 110 111 scheme, host, port = get_host(url) 112 t = TornadoServerThread(scheme=scheme, host=host, port=port) 113 t.start() 114 ``` --- END FILES --- Please first localize 
the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -83,7 +83,7 @@ else: http_server = tornado.httpserver.HTTPServer(container) - http_server.listen(self.port) + http_server.listen(self.port, address=self.host) return http_server def run(self): @@ -106,7 +106,7 @@ if len(sys.argv) > 1: url = sys.argv[1] - print("Starting WGI server at: %s" % url) + print("Starting WSGI server at: %s" % url) scheme, host, port = get_host(url) t = TornadoServerThread(scheme=scheme, host=host, port=port)
{"golden_diff": "diff --git a/dummyserver/server.py b/dummyserver/server.py\n--- a/dummyserver/server.py\n+++ b/dummyserver/server.py\n@@ -83,7 +83,7 @@\n else:\n http_server = tornado.httpserver.HTTPServer(container)\n \n- http_server.listen(self.port)\n+ http_server.listen(self.port, address=self.host)\n return http_server\n \n def run(self):\n@@ -106,7 +106,7 @@\n if len(sys.argv) > 1:\n url = sys.argv[1]\n \n- print(\"Starting WGI server at: %s\" % url)\n+ print(\"Starting WSGI server at: %s\" % url)\n \n scheme, host, port = get_host(url)\n t = TornadoServerThread(scheme=scheme, host=host, port=port)\n", "issue": "nosetests crashes under IPv4 (error: getsockaddrarg: bad family)\nTurns out tornado is really eager to use IPv6. Unless you expressly hand the server the address, it doesn't even check for socket IPv6 support. I'll submit a pull request for the one-line fix in dummyserver/server.py momentarily.\n\nSource: https://groups.google.com/group/python-tornado/browse_thread/thread/3ec04536e57a2833?pli=1\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys\nimport threading\nimport socket\n\nimport tornado.wsgi\nimport tornado.httpserver\nimport tornado.ioloop\n\nfrom dummyserver.handlers import TestingApp\n\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')\nDEFAULT_CERTS = {\n 'certfile': os.path.join(CERTS_PATH, 'server.crt'),\n 'keyfile': os.path.join(CERTS_PATH, 'server.key'),\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')\nDEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')\n\n\n# Different types of servers we have:\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_lock: Lock which gets released when the socket handler is\n ready to receive requests.\n \"\"\"\n def __init__(self, socket_handler, host='localhost', port=8081,\n ready_lock=None):\n threading.Thread.__init__(self)\n\n self.socket_handler = socket_handler\n self.host = host\n self.port = port\n self.ready_lock = ready_lock\n\n def _start_server(self):\n sock = socket.socket()\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, self.port))\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_lock:\n self.ready_lock.release()\n\n self.socket_handler(sock)\n\n def run(self):\n self.server = self._start_server()\n\n\nclass TornadoServerThread(threading.Thread):\n def __init__(self, host='localhost', port=8081, scheme='http', certs=None):\n threading.Thread.__init__(self)\n\n self.host = host\n self.port = port\n self.scheme = scheme\n self.certs = certs\n\n def _start_server(self):\n container = tornado.wsgi.WSGIContainer(TestingApp())\n\n if self.scheme == 'https':\n http_server = tornado.httpserver.HTTPServer(container,\n ssl_options=self.certs)\n else:\n http_server = tornado.httpserver.HTTPServer(container)\n\n http_server.listen(self.port)\n return http_server\n\n def run(self):\n self.server = self._start_server()\n self.ioloop = tornado.ioloop.IOLoop.instance()\n self.ioloop.start()\n\n def stop(self):\n self.server.stop()\n self.ioloop.stop()\n\n\nif __name__ == '__main__':\n log.setLevel(logging.DEBUG)\n log.addHandler(logging.StreamHandler(sys.stderr))\n\n from urllib3 import get_host\n\n url = \"http://localhost:8081\"\n if 
len(sys.argv) > 1:\n url = sys.argv[1]\n\n print(\"Starting WGI server at: %s\" % url)\n\n scheme, host, port = get_host(url)\n t = TornadoServerThread(scheme=scheme, host=host, port=port)\n t.start()\n", "path": "dummyserver/server.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys\nimport threading\nimport socket\n\nimport tornado.wsgi\nimport tornado.httpserver\nimport tornado.ioloop\n\nfrom dummyserver.handlers import TestingApp\n\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')\nDEFAULT_CERTS = {\n 'certfile': os.path.join(CERTS_PATH, 'server.crt'),\n 'keyfile': os.path.join(CERTS_PATH, 'server.key'),\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')\nDEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')\n\n\n# Different types of servers we have:\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_lock: Lock which gets released when the socket handler is\n ready to receive requests.\n \"\"\"\n def __init__(self, socket_handler, host='localhost', port=8081,\n ready_lock=None):\n threading.Thread.__init__(self)\n\n self.socket_handler = socket_handler\n self.host = host\n self.port = port\n self.ready_lock = ready_lock\n\n def _start_server(self):\n sock = socket.socket()\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, self.port))\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_lock:\n self.ready_lock.release()\n\n self.socket_handler(sock)\n\n def run(self):\n self.server = self._start_server()\n\n\nclass TornadoServerThread(threading.Thread):\n def __init__(self, host='localhost', port=8081, scheme='http', certs=None):\n threading.Thread.__init__(self)\n\n self.host = host\n self.port = port\n self.scheme = scheme\n self.certs = certs\n\n def _start_server(self):\n container = tornado.wsgi.WSGIContainer(TestingApp())\n\n if self.scheme == 'https':\n http_server = tornado.httpserver.HTTPServer(container,\n ssl_options=self.certs)\n else:\n http_server = tornado.httpserver.HTTPServer(container)\n\n http_server.listen(self.port, address=self.host)\n return http_server\n\n def run(self):\n self.server = self._start_server()\n self.ioloop = tornado.ioloop.IOLoop.instance()\n self.ioloop.start()\n\n def stop(self):\n self.server.stop()\n self.ioloop.stop()\n\n\nif __name__ == '__main__':\n log.setLevel(logging.DEBUG)\n log.addHandler(logging.StreamHandler(sys.stderr))\n\n from urllib3 import get_host\n\n url = \"http://localhost:8081\"\n if len(sys.argv) > 1:\n url = sys.argv[1]\n\n print(\"Starting WSGI server at: %s\" % url)\n\n scheme, host, port = get_host(url)\n t = TornadoServerThread(scheme=scheme, host=host, port=port)\n t.start()\n", "path": "dummyserver/server.py"}]}
1,305
187
gh_patches_debug_22208
rasdani/github-patches
git_diff
wagtail__wagtail-1576
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Project template needs updating for search promotions changes The provided view still references the EditorsPick model: https://github.com/torchbox/wagtail/blob/master/wagtail/project_template/search/views.py#L5 Shall we update this to use the new contrib module or remove it completely? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/project_template/search/views.py` Content: ``` 1 from django.shortcuts import render 2 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger 3 4 from wagtail.wagtailcore.models import Page 5 from wagtail.wagtailsearch.models import Query, EditorsPick 6 7 8 def search(request): 9 search_query = request.GET.get('query', None) 10 page = request.GET.get('page', 1) 11 12 # Search 13 if search_query: 14 search_results = Page.objects.live().search(search_query) 15 query = Query.get(search_query) 16 17 # Record hit 18 query.add_hit() 19 20 # Get search picks 21 search_picks = query.editors_picks.all() 22 else: 23 search_results = Page.objects.none() 24 search_picks = EditorsPick.objects.none() 25 26 # Pagination 27 paginator = Paginator(search_results, 10) 28 try: 29 search_results = paginator.page(page) 30 except PageNotAnInteger: 31 search_results = paginator.page(1) 32 except EmptyPage: 33 search_results = paginator.page(paginator.num_pages) 34 35 return render(request, 'search/search.html', { 36 'search_query': search_query, 37 'search_results': search_results, 38 'search_picks': search_picks, 39 }) 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/project_template/search/views.py b/wagtail/project_template/search/views.py --- a/wagtail/project_template/search/views.py +++ b/wagtail/project_template/search/views.py @@ -2,7 +2,7 @@ from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from wagtail.wagtailcore.models import Page -from wagtail.wagtailsearch.models import Query, EditorsPick +from wagtail.wagtailsearch.models import Query def search(request): @@ -16,12 +16,8 @@ # Record hit query.add_hit() - - # Get search picks - search_picks = query.editors_picks.all() else: search_results = Page.objects.none() - search_picks = EditorsPick.objects.none() # Pagination paginator = Paginator(search_results, 10) @@ -35,5 +31,4 @@ return render(request, 'search/search.html', { 'search_query': search_query, 'search_results': search_results, - 'search_picks': search_picks, })
{"golden_diff": "diff --git a/wagtail/project_template/search/views.py b/wagtail/project_template/search/views.py\n--- a/wagtail/project_template/search/views.py\n+++ b/wagtail/project_template/search/views.py\n@@ -2,7 +2,7 @@\n from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n \n from wagtail.wagtailcore.models import Page\n-from wagtail.wagtailsearch.models import Query, EditorsPick\n+from wagtail.wagtailsearch.models import Query\n \n \n def search(request):\n@@ -16,12 +16,8 @@\n \n # Record hit\n query.add_hit()\n-\n- # Get search picks\n- search_picks = query.editors_picks.all()\n else:\n search_results = Page.objects.none()\n- search_picks = EditorsPick.objects.none()\n \n # Pagination\n paginator = Paginator(search_results, 10)\n@@ -35,5 +31,4 @@\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n- 'search_picks': search_picks,\n })\n", "issue": "Project template needs updating for search promotions changes\nThe provided view still references the EditorsPick model: https://github.com/torchbox/wagtail/blob/master/wagtail/project_template/search/views.py#L5\n\nShall we update this to use the new contrib module or remove it completely?\n\n", "before_files": [{"content": "from django.shortcuts import render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch.models import Query, EditorsPick\n\n\ndef search(request):\n search_query = request.GET.get('query', None)\n page = request.GET.get('page', 1)\n\n # Search\n if search_query:\n search_results = Page.objects.live().search(search_query)\n query = Query.get(search_query)\n\n # Record hit\n query.add_hit()\n\n # Get search picks\n search_picks = query.editors_picks.all()\n else:\n search_results = Page.objects.none()\n search_picks = EditorsPick.objects.none()\n\n # Pagination\n paginator = Paginator(search_results, 10)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n 'search_picks': search_picks,\n })\n", "path": "wagtail/project_template/search/views.py"}], "after_files": [{"content": "from django.shortcuts import render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(request):\n search_query = request.GET.get('query', None)\n page = request.GET.get('page', 1)\n\n # Search\n if search_query:\n search_results = Page.objects.live().search(search_query)\n query = Query.get(search_query)\n\n # Record hit\n query.add_hit()\n else:\n search_results = Page.objects.none()\n\n # Pagination\n paginator = Paginator(search_results, 10)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n })\n", "path": "wagtail/project_template/search/views.py"}]}
649
246
gh_patches_debug_35077
rasdani/github-patches
git_diff
svthalia__concrexit-1246
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Send email with Thalia Pay payments that will be withdrawn on batch processed ### Describe the solution you'd like When a payment batch is processed, all users that are in that batch should receive an email notifying them that we will withdraw some amount from their bank account. ### Motivation It is required for the SEPA direct debit mandate --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/payments/admin_views.py` Content: ``` 1 """Admin views provided by the payments package""" 2 import csv 3 4 from django.apps import apps 5 from django.contrib import messages 6 from django.contrib.admin.utils import model_ngettext 7 from django.contrib.admin.views.decorators import staff_member_required 8 from django.contrib.auth.decorators import permission_required 9 from django.db.models import Sum, Count, Min, Max 10 from django.http import HttpResponse 11 from django.core.exceptions import SuspiciousOperation, DisallowedRedirect 12 from django.shortcuts import redirect 13 from django.utils import timezone 14 from django.utils.text import capfirst 15 from django.utils.decorators import method_decorator 16 from django.utils.http import url_has_allowed_host_and_scheme 17 from django.utils.translation import gettext_lazy as _ 18 from django.views import View 19 20 from members.models import Member 21 from payments import services 22 from .models import Payment, Batch 23 24 25 @method_decorator(staff_member_required, name="dispatch") 26 @method_decorator( 27 permission_required("payments.process_payments"), name="dispatch", 28 ) 29 class PaymentAdminView(View): 30 """ 31 View that creates a payment 32 """ 33 34 def post(self, request, *args, app_label, model_name, payable, **kwargs): 35 if "type" not in request.POST: 36 raise SuspiciousOperation("Missing POST parameters") 37 38 if "next" in request.POST and not url_has_allowed_host_and_scheme( 39 request.POST.get("next"), allowed_hosts={request.get_host()} 40 ): 41 raise DisallowedRedirect 42 43 payable_model = apps.get_model(app_label=app_label, model_name=model_name) 44 payable_obj = payable_model.objects.get(pk=payable) 45 46 result = services.create_payment( 47 payable_obj, request.member, request.POST["type"] 48 ) 49 payable_obj.save() 50 51 if result: 52 messages.success( 53 request, _("Successfully paid %s.") % model_ngettext(payable_obj, 1), 54 ) 55 else: 56 messages.error( 57 request, _("Could not pay %s.") % model_ngettext(payable_obj, 1), 58 ) 59 return redirect(f"admin:{app_label}_{model_name}_change", payable_obj.pk) 60 61 if "next" in request.POST: 62 return redirect(request.POST["next"]) 63 64 return redirect("admin:payments_payment_change", result.pk) 65 66 67 @method_decorator(staff_member_required, name="dispatch") 68 @method_decorator( 69 permission_required("payments.process_batches"), name="dispatch", 70 ) 71 class BatchProcessAdminView(View): 72 """ 73 View that processes a batch 74 """ 75 76 def post(self, request, *args, **kwargs): 77 batch = Batch.objects.get(pk=kwargs["pk"]) 78 79 if "next" in request.POST and not url_has_allowed_host_and_scheme( 80 request.POST.get("next"), allowed_hosts={request.get_host()} 81 ): 82 raise DisallowedRedirect 83 84 if batch.processed: 85 messages.error( 86 request, _("{} already processed.").format(model_ngettext(batch, 1)) 87 ) 88 else: 89 batch.processed = True 90 payments = 
batch.payments_set.select_related("paid_by") 91 for payment in payments: 92 bank_account = payment.paid_by.bank_accounts.last() 93 bank_account.last_used = timezone.now() 94 bank_account.save() 95 96 batch.save() 97 messages.success( 98 request, 99 _("Successfully processed {}.").format(model_ngettext(batch, 1)), 100 ) 101 102 if "next" in request.POST: 103 return redirect(request.POST["next"]) 104 105 return redirect("admin:payments_batch_change", kwargs["pk"]) 106 107 108 @method_decorator(staff_member_required, name="dispatch") 109 @method_decorator( 110 permission_required("payments.process_batches"), name="dispatch", 111 ) 112 class BatchExportAdminView(View): 113 """ 114 View that exports a batch 115 """ 116 117 def post(self, request, *args, **kwargs): 118 batch = Batch.objects.get(pk=kwargs["pk"]) 119 120 response = HttpResponse(content_type="text/csv") 121 response["Content-Disposition"] = 'attachment;filename="batch.csv"' 122 writer = csv.writer(response) 123 headers = [ 124 _("Account holder"), 125 _("IBAN"), 126 _("Mandate Reference"), 127 _("Amount"), 128 _("Description"), 129 _("Mandate Date"), 130 ] 131 writer.writerow([capfirst(x) for x in headers]) 132 133 member_rows = batch.payments_set.values("paid_by").annotate(total=Sum("amount")) 134 135 for row in member_rows: 136 member = Member.objects.get(id=row["paid_by"]) 137 bankaccount = member.bank_accounts.last() 138 writer.writerow( 139 [ 140 member.get_full_name(), 141 bankaccount.iban, 142 bankaccount.mandate_no, 143 f"{row['total']:.2f}", 144 batch.description, 145 bankaccount.valid_from, 146 ] 147 ) 148 return response 149 150 151 @method_decorator(staff_member_required, name="dispatch") 152 @method_decorator( 153 permission_required("payments.process_batches"), name="dispatch", 154 ) 155 class BatchTopicExportAdminView(View): 156 """ 157 View that exports a batch per topic 158 """ 159 160 def post(self, request, *args, **kwargs): 161 batch = Batch.objects.get(pk=kwargs["pk"]) 162 163 response = HttpResponse(content_type="text/csv") 164 response["Content-Disposition"] = 'attachment;filename="batch-topic.csv"' 165 writer = csv.writer(response) 166 headers = [ 167 _("Topic"), 168 _("No. of payments"), 169 _("First payment"), 170 _("Last payment"), 171 _("Total amount"), 172 ] 173 writer.writerow([capfirst(x) for x in headers]) 174 175 topic_rows = ( 176 batch.payments_set.values("topic") 177 .annotate( 178 total=Sum("amount"), 179 count=Count("paid_by"), 180 min_date=Min("created_at"), 181 max_date=Max("created_at"), 182 ) 183 .order_by("topic") 184 ) 185 186 for row in topic_rows: 187 writer.writerow( 188 [ 189 row["topic"], 190 row["count"], 191 timezone.localtime(row["min_date"]).date(), 192 timezone.localtime(row["max_date"]).date(), 193 f"{row['total']:.2f}", 194 ] 195 ) 196 return response 197 198 199 @method_decorator(staff_member_required, name="dispatch") 200 @method_decorator( 201 permission_required("payments.process_batches"), name="dispatch", 202 ) 203 class BatchNewFilledAdminView(View): 204 """ 205 View that adds a new batch filled with all payments that where not already in a batch. 
206 """ 207 208 def get(self, request, *args, **kwargs): 209 batch = Batch() 210 batch.save() 211 212 payments = Payment.objects.filter(type=Payment.TPAY, batch=None,) 213 214 payments.update(batch=batch) 215 216 return redirect("admin:payments_batch_change", object_id=batch.id) 217 ``` Path: `website/payments/services.py` Content: ``` 1 """The services defined by the payments package""" 2 import datetime 3 from typing import Union 4 5 from django.conf import settings 6 from django.db.models import QuerySet, Q 7 from django.utils import timezone 8 from django.utils.translation import gettext_lazy as _ 9 10 from members.models import Member 11 from .exceptions import PaymentError 12 from .models import Payment, BankAccount, Payable 13 14 15 def create_payment( 16 payable: Payable, 17 processed_by: Member, 18 pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY], 19 ) -> Payment: 20 """ 21 Create a new payment from a payable object 22 23 :param payable: Payable object 24 :param processed_by: Member that processed this payment 25 :param pay_type: Payment type 26 :return: Payment object 27 """ 28 if pay_type == Payment.TPAY and not payable.payment_payer.tpay_enabled: 29 raise PaymentError(_("This user does not have Thalia Pay enabled")) 30 31 if payable.payment is not None: 32 payable.payment.amount = payable.payment_amount 33 payable.payment.notes = payable.payment_notes 34 payable.payment.topic = payable.payment_topic 35 payable.payment.paid_by = payable.payment_payer 36 payable.payment.processed_by = processed_by 37 payable.payment.type = pay_type 38 payable.payment.save() 39 else: 40 payable.payment = Payment.objects.create( 41 processed_by=processed_by, 42 amount=payable.payment_amount, 43 notes=payable.payment_notes, 44 topic=payable.payment_topic, 45 paid_by=payable.payment_payer, 46 type=pay_type, 47 ) 48 return payable.payment 49 50 51 def delete_payment(payable: Payable): 52 """ 53 Removes a payment from a payable object 54 :param payable: Payable object 55 :return: 56 """ 57 payment = payable.payment 58 if payment.created_at < timezone.now() - timezone.timedelta( 59 seconds=settings.PAYMENT_CHANGE_WINDOW 60 ): 61 raise PaymentError(_("You are not authorized to delete this payment.")) 62 63 payable.payment = None 64 payable.save() 65 payment.delete() 66 67 68 def update_last_used(queryset: QuerySet, date: datetime.date = None) -> int: 69 """ 70 Update the last used field of a BankAccount queryset 71 72 :param queryset: Queryset of BankAccounts 73 :param date: date to set last_used to 74 :return: number of affected rows 75 """ 76 if not date: 77 date = timezone.now().date() 78 79 result = queryset.filter( 80 (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now())) 81 | Q(valid_until=None) 82 ).update(last_used=date) 83 return result 84 85 86 def revoke_old_mandates() -> int: 87 """ 88 Revokes all mandates that have not been used for 36 months or more 89 :return: number of affected rows 90 """ 91 return BankAccount.objects.filter( 92 last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30)) 93 ).update(valid_until=timezone.now().date()) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/payments/admin_views.py b/website/payments/admin_views.py --- a/website/payments/admin_views.py +++ b/website/payments/admin_views.py @@ -94,6 +94,9 @@ bank_account.save() batch.save() + + services.send_tpay_batch_processing_emails(batch) + messages.success( request, _("Successfully processed {}.").format(model_ngettext(batch, 1)), diff --git a/website/payments/services.py b/website/payments/services.py --- a/website/payments/services.py +++ b/website/payments/services.py @@ -3,11 +3,13 @@ from typing import Union from django.conf import settings -from django.db.models import QuerySet, Q -from django.utils import timezone +from django.db.models import QuerySet, Q, Sum +from django.urls import reverse +from django.utils import timezone, translation from django.utils.translation import gettext_lazy as _ from members.models import Member +from registrations.emails import _send_email from .exceptions import PaymentError from .models import Payment, BankAccount, Payable @@ -91,3 +93,32 @@ return BankAccount.objects.filter( last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30)) ).update(valid_until=timezone.now().date()) + + +def send_tpay_batch_processing_emails(batch): + """Sends withdrawal notice emails to all members in a batch""" + member_payments = batch.payments_set.values("paid_by").annotate(total=Sum("amount")) + for member_row in member_payments: + member = Member.objects.get(pk=member_row["paid_by"]) + total_amount = member_row["total"] + + with translation.override(member.profile.language): + _send_email( + member.email, + _("Thalia Pay withdrawal notice"), + "payments/email/tpay_withdrawal_notice_mail.txt", + { + "name": member.get_full_name(), + "batch": batch, + "bank_account": member.bank_accounts.filter( + mandate_no__isnull=False + ).last(), + "creditor_id": settings.SEPA_CREDITOR_ID, + "payments": batch.payments_set.filter(paid_by=member), + "total_amount": total_amount, + "payments_url": ( + settings.BASE_URL + reverse("payments:payment-list",) + ), + }, + ) + return len(member_payments)
{"golden_diff": "diff --git a/website/payments/admin_views.py b/website/payments/admin_views.py\n--- a/website/payments/admin_views.py\n+++ b/website/payments/admin_views.py\n@@ -94,6 +94,9 @@\n bank_account.save()\n \n batch.save()\n+\n+ services.send_tpay_batch_processing_emails(batch)\n+\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\ndiff --git a/website/payments/services.py b/website/payments/services.py\n--- a/website/payments/services.py\n+++ b/website/payments/services.py\n@@ -3,11 +3,13 @@\n from typing import Union\n \n from django.conf import settings\n-from django.db.models import QuerySet, Q\n-from django.utils import timezone\n+from django.db.models import QuerySet, Q, Sum\n+from django.urls import reverse\n+from django.utils import timezone, translation\n from django.utils.translation import gettext_lazy as _\n \n from members.models import Member\n+from registrations.emails import _send_email\n from .exceptions import PaymentError\n from .models import Payment, BankAccount, Payable\n \n@@ -91,3 +93,32 @@\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n ).update(valid_until=timezone.now().date())\n+\n+\n+def send_tpay_batch_processing_emails(batch):\n+ \"\"\"Sends withdrawal notice emails to all members in a batch\"\"\"\n+ member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n+ for member_row in member_payments:\n+ member = Member.objects.get(pk=member_row[\"paid_by\"])\n+ total_amount = member_row[\"total\"]\n+\n+ with translation.override(member.profile.language):\n+ _send_email(\n+ member.email,\n+ _(\"Thalia Pay withdrawal notice\"),\n+ \"payments/email/tpay_withdrawal_notice_mail.txt\",\n+ {\n+ \"name\": member.get_full_name(),\n+ \"batch\": batch,\n+ \"bank_account\": member.bank_accounts.filter(\n+ mandate_no__isnull=False\n+ ).last(),\n+ \"creditor_id\": settings.SEPA_CREDITOR_ID,\n+ \"payments\": batch.payments_set.filter(paid_by=member),\n+ \"total_amount\": total_amount,\n+ \"payments_url\": (\n+ settings.BASE_URL + reverse(\"payments:payment-list\",)\n+ ),\n+ },\n+ )\n+ return len(member_payments)\n", "issue": "Send email with Thalia Pay payments that will be withdrawn on batch processed\n### Describe the solution you'd like\r\nWhen a payment batch is processed, all users that are in that batch should receive an email notifying them that we will withdraw some amount from their bank account.\r\n\r\n### Motivation\r\nIt is required for the SEPA direct debit mandate\n", "before_files": [{"content": "\"\"\"Admin views provided by the payments package\"\"\"\nimport csv\n\nfrom django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db.models import Sum, Count, Min, Max\nfrom django.http import HttpResponse\nfrom django.core.exceptions import SuspiciousOperation, DisallowedRedirect\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.text import capfirst\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import url_has_allowed_host_and_scheme\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom members.models import Member\nfrom payments import services\nfrom .models import Payment, 
Batch\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_payments\"), name=\"dispatch\",\n)\nclass PaymentAdminView(View):\n \"\"\"\n View that creates a payment\n \"\"\"\n\n def post(self, request, *args, app_label, model_name, payable, **kwargs):\n if \"type\" not in request.POST:\n raise SuspiciousOperation(\"Missing POST parameters\")\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable_obj = payable_model.objects.get(pk=payable)\n\n result = services.create_payment(\n payable_obj, request.member, request.POST[\"type\"]\n )\n payable_obj.save()\n\n if result:\n messages.success(\n request, _(\"Successfully paid %s.\") % model_ngettext(payable_obj, 1),\n )\n else:\n messages.error(\n request, _(\"Could not pay %s.\") % model_ngettext(payable_obj, 1),\n )\n return redirect(f\"admin:{app_label}_{model_name}_change\", payable_obj.pk)\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_payment_change\", result.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchProcessAdminView(View):\n \"\"\"\n View that processes a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n if batch.processed:\n messages.error(\n request, _(\"{} already processed.\").format(model_ngettext(batch, 1))\n )\n else:\n batch.processed = True\n payments = batch.payments_set.select_related(\"paid_by\")\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n bank_account.last_used = timezone.now()\n bank_account.save()\n\n batch.save()\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\n )\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_batch_change\", kwargs[\"pk\"])\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchExportAdminView(View):\n \"\"\"\n View that exports a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Account holder\"),\n _(\"IBAN\"),\n _(\"Mandate Reference\"),\n _(\"Amount\"),\n _(\"Description\"),\n _(\"Mandate Date\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n member_rows = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n\n for row in member_rows:\n member = Member.objects.get(id=row[\"paid_by\"])\n bankaccount = member.bank_accounts.last()\n writer.writerow(\n [\n member.get_full_name(),\n bankaccount.iban,\n bankaccount.mandate_no,\n f\"{row['total']:.2f}\",\n batch.description,\n bankaccount.valid_from,\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, 
name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicExportAdminView(View):\n \"\"\"\n View that exports a batch per topic\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch-topic.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Topic\"),\n _(\"No. of payments\"),\n _(\"First payment\"),\n _(\"Last payment\"),\n _(\"Total amount\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n for row in topic_rows:\n writer.writerow(\n [\n row[\"topic\"],\n row[\"count\"],\n timezone.localtime(row[\"min_date\"]).date(),\n timezone.localtime(row[\"max_date\"]).date(),\n f\"{row['total']:.2f}\",\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchNewFilledAdminView(View):\n \"\"\"\n View that adds a new batch filled with all payments that where not already in a batch.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n batch = Batch()\n batch.save()\n\n payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)\n\n payments.update(batch=batch)\n\n return redirect(\"admin:payments_batch_change\", object_id=batch.id)\n", "path": "website/payments/admin_views.py"}, {"content": "\"\"\"The services defined by the payments package\"\"\"\nimport datetime\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet, Q\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom .exceptions import PaymentError\nfrom .models import Payment, BankAccount, Payable\n\n\ndef create_payment(\n payable: Payable,\n processed_by: Member,\n pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],\n) -> Payment:\n \"\"\"\n Create a new payment from a payable object\n\n :param payable: Payable object\n :param processed_by: Member that processed this payment\n :param pay_type: Payment type\n :return: Payment object\n \"\"\"\n if pay_type == Payment.TPAY and not payable.payment_payer.tpay_enabled:\n raise PaymentError(_(\"This user does not have Thalia Pay enabled\"))\n\n if payable.payment is not None:\n payable.payment.amount = payable.payment_amount\n payable.payment.notes = payable.payment_notes\n payable.payment.topic = payable.payment_topic\n payable.payment.paid_by = payable.payment_payer\n payable.payment.processed_by = processed_by\n payable.payment.type = pay_type\n payable.payment.save()\n else:\n payable.payment = Payment.objects.create(\n processed_by=processed_by,\n amount=payable.payment_amount,\n notes=payable.payment_notes,\n topic=payable.payment_topic,\n paid_by=payable.payment_payer,\n type=pay_type,\n )\n return payable.payment\n\n\ndef delete_payment(payable: Payable):\n \"\"\"\n Removes a payment from a payable object\n :param payable: Payable object\n :return:\n \"\"\"\n payment = payable.payment\n if payment.created_at < timezone.now() - timezone.timedelta(\n seconds=settings.PAYMENT_CHANGE_WINDOW\n ):\n raise PaymentError(_(\"You are not 
authorized to delete this payment.\"))\n\n payable.payment = None\n payable.save()\n payment.delete()\n\n\ndef update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:\n \"\"\"\n Update the last used field of a BankAccount queryset\n\n :param queryset: Queryset of BankAccounts\n :param date: date to set last_used to\n :return: number of affected rows\n \"\"\"\n if not date:\n date = timezone.now().date()\n\n result = queryset.filter(\n (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))\n | Q(valid_until=None)\n ).update(last_used=date)\n return result\n\n\ndef revoke_old_mandates() -> int:\n \"\"\"\n Revokes all mandates that have not been used for 36 months or more\n :return: number of affected rows\n \"\"\"\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n ).update(valid_until=timezone.now().date())\n", "path": "website/payments/services.py"}], "after_files": [{"content": "\"\"\"Admin views provided by the payments package\"\"\"\nimport csv\n\nfrom django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db.models import Sum, Count, Min, Max\nfrom django.http import HttpResponse\nfrom django.core.exceptions import SuspiciousOperation, DisallowedRedirect\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.text import capfirst\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import url_has_allowed_host_and_scheme\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom members.models import Member\nfrom payments import services\nfrom .models import Payment, Batch\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_payments\"), name=\"dispatch\",\n)\nclass PaymentAdminView(View):\n \"\"\"\n View that creates a payment\n \"\"\"\n\n def post(self, request, *args, app_label, model_name, payable, **kwargs):\n if \"type\" not in request.POST:\n raise SuspiciousOperation(\"Missing POST parameters\")\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable_obj = payable_model.objects.get(pk=payable)\n\n result = services.create_payment(\n payable_obj, request.member, request.POST[\"type\"]\n )\n payable_obj.save()\n\n if result:\n messages.success(\n request, _(\"Successfully paid %s.\") % model_ngettext(payable_obj, 1),\n )\n else:\n messages.error(\n request, _(\"Could not pay %s.\") % model_ngettext(payable_obj, 1),\n )\n return redirect(f\"admin:{app_label}_{model_name}_change\", payable_obj.pk)\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_payment_change\", result.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchProcessAdminView(View):\n \"\"\"\n View that processes a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n if \"next\" in request.POST and not 
url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n if batch.processed:\n messages.error(\n request, _(\"{} already processed.\").format(model_ngettext(batch, 1))\n )\n else:\n batch.processed = True\n payments = batch.payments_set.select_related(\"paid_by\")\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n bank_account.last_used = timezone.now()\n bank_account.save()\n\n batch.save()\n\n services.send_tpay_batch_processing_emails(batch)\n\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\n )\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_batch_change\", kwargs[\"pk\"])\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchExportAdminView(View):\n \"\"\"\n View that exports a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Account holder\"),\n _(\"IBAN\"),\n _(\"Mandate Reference\"),\n _(\"Amount\"),\n _(\"Description\"),\n _(\"Mandate Date\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n member_rows = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n\n for row in member_rows:\n member = Member.objects.get(id=row[\"paid_by\"])\n bankaccount = member.bank_accounts.last()\n writer.writerow(\n [\n member.get_full_name(),\n bankaccount.iban,\n bankaccount.mandate_no,\n f\"{row['total']:.2f}\",\n batch.description,\n bankaccount.valid_from,\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicExportAdminView(View):\n \"\"\"\n View that exports a batch per topic\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch-topic.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Topic\"),\n _(\"No. 
of payments\"),\n _(\"First payment\"),\n _(\"Last payment\"),\n _(\"Total amount\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n for row in topic_rows:\n writer.writerow(\n [\n row[\"topic\"],\n row[\"count\"],\n timezone.localtime(row[\"min_date\"]).date(),\n timezone.localtime(row[\"max_date\"]).date(),\n f\"{row['total']:.2f}\",\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchNewFilledAdminView(View):\n \"\"\"\n View that adds a new batch filled with all payments that where not already in a batch.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n batch = Batch()\n batch.save()\n\n payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)\n\n payments.update(batch=batch)\n\n return redirect(\"admin:payments_batch_change\", object_id=batch.id)\n", "path": "website/payments/admin_views.py"}, {"content": "\"\"\"The services defined by the payments package\"\"\"\nimport datetime\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet, Q, Sum\nfrom django.urls import reverse\nfrom django.utils import timezone, translation\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom registrations.emails import _send_email\nfrom .exceptions import PaymentError\nfrom .models import Payment, BankAccount, Payable\n\n\ndef create_payment(\n payable: Payable,\n processed_by: Member,\n pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],\n) -> Payment:\n \"\"\"\n Create a new payment from a payable object\n\n :param payable: Payable object\n :param processed_by: Member that processed this payment\n :param pay_type: Payment type\n :return: Payment object\n \"\"\"\n if pay_type == Payment.TPAY and not payable.payment_payer.tpay_enabled:\n raise PaymentError(_(\"This user does not have Thalia Pay enabled\"))\n\n if payable.payment is not None:\n payable.payment.amount = payable.payment_amount\n payable.payment.notes = payable.payment_notes\n payable.payment.topic = payable.payment_topic\n payable.payment.paid_by = payable.payment_payer\n payable.payment.processed_by = processed_by\n payable.payment.type = pay_type\n payable.payment.save()\n else:\n payable.payment = Payment.objects.create(\n processed_by=processed_by,\n amount=payable.payment_amount,\n notes=payable.payment_notes,\n topic=payable.payment_topic,\n paid_by=payable.payment_payer,\n type=pay_type,\n )\n return payable.payment\n\n\ndef delete_payment(payable: Payable):\n \"\"\"\n Removes a payment from a payable object\n :param payable: Payable object\n :return:\n \"\"\"\n payment = payable.payment\n if payment.created_at < timezone.now() - timezone.timedelta(\n seconds=settings.PAYMENT_CHANGE_WINDOW\n ):\n raise PaymentError(_(\"You are not authorized to delete this payment.\"))\n\n payable.payment = None\n payable.save()\n payment.delete()\n\n\ndef update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:\n \"\"\"\n Update the last used field of a BankAccount queryset\n\n :param queryset: Queryset of BankAccounts\n :param date: date to set last_used to\n :return: number of affected rows\n \"\"\"\n if not date:\n date = 
timezone.now().date()\n\n result = queryset.filter(\n (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))\n | Q(valid_until=None)\n ).update(last_used=date)\n return result\n\n\ndef revoke_old_mandates() -> int:\n \"\"\"\n Revokes all mandates that have not been used for 36 months or more\n :return: number of affected rows\n \"\"\"\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n ).update(valid_until=timezone.now().date())\n\n\ndef send_tpay_batch_processing_emails(batch):\n \"\"\"Sends withdrawal notice emails to all members in a batch\"\"\"\n member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n for member_row in member_payments:\n member = Member.objects.get(pk=member_row[\"paid_by\"])\n total_amount = member_row[\"total\"]\n\n with translation.override(member.profile.language):\n _send_email(\n member.email,\n _(\"Thalia Pay withdrawal notice\"),\n \"payments/email/tpay_withdrawal_notice_mail.txt\",\n {\n \"name\": member.get_full_name(),\n \"batch\": batch,\n \"bank_account\": member.bank_accounts.filter(\n mandate_no__isnull=False\n ).last(),\n \"creditor_id\": settings.SEPA_CREDITOR_ID,\n \"payments\": batch.payments_set.filter(paid_by=member),\n \"total_amount\": total_amount,\n \"payments_url\": (\n settings.BASE_URL + reverse(\"payments:payment-list\",)\n ),\n },\n )\n return len(member_payments)\n", "path": "website/payments/services.py"}]}
3,115
547
gh_patches_debug_23718
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1398
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SIGNAGES module / Type filter disappeared I think TYPE filter was available before. Crucial filter of course. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `geotrek/infrastructure/filters.py` Content: ``` 1 from django.utils.translation import ugettext_lazy as _ 2 3 from geotrek.common.filters import StructureRelatedFilterSet, YearFilter 4 from geotrek.maintenance.filters import InterventionYearSelect 5 6 from .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage 7 8 9 class InfrastructureYearSelect(InterventionYearSelect): 10 label = _(u"Intervention year") 11 12 13 class InfrastructureFilterSet(StructureRelatedFilterSet): 14 intervention_year = YearFilter(name='interventions_set__date', 15 widget=InfrastructureYearSelect, 16 label=_(u"Intervention year")) 17 18 def __init__(self, *args, **kwargs): 19 super(InfrastructureFilterSet, self).__init__(*args, **kwargs) 20 field = self.form.fields['type'] 21 field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE) 22 23 field = self.form.fields['type__type'] 24 all_choices = field.widget.choices 25 all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE] 26 field.widget.choices = [('', _(u"Category"))] + all_choices 27 28 class Meta(StructureRelatedFilterSet.Meta): 29 model = Infrastructure 30 fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type'] 31 32 33 class SignageFilterSet(StructureRelatedFilterSet): 34 intervention_year = YearFilter(name='interventions_set__date', 35 widget=InfrastructureYearSelect) 36 37 class Meta(StructureRelatedFilterSet.Meta): 38 model = Signage 39 fields = StructureRelatedFilterSet.Meta.fields 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/geotrek/infrastructure/filters.py b/geotrek/infrastructure/filters.py --- a/geotrek/infrastructure/filters.py +++ b/geotrek/infrastructure/filters.py @@ -12,8 +12,7 @@ class InfrastructureFilterSet(StructureRelatedFilterSet): intervention_year = YearFilter(name='interventions_set__date', - widget=InfrastructureYearSelect, - label=_(u"Intervention year")) + widget=InfrastructureYearSelect) def __init__(self, *args, **kwargs): super(InfrastructureFilterSet, self).__init__(*args, **kwargs) @@ -34,6 +33,11 @@ intervention_year = YearFilter(name='interventions_set__date', widget=InfrastructureYearSelect) + def __init__(self, *args, **kwargs): + super(SignageFilterSet, self).__init__(*args, **kwargs) + field = self.form.fields['type'] + field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE) + class Meta(StructureRelatedFilterSet.Meta): model = Signage - fields = StructureRelatedFilterSet.Meta.fields + fields = StructureRelatedFilterSet.Meta.fields + ['type']
{"golden_diff": "diff --git a/geotrek/infrastructure/filters.py b/geotrek/infrastructure/filters.py\n--- a/geotrek/infrastructure/filters.py\n+++ b/geotrek/infrastructure/filters.py\n@@ -12,8 +12,7 @@\n \n class InfrastructureFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n- widget=InfrastructureYearSelect,\n- label=_(u\"Intervention year\"))\n+ widget=InfrastructureYearSelect)\n \n def __init__(self, *args, **kwargs):\n super(InfrastructureFilterSet, self).__init__(*args, **kwargs)\n@@ -34,6 +33,11 @@\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n \n+ def __init__(self, *args, **kwargs):\n+ super(SignageFilterSet, self).__init__(*args, **kwargs)\n+ field = self.form.fields['type']\n+ field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n+\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n- fields = StructureRelatedFilterSet.Meta.fields\n+ fields = StructureRelatedFilterSet.Meta.fields + ['type']\n", "issue": "SIGNAGES module / Type filter disappeared\nI think TYPE filter was available before.\nCrucial filter of course.\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom geotrek.common.filters import StructureRelatedFilterSet, YearFilter\nfrom geotrek.maintenance.filters import InterventionYearSelect\n\nfrom .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage\n\n\nclass InfrastructureYearSelect(InterventionYearSelect):\n label = _(u\"Intervention year\")\n\n\nclass InfrastructureFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect,\n label=_(u\"Intervention year\"))\n\n def __init__(self, *args, **kwargs):\n super(InfrastructureFilterSet, self).__init__(*args, **kwargs)\n field = self.form.fields['type']\n field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n\n field = self.form.fields['type__type']\n all_choices = field.widget.choices\n all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE]\n field.widget.choices = [('', _(u\"Category\"))] + all_choices\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Infrastructure\n fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']\n\n\nclass SignageFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n fields = StructureRelatedFilterSet.Meta.fields\n", "path": "geotrek/infrastructure/filters.py"}], "after_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom geotrek.common.filters import StructureRelatedFilterSet, YearFilter\nfrom geotrek.maintenance.filters import InterventionYearSelect\n\nfrom .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage\n\n\nclass InfrastructureYearSelect(InterventionYearSelect):\n label = _(u\"Intervention year\")\n\n\nclass InfrastructureFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n\n def __init__(self, *args, **kwargs):\n super(InfrastructureFilterSet, self).__init__(*args, **kwargs)\n field = self.form.fields['type']\n field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n\n field = self.form.fields['type__type']\n all_choices = field.widget.choices\n 
all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE]\n field.widget.choices = [('', _(u\"Category\"))] + all_choices\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Infrastructure\n fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']\n\n\nclass SignageFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n\n def __init__(self, *args, **kwargs):\n super(SignageFilterSet, self).__init__(*args, **kwargs)\n field = self.form.fields['type']\n field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n fields = StructureRelatedFilterSet.Meta.fields + ['type']\n", "path": "geotrek/infrastructure/filters.py"}]}
694
284
gh_patches_debug_6511
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-1675
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `MultioutputWrapper` does not squeeze `kwargs` Hi there, `MultioutputWrapper` class provides the option `squeeze_outputs`. If this is set the output should be squeezed on the index select dimension. Altough this happens for the `args` it does not happen for `kwargs` Here is are the where it happens, but it is only done for the `args` not for the `kwargs`. https://github.com/Lightning-AI/torchmetrics/blob/af52ba6a422e9f8c99853396d78882015a3c6fe3/src/torchmetrics/wrappers/multioutput.py#L105-L108 Should be a one-line change. if needed i can provide an PR Greetings Andi --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/wrappers/multioutput.py` Content: ``` 1 from copy import deepcopy 2 from typing import Any, Callable, List, Optional, Sequence, Tuple, Union 3 4 import torch 5 from torch import Tensor 6 from torch.nn import ModuleList 7 8 from torchmetrics import Metric 9 from torchmetrics.utilities import apply_to_collection 10 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE 11 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE 12 13 if not _MATPLOTLIB_AVAILABLE: 14 __doctest_skip__ = ["MultioutputWrapper.plot"] 15 16 17 def _get_nan_indices(*tensors: Tensor) -> Tensor: 18 """Get indices of rows along dim 0 which have NaN values.""" 19 if len(tensors) == 0: 20 raise ValueError("Must pass at least one tensor as argument") 21 sentinel = tensors[0] 22 nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device) 23 for tensor in tensors: 24 permuted_tensor = tensor.flatten(start_dim=1) 25 nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1) 26 return nan_idxs 27 28 29 class MultioutputWrapper(Metric): 30 """Wrap a base metric to enable it to support multiple outputs. 31 32 Several torchmetrics metrics, such as :class:`torchmetrics.regression.spearman.SpearmanCorrcoef` lack support for 33 multioutput mode. This class wraps such metrics to support computing one metric per output. 34 Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs. 35 This means if you set ``num_outputs`` to 2, ``.compute()`` will return a Tensor of dimension 36 ``(2, ...)`` where ``...`` represents the dimensions the metric returns when not wrapped. 37 38 In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude 39 fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the 40 intersection of NaN containing "rows" upon each update for each output. For example, suppose a user uses 41 `MultioutputWrapper` to wrap :class:`torchmetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally 42 has missing labels for classes like ``R2Score`` is that this class supports removing ``NaN`` values 43 (parameter ``remove_nans``) on a per-output basis. When ``remove_nans`` is passed the wrapper will remove all rows 44 45 Args: 46 base_metric: Metric being wrapped. 47 num_outputs: Expected dimensionality of the output dimension. 48 This parameter is used to determine the number of distinct metrics we need to track. 49 output_dim: 50 Dimension on which output is expected. Note that while this provides some flexibility, the output dimension 51 must be the same for all inputs to update. 
This applies even for metrics such as `Accuracy` where the labels 52 can have a different number of dimensions than the predictions. This can be worked around if the output 53 dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs. 54 remove_nans: 55 Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying 56 metric. Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N 57 represents the length of the batch or dataset being passed in. 58 squeeze_outputs: 59 If ``True``, will squeeze the 1-item dimensions left after ``index_select`` is applied. 60 This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful 61 for certain classification metrics that can't handle additional 1-item dimensions. 62 63 Example: 64 >>> # Mimic R2Score in `multioutput`, `raw_values` mode: 65 >>> import torch 66 >>> from torchmetrics.wrappers import MultioutputWrapper 67 >>> from torchmetrics.regression import R2Score 68 >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]]) 69 >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]]) 70 >>> r2score = MultioutputWrapper(R2Score(), 2) 71 >>> r2score(preds, target) 72 tensor([0.9654, 0.9082]) 73 """ 74 75 is_differentiable = False 76 77 def __init__( 78 self, 79 base_metric: Metric, 80 num_outputs: int, 81 output_dim: int = -1, 82 remove_nans: bool = True, 83 squeeze_outputs: bool = True, 84 ) -> None: 85 super().__init__() 86 self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_outputs)]) 87 self.output_dim = output_dim 88 self.remove_nans = remove_nans 89 self.squeeze_outputs = squeeze_outputs 90 91 def _get_args_kwargs_by_output(self, *args: Tensor, **kwargs: Tensor) -> List[Tuple[Tensor, Tensor]]: 92 """Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out.""" 93 args_kwargs_by_output = [] 94 for i in range(len(self.metrics)): 95 selected_args = apply_to_collection( 96 args, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device) 97 ) 98 selected_kwargs = apply_to_collection( 99 kwargs, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device) 100 ) 101 if self.remove_nans: 102 args_kwargs = selected_args + tuple(selected_kwargs.values()) 103 nan_idxs = _get_nan_indices(*args_kwargs) 104 selected_args = [arg[~nan_idxs] for arg in selected_args] 105 selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()} 106 107 if self.squeeze_outputs: 108 selected_args = [arg.squeeze(self.output_dim) for arg in selected_args] 109 args_kwargs_by_output.append((selected_args, selected_kwargs)) 110 return args_kwargs_by_output 111 112 def update(self, *args: Any, **kwargs: Any) -> None: 113 """Update each underlying metric with the corresponding output.""" 114 reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs) 115 for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs): 116 metric.update(*selected_args, **selected_kwargs) 117 118 def compute(self) -> Tensor: 119 """Compute metrics.""" 120 return torch.stack([m.compute() for m in self.metrics], 0) 121 122 @torch.jit.unused 123 def forward(self, *args: Any, **kwargs: Any) -> Any: 124 """Call underlying forward methods and aggregate the results if they're non-null. 125 126 We override this method to ensure that state variables get copied over on the underlying metrics. 
127 """ 128 results = [] 129 reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs) 130 for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs): 131 results.append(metric(*selected_args, **selected_kwargs)) 132 if results[0] is None: 133 return None 134 return torch.stack(results, 0) 135 136 def reset(self) -> None: 137 """Reset all underlying metrics.""" 138 for metric in self.metrics: 139 metric.reset() 140 super().reset() 141 142 def _wrap_update(self, update: Callable) -> Callable: 143 """Overwrite to do nothing.""" 144 return update 145 146 def _wrap_compute(self, compute: Callable) -> Callable: 147 """Overwrite to do nothing.""" 148 return compute 149 150 def plot( 151 self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None 152 ) -> _PLOT_OUT_TYPE: 153 """Plot a single or multiple values from the metric. 154 155 Args: 156 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results. 157 If no value is provided, will automatically call `metric.compute` and plot that result. 158 ax: An matplotlib axis object. If provided will add plot to that axis 159 160 Returns: 161 Figure and Axes object 162 163 Raises: 164 ModuleNotFoundError: 165 If `matplotlib` is not installed 166 167 .. plot:: 168 :scale: 75 169 170 >>> # Example plotting a single value 171 >>> import torch 172 >>> from torchmetrics import MultioutputWrapper, R2Score 173 >>> metric = MultioutputWrapper(R2Score(), 2) 174 >>> metric.update(torch.randn(20, 2), torch.randn(20, 2)) 175 >>> fig_, ax_ = metric.plot() 176 177 .. plot:: 178 :scale: 75 179 180 >>> # Example plotting multiple values 181 >>> import torch 182 >>> from torchmetrics import MultioutputWrapper, R2Score 183 >>> metric = MultioutputWrapper(R2Score(), 2) 184 >>> values = [ ] 185 >>> for _ in range(3): 186 ... values.append(metric(torch.randn(20, 2), torch.randn(20, 2))) 187 >>> fig_, ax_ = metric.plot(values) 188 """ 189 return self._plot(val, ax) 190 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/wrappers/multioutput.py b/src/torchmetrics/wrappers/multioutput.py --- a/src/torchmetrics/wrappers/multioutput.py +++ b/src/torchmetrics/wrappers/multioutput.py @@ -106,6 +106,7 @@ if self.squeeze_outputs: selected_args = [arg.squeeze(self.output_dim) for arg in selected_args] + selected_kwargs = {k: v.squeeze(self.output_dim) for k, v in selected_kwargs.items()} args_kwargs_by_output.append((selected_args, selected_kwargs)) return args_kwargs_by_output
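The added line gives keyword tensors the same squeeze the positional tensors already receive. Below is a self-contained PyTorch sketch of why that matters; the shapes, the `target` keyword, and the index value are illustrative assumptions, not taken from the torchmetrics test suite.

```python
import torch

output_dim = -1
preds = torch.randn(20, 2)    # would reach update() positionally
target = torch.randn(20, 2)   # would reach update() as a keyword argument

# Select one output along output_dim; torch.index_select keeps the indexed
# dimension, so each per-output slice retains a trailing size-1 axis.
idx = torch.tensor([0])
selected_args = [torch.index_select(preds, dim=output_dim, index=idx)]
selected_kwargs = {"target": torch.index_select(target, dim=output_dim, index=idx)}

# Before the patch only the positional side was squeezed:
selected_args = [a.squeeze(output_dim) for a in selected_args]
# The patched line applies the same treatment to the keyword side:
selected_kwargs = {k: v.squeeze(output_dim) for k, v in selected_kwargs.items()}

print(selected_args[0].shape)           # torch.Size([20])
print(selected_kwargs["target"].shape)  # torch.Size([20]) rather than [20, 1]
```

Without the extra line, a wrapped metric that cannot cope with the leftover `[20, 1]` keyword tensor would still fail even with `squeeze_outputs=True`.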
{"golden_diff": "diff --git a/src/torchmetrics/wrappers/multioutput.py b/src/torchmetrics/wrappers/multioutput.py\n--- a/src/torchmetrics/wrappers/multioutput.py\n+++ b/src/torchmetrics/wrappers/multioutput.py\n@@ -106,6 +106,7 @@\n \n if self.squeeze_outputs:\n selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]\n+ selected_kwargs = {k: v.squeeze(self.output_dim) for k, v in selected_kwargs.items()}\n args_kwargs_by_output.append((selected_args, selected_kwargs))\n return args_kwargs_by_output\n", "issue": "`MultioutputWrapper` does not squeeze `kwargs`\nHi there,\r\n\r\n`MultioutputWrapper` class provides the option `squeeze_outputs`. If this is set the output should be squeezed on the index select dimension.\r\nAltough this happens for the `args` it does not happen for `kwargs`\r\n\r\nHere is are the where it happens, but it is only done for the `args` not for the `kwargs`.\r\nhttps://github.com/Lightning-AI/torchmetrics/blob/af52ba6a422e9f8c99853396d78882015a3c6fe3/src/torchmetrics/wrappers/multioutput.py#L105-L108\r\n\r\nShould be a one-line change. if needed i can provide an PR\r\n\r\nGreetings Andi\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom torchmetrics import Metric\nfrom torchmetrics.utilities import apply_to_collection\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"MultioutputWrapper.plot\"]\n\n\ndef _get_nan_indices(*tensors: Tensor) -> Tensor:\n \"\"\"Get indices of rows along dim 0 which have NaN values.\"\"\"\n if len(tensors) == 0:\n raise ValueError(\"Must pass at least one tensor as argument\")\n sentinel = tensors[0]\n nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device)\n for tensor in tensors:\n permuted_tensor = tensor.flatten(start_dim=1)\n nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1)\n return nan_idxs\n\n\nclass MultioutputWrapper(Metric):\n \"\"\"Wrap a base metric to enable it to support multiple outputs.\n\n Several torchmetrics metrics, such as :class:`torchmetrics.regression.spearman.SpearmanCorrcoef` lack support for\n multioutput mode. This class wraps such metrics to support computing one metric per output.\n Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs.\n This means if you set ``num_outputs`` to 2, ``.compute()`` will return a Tensor of dimension\n ``(2, ...)`` where ``...`` represents the dimensions the metric returns when not wrapped.\n\n In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude\n fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the\n intersection of NaN containing \"rows\" upon each update for each output. For example, suppose a user uses\n `MultioutputWrapper` to wrap :class:`torchmetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally\n has missing labels for classes like ``R2Score`` is that this class supports removing ``NaN`` values\n (parameter ``remove_nans``) on a per-output basis. 
When ``remove_nans`` is passed the wrapper will remove all rows\n\n Args:\n base_metric: Metric being wrapped.\n num_outputs: Expected dimensionality of the output dimension.\n This parameter is used to determine the number of distinct metrics we need to track.\n output_dim:\n Dimension on which output is expected. Note that while this provides some flexibility, the output dimension\n must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels\n can have a different number of dimensions than the predictions. This can be worked around if the output\n dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs.\n remove_nans:\n Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying\n metric. Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N\n represents the length of the batch or dataset being passed in.\n squeeze_outputs:\n If ``True``, will squeeze the 1-item dimensions left after ``index_select`` is applied.\n This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful\n for certain classification metrics that can't handle additional 1-item dimensions.\n\n Example:\n >>> # Mimic R2Score in `multioutput`, `raw_values` mode:\n >>> import torch\n >>> from torchmetrics.wrappers import MultioutputWrapper\n >>> from torchmetrics.regression import R2Score\n >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])\n >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])\n >>> r2score = MultioutputWrapper(R2Score(), 2)\n >>> r2score(preds, target)\n tensor([0.9654, 0.9082])\n \"\"\"\n\n is_differentiable = False\n\n def __init__(\n self,\n base_metric: Metric,\n num_outputs: int,\n output_dim: int = -1,\n remove_nans: bool = True,\n squeeze_outputs: bool = True,\n ) -> None:\n super().__init__()\n self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_outputs)])\n self.output_dim = output_dim\n self.remove_nans = remove_nans\n self.squeeze_outputs = squeeze_outputs\n\n def _get_args_kwargs_by_output(self, *args: Tensor, **kwargs: Tensor) -> List[Tuple[Tensor, Tensor]]:\n \"\"\"Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out.\"\"\"\n args_kwargs_by_output = []\n for i in range(len(self.metrics)):\n selected_args = apply_to_collection(\n args, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n selected_kwargs = apply_to_collection(\n kwargs, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n if self.remove_nans:\n args_kwargs = selected_args + tuple(selected_kwargs.values())\n nan_idxs = _get_nan_indices(*args_kwargs)\n selected_args = [arg[~nan_idxs] for arg in selected_args]\n selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()}\n\n if self.squeeze_outputs:\n selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]\n args_kwargs_by_output.append((selected_args, selected_kwargs))\n return args_kwargs_by_output\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Update each underlying metric with the corresponding output.\"\"\"\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n metric.update(*selected_args, **selected_kwargs)\n\n def compute(self) -> Tensor:\n \"\"\"Compute 
metrics.\"\"\"\n return torch.stack([m.compute() for m in self.metrics], 0)\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Call underlying forward methods and aggregate the results if they're non-null.\n\n We override this method to ensure that state variables get copied over on the underlying metrics.\n \"\"\"\n results = []\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n results.append(metric(*selected_args, **selected_kwargs))\n if results[0] is None:\n return None\n return torch.stack(results, 0)\n\n def reset(self) -> None:\n \"\"\"Reset all underlying metrics.\"\"\"\n for metric in self.metrics:\n metric.reset()\n super().reset()\n\n def _wrap_update(self, update: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return update\n\n def _wrap_compute(self, compute: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return compute\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics import MultioutputWrapper, R2Score\n >>> metric = MultioutputWrapper(R2Score(), 2)\n >>> metric.update(torch.randn(20, 2), torch.randn(20, 2))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics import MultioutputWrapper, R2Score\n >>> metric = MultioutputWrapper(R2Score(), 2)\n >>> values = [ ]\n >>> for _ in range(3):\n ... 
values.append(metric(torch.randn(20, 2), torch.randn(20, 2)))\n >>> fig_, ax_ = metric.plot(values)\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/wrappers/multioutput.py"}], "after_files": [{"content": "from copy import deepcopy\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom torchmetrics import Metric\nfrom torchmetrics.utilities import apply_to_collection\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"MultioutputWrapper.plot\"]\n\n\ndef _get_nan_indices(*tensors: Tensor) -> Tensor:\n \"\"\"Get indices of rows along dim 0 which have NaN values.\"\"\"\n if len(tensors) == 0:\n raise ValueError(\"Must pass at least one tensor as argument\")\n sentinel = tensors[0]\n nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device)\n for tensor in tensors:\n permuted_tensor = tensor.flatten(start_dim=1)\n nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1)\n return nan_idxs\n\n\nclass MultioutputWrapper(Metric):\n \"\"\"Wrap a base metric to enable it to support multiple outputs.\n\n Several torchmetrics metrics, such as :class:`torchmetrics.regression.spearman.SpearmanCorrcoef` lack support for\n multioutput mode. This class wraps such metrics to support computing one metric per output.\n Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs.\n This means if you set ``num_outputs`` to 2, ``.compute()`` will return a Tensor of dimension\n ``(2, ...)`` where ``...`` represents the dimensions the metric returns when not wrapped.\n\n In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude\n fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the\n intersection of NaN containing \"rows\" upon each update for each output. For example, suppose a user uses\n `MultioutputWrapper` to wrap :class:`torchmetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally\n has missing labels for classes like ``R2Score`` is that this class supports removing ``NaN`` values\n (parameter ``remove_nans``) on a per-output basis. When ``remove_nans`` is passed the wrapper will remove all rows\n\n Args:\n base_metric: Metric being wrapped.\n num_outputs: Expected dimensionality of the output dimension.\n This parameter is used to determine the number of distinct metrics we need to track.\n output_dim:\n Dimension on which output is expected. Note that while this provides some flexibility, the output dimension\n must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels\n can have a different number of dimensions than the predictions. This can be worked around if the output\n dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs.\n remove_nans:\n Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying\n metric. 
Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N\n represents the length of the batch or dataset being passed in.\n squeeze_outputs:\n If ``True``, will squeeze the 1-item dimensions left after ``index_select`` is applied.\n This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful\n for certain classification metrics that can't handle additional 1-item dimensions.\n\n Example:\n >>> # Mimic R2Score in `multioutput`, `raw_values` mode:\n >>> import torch\n >>> from torchmetrics.wrappers import MultioutputWrapper\n >>> from torchmetrics.regression import R2Score\n >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])\n >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])\n >>> r2score = MultioutputWrapper(R2Score(), 2)\n >>> r2score(preds, target)\n tensor([0.9654, 0.9082])\n \"\"\"\n\n is_differentiable = False\n\n def __init__(\n self,\n base_metric: Metric,\n num_outputs: int,\n output_dim: int = -1,\n remove_nans: bool = True,\n squeeze_outputs: bool = True,\n ) -> None:\n super().__init__()\n self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_outputs)])\n self.output_dim = output_dim\n self.remove_nans = remove_nans\n self.squeeze_outputs = squeeze_outputs\n\n def _get_args_kwargs_by_output(self, *args: Tensor, **kwargs: Tensor) -> List[Tuple[Tensor, Tensor]]:\n \"\"\"Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out.\"\"\"\n args_kwargs_by_output = []\n for i in range(len(self.metrics)):\n selected_args = apply_to_collection(\n args, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n selected_kwargs = apply_to_collection(\n kwargs, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n if self.remove_nans:\n args_kwargs = selected_args + tuple(selected_kwargs.values())\n nan_idxs = _get_nan_indices(*args_kwargs)\n selected_args = [arg[~nan_idxs] for arg in selected_args]\n selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()}\n\n if self.squeeze_outputs:\n selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]\n selected_kwargs = {k: v.squeeze(self.output_dim) for k, v in selected_kwargs.items()}\n args_kwargs_by_output.append((selected_args, selected_kwargs))\n return args_kwargs_by_output\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Update each underlying metric with the corresponding output.\"\"\"\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n metric.update(*selected_args, **selected_kwargs)\n\n def compute(self) -> Tensor:\n \"\"\"Compute metrics.\"\"\"\n return torch.stack([m.compute() for m in self.metrics], 0)\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Call underlying forward methods and aggregate the results if they're non-null.\n\n We override this method to ensure that state variables get copied over on the underlying metrics.\n \"\"\"\n results = []\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n results.append(metric(*selected_args, **selected_kwargs))\n if results[0] is None:\n return None\n return torch.stack(results, 0)\n\n def reset(self) -> None:\n \"\"\"Reset all underlying metrics.\"\"\"\n for metric in self.metrics:\n 
metric.reset()\n super().reset()\n\n def _wrap_update(self, update: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return update\n\n def _wrap_compute(self, compute: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return compute\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics import MultioutputWrapper, R2Score\n >>> metric = MultioutputWrapper(R2Score(), 2)\n >>> metric.update(torch.randn(20, 2), torch.randn(20, 2))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics import MultioutputWrapper, R2Score\n >>> metric = MultioutputWrapper(R2Score(), 2)\n >>> values = [ ]\n >>> for _ in range(3):\n ... values.append(metric(torch.randn(20, 2), torch.randn(20, 2)))\n >>> fig_, ax_ = metric.plot(values)\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/wrappers/multioutput.py"}]}
2,875
137
gh_patches_debug_17074
rasdani/github-patches
git_diff
nltk__nltk-1738
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unnecessary slow down on DecisionTreeClassifier Just a minor unnecessary calculation that might become huge on large trees. https://github.com/nltk/nltk/blob/develop/nltk/classify/decisiontree.py#L269 Even if verbose is false you are building that description string. Wouldn't it be a good idea to have that string building inside the if condition of verbose? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nltk/classify/decisiontree.py` Content: ``` 1 # Natural Language Toolkit: Decision Tree Classifiers 2 # 3 # Copyright (C) 2001-2017 NLTK Project 4 # Author: Edward Loper <[email protected]> 5 # URL: <http://nltk.org/> 6 # For license information, see LICENSE.TXT 7 8 """ 9 A classifier model that decides which label to assign to a token on 10 the basis of a tree structure, where branches correspond to conditions 11 on feature values, and leaves correspond to label assignments. 12 """ 13 from __future__ import print_function, unicode_literals, division 14 15 from collections import defaultdict 16 17 from nltk.probability import FreqDist, MLEProbDist, entropy 18 from nltk.classify.api import ClassifierI 19 from nltk.compat import python_2_unicode_compatible 20 21 @python_2_unicode_compatible 22 class DecisionTreeClassifier(ClassifierI): 23 def __init__(self, label, feature_name=None, decisions=None, default=None): 24 """ 25 :param label: The most likely label for tokens that reach 26 this node in the decision tree. If this decision tree 27 has no children, then this label will be assigned to 28 any token that reaches this decision tree. 29 :param feature_name: The name of the feature that this 30 decision tree selects for. 31 :param decisions: A dictionary mapping from feature values 32 for the feature identified by ``feature_name`` to 33 child decision trees. 34 :param default: The child that will be used if the value of 35 feature ``feature_name`` does not match any of the keys in 36 ``decisions``. This is used when constructing binary 37 decision trees. 38 """ 39 self._label = label 40 self._fname = feature_name 41 self._decisions = decisions 42 self._default = default 43 44 def labels(self): 45 labels = [self._label] 46 if self._decisions is not None: 47 for dt in self._decisions.values(): 48 labels.extend(dt.labels()) 49 if self._default is not None: 50 labels.extend(self._default.labels()) 51 return list(set(labels)) 52 53 def classify(self, featureset): 54 # Decision leaf: 55 if self._fname is None: 56 return self._label 57 58 # Decision tree: 59 fval = featureset.get(self._fname) 60 if fval in self._decisions: 61 return self._decisions[fval].classify(featureset) 62 elif self._default is not None: 63 return self._default.classify(featureset) 64 else: 65 return self._label 66 67 def error(self, labeled_featuresets): 68 errors = 0 69 for featureset, label in labeled_featuresets: 70 if self.classify(featureset) != label: 71 errors += 1 72 return errors/len(labeled_featuresets) 73 74 def pretty_format(self, width=70, prefix='', depth=4): 75 """ 76 Return a string containing a pretty-printed version of this 77 decision tree. Each line in this string corresponds to a 78 single decision tree node or leaf, and indentation is used to 79 display the structure of the decision tree. 80 """ 81 # [xx] display default!! 
82 if self._fname is None: 83 n = width-len(prefix)-15 84 return '{0}{1} {2}\n'.format(prefix, '.'*n, self._label) 85 s = '' 86 for i, (fval, result) in enumerate(sorted(self._decisions.items())): 87 hdr = '{0}{1}={2}? '.format(prefix, self._fname, fval) 88 n = width-15-len(hdr) 89 s += '{0}{1} {2}\n'.format(hdr, '.'*(n), result._label) 90 if result._fname is not None and depth>1: 91 s += result.pretty_format(width, prefix+' ', depth-1) 92 if self._default is not None: 93 n = width-len(prefix)-21 94 s += '{0}else: {1} {2}\n'.format(prefix, '.'*n, self._default._label) 95 if self._default._fname is not None and depth>1: 96 s += self._default.pretty_format(width, prefix+' ', depth-1) 97 return s 98 99 def pseudocode(self, prefix='', depth=4): 100 """ 101 Return a string representation of this decision tree that 102 expresses the decisions it makes as a nested set of pseudocode 103 if statements. 104 """ 105 if self._fname is None: 106 return "{0}return {1!r}\n".format(prefix, self._label) 107 s = '' 108 for (fval, result) in sorted(self._decisions.items()): 109 s += '{0}if {1} == {2!r}: '.format(prefix, self._fname, fval) 110 if result._fname is not None and depth>1: 111 s += '\n'+result.pseudocode(prefix+' ', depth-1) 112 else: 113 s += 'return {0!r}\n'.format(result._label) 114 if self._default is not None: 115 if len(self._decisions) == 1: 116 s += '{0}if {1} != {2!r}: '.format(prefix, self._fname, 117 list(self._decisions.keys())[0]) 118 else: 119 s += '{0}else: '.format(prefix) 120 if self._default._fname is not None and depth>1: 121 s += '\n'+self._default.pseudocode(prefix+' ', depth-1) 122 else: 123 s += 'return {0!r}\n'.format(self._default._label) 124 return s 125 126 def __str__(self): 127 return self.pretty_format() 128 129 @staticmethod 130 def train(labeled_featuresets, entropy_cutoff=0.05, depth_cutoff=100, 131 support_cutoff=10, binary=False, feature_values=None, 132 verbose=False): 133 """ 134 :param binary: If true, then treat all feature/value pairs as 135 individual binary features, rather than using a single n-way 136 branch for each feature. 137 """ 138 # Collect a list of all feature names. 139 feature_names = set() 140 for featureset, label in labeled_featuresets: 141 for fname in featureset: 142 feature_names.add(fname) 143 144 # Collect a list of the values each feature can take. 145 if feature_values is None and binary: 146 feature_values = defaultdict(set) 147 for featureset, label in labeled_featuresets: 148 for fname, fval in featureset.items(): 149 feature_values[fname].add(fval) 150 151 # Start with a stump. 152 if not binary: 153 tree = DecisionTreeClassifier.best_stump( 154 feature_names, labeled_featuresets, verbose) 155 else: 156 tree = DecisionTreeClassifier.best_binary_stump( 157 feature_names, labeled_featuresets, feature_values, verbose) 158 159 # Refine the stump. 160 tree.refine(labeled_featuresets, entropy_cutoff, depth_cutoff-1, 161 support_cutoff, binary, feature_values, verbose) 162 163 # Return it 164 return tree 165 166 @staticmethod 167 def leaf(labeled_featuresets): 168 label = FreqDist(label for (featureset, label) 169 in labeled_featuresets).max() 170 return DecisionTreeClassifier(label) 171 172 @staticmethod 173 def stump(feature_name, labeled_featuresets): 174 label = FreqDist(label for (featureset, label) 175 in labeled_featuresets).max() 176 177 # Find the best label for each value. 
178 freqs = defaultdict(FreqDist) # freq(label|value) 179 for featureset, label in labeled_featuresets: 180 feature_value = featureset.get(feature_name) 181 freqs[feature_value][label] += 1 182 183 decisions = dict((val, DecisionTreeClassifier(freqs[val].max())) 184 for val in freqs) 185 return DecisionTreeClassifier(label, feature_name, decisions) 186 187 def refine(self, labeled_featuresets, entropy_cutoff, depth_cutoff, 188 support_cutoff, binary=False, feature_values=None, 189 verbose=False): 190 if len(labeled_featuresets) <= support_cutoff: return 191 if self._fname is None: return 192 if depth_cutoff <= 0: return 193 for fval in self._decisions: 194 fval_featuresets = [(featureset, label) for (featureset, label) 195 in labeled_featuresets 196 if featureset.get(self._fname) == fval] 197 198 label_freqs = FreqDist(label for (featureset, label) 199 in fval_featuresets) 200 if entropy(MLEProbDist(label_freqs)) > entropy_cutoff: 201 self._decisions[fval] = DecisionTreeClassifier.train( 202 fval_featuresets, entropy_cutoff, depth_cutoff, 203 support_cutoff, binary, feature_values, verbose) 204 if self._default is not None: 205 default_featuresets = [(featureset, label) for (featureset, label) 206 in labeled_featuresets 207 if featureset.get(self._fname) not in 208 self._decisions] 209 label_freqs = FreqDist(label for (featureset, label) 210 in default_featuresets) 211 if entropy(MLEProbDist(label_freqs)) > entropy_cutoff: 212 self._default = DecisionTreeClassifier.train( 213 default_featuresets, entropy_cutoff, depth_cutoff, 214 support_cutoff, binary, feature_values, verbose) 215 216 @staticmethod 217 def best_stump(feature_names, labeled_featuresets, verbose=False): 218 best_stump = DecisionTreeClassifier.leaf(labeled_featuresets) 219 best_error = best_stump.error(labeled_featuresets) 220 for fname in feature_names: 221 stump = DecisionTreeClassifier.stump(fname, labeled_featuresets) 222 stump_error = stump.error(labeled_featuresets) 223 if stump_error < best_error: 224 best_error = stump_error 225 best_stump = stump 226 if verbose: 227 print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \ 228 (len(labeled_featuresets), best_stump._fname, best_error))) 229 return best_stump 230 231 @staticmethod 232 def binary_stump(feature_name, feature_value, labeled_featuresets): 233 label = FreqDist(label for (featureset, label) 234 in labeled_featuresets).max() 235 236 # Find the best label for each value. 237 pos_fdist = FreqDist() 238 neg_fdist = FreqDist() 239 for featureset, label in labeled_featuresets: 240 if featureset.get(feature_name) == feature_value: 241 pos_fdist[label] += 1 242 else: 243 neg_fdist[label] += 1 244 245 246 decisions = {} 247 default = label 248 # But hopefully we have observations! 
249 if pos_fdist.N() > 0: 250 decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())} 251 if neg_fdist.N() > 0: 252 default = DecisionTreeClassifier(neg_fdist.max()) 253 254 return DecisionTreeClassifier(label, feature_name, decisions, default) 255 256 @staticmethod 257 def best_binary_stump(feature_names, labeled_featuresets, feature_values, 258 verbose=False): 259 best_stump = DecisionTreeClassifier.leaf(labeled_featuresets) 260 best_error = best_stump.error(labeled_featuresets) 261 for fname in feature_names: 262 for fval in feature_values[fname]: 263 stump = DecisionTreeClassifier.binary_stump( 264 fname, fval, labeled_featuresets) 265 stump_error = stump.error(labeled_featuresets) 266 if stump_error < best_error: 267 best_error = stump_error 268 best_stump = stump 269 if best_stump._decisions: 270 descr = '{0}={1}'.format(best_stump._fname, 271 list(best_stump._decisions.keys())[0]) 272 else: 273 descr = '(default)' 274 if verbose: 275 print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \ 276 (len(labeled_featuresets), descr, best_error))) 277 return best_stump 278 279 ##////////////////////////////////////////////////////// 280 ## Demo 281 ##////////////////////////////////////////////////////// 282 283 def f(x): 284 return DecisionTreeClassifier.train(x, binary=True, verbose=True) 285 286 def demo(): 287 from nltk.classify.util import names_demo, binary_names_demo_features 288 classifier = names_demo(f, #DecisionTreeClassifier.train, 289 binary_names_demo_features) 290 print(classifier.pp(depth=7)) 291 print(classifier.pseudocode(depth=7)) 292 293 if __name__ == '__main__': 294 demo() 295 296 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nltk/classify/decisiontree.py b/nltk/classify/decisiontree.py --- a/nltk/classify/decisiontree.py +++ b/nltk/classify/decisiontree.py @@ -266,12 +266,12 @@ if stump_error < best_error: best_error = stump_error best_stump = stump - if best_stump._decisions: - descr = '{0}={1}'.format(best_stump._fname, - list(best_stump._decisions.keys())[0]) - else: - descr = '(default)' if verbose: + if best_stump._decisions: + descr = '{0}={1}'.format(best_stump._fname, + list(best_stump._decisions.keys())[0]) + else: + descr = '(default)' print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \ (len(labeled_featuresets), descr, best_error))) return best_stump
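The diff above just moves the description building under the `verbose` guard, so non-verbose training no longer formats a string on every call to `best_binary_stump`. A generic sketch of the same defer-formatting-until-needed pattern follows; the function and names are made up for illustration.

```python
# Build the (potentially costly) description only when it will be printed.
def report_best(items, best_name, best_error, verbose=False):
    if verbose:
        descr = best_name if best_name is not None else '(default)'
        print('best stump for {:6d} toks uses {:20} err={:6.4f}'.format(
            len(items), descr, best_error))

report_best(range(1000), 'feature=value', 0.1234, verbose=True)
report_best(range(1000), None, 0.1234)  # silent: no string work at all
```

Python's `logging` module follows the same idea by deferring `%`-style interpolation until a record is actually emitted.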
{"golden_diff": "diff --git a/nltk/classify/decisiontree.py b/nltk/classify/decisiontree.py\n--- a/nltk/classify/decisiontree.py\n+++ b/nltk/classify/decisiontree.py\n@@ -266,12 +266,12 @@\n if stump_error < best_error:\n best_error = stump_error\n best_stump = stump\n- if best_stump._decisions:\n- descr = '{0}={1}'.format(best_stump._fname,\n- list(best_stump._decisions.keys())[0])\n- else:\n- descr = '(default)'\n if verbose:\n+ if best_stump._decisions:\n+ descr = '{0}={1}'.format(best_stump._fname,\n+ list(best_stump._decisions.keys())[0])\n+ else:\n+ descr = '(default)'\n print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \\\n (len(labeled_featuresets), descr, best_error)))\n return best_stump\n", "issue": "Unnecessary slow down on DecisionTreeClassifier\nJust a minor unnecessary calculation that might become huge on large trees.\r\n\r\nhttps://github.com/nltk/nltk/blob/develop/nltk/classify/decisiontree.py#L269\r\n\r\nEven if verbose is false you are building that description string. Wouldn't it be a good idea to have that string building inside the if condition of verbose?\r\n\n", "before_files": [{"content": "# Natural Language Toolkit: Decision Tree Classifiers\n#\n# Copyright (C) 2001-2017 NLTK Project\n# Author: Edward Loper <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nA classifier model that decides which label to assign to a token on\nthe basis of a tree structure, where branches correspond to conditions\non feature values, and leaves correspond to label assignments.\n\"\"\"\nfrom __future__ import print_function, unicode_literals, division\n\nfrom collections import defaultdict\n\nfrom nltk.probability import FreqDist, MLEProbDist, entropy\nfrom nltk.classify.api import ClassifierI\nfrom nltk.compat import python_2_unicode_compatible\n\n@python_2_unicode_compatible\nclass DecisionTreeClassifier(ClassifierI):\n def __init__(self, label, feature_name=None, decisions=None, default=None):\n \"\"\"\n :param label: The most likely label for tokens that reach\n this node in the decision tree. If this decision tree\n has no children, then this label will be assigned to\n any token that reaches this decision tree.\n :param feature_name: The name of the feature that this\n decision tree selects for.\n :param decisions: A dictionary mapping from feature values\n for the feature identified by ``feature_name`` to\n child decision trees.\n :param default: The child that will be used if the value of\n feature ``feature_name`` does not match any of the keys in\n ``decisions``. 
This is used when constructing binary\n decision trees.\n \"\"\"\n self._label = label\n self._fname = feature_name\n self._decisions = decisions\n self._default = default\n\n def labels(self):\n labels = [self._label]\n if self._decisions is not None:\n for dt in self._decisions.values():\n labels.extend(dt.labels())\n if self._default is not None:\n labels.extend(self._default.labels())\n return list(set(labels))\n\n def classify(self, featureset):\n # Decision leaf:\n if self._fname is None:\n return self._label\n\n # Decision tree:\n fval = featureset.get(self._fname)\n if fval in self._decisions:\n return self._decisions[fval].classify(featureset)\n elif self._default is not None:\n return self._default.classify(featureset)\n else:\n return self._label\n\n def error(self, labeled_featuresets):\n errors = 0\n for featureset, label in labeled_featuresets:\n if self.classify(featureset) != label:\n errors += 1\n return errors/len(labeled_featuresets)\n\n def pretty_format(self, width=70, prefix='', depth=4):\n \"\"\"\n Return a string containing a pretty-printed version of this\n decision tree. Each line in this string corresponds to a\n single decision tree node or leaf, and indentation is used to\n display the structure of the decision tree.\n \"\"\"\n # [xx] display default!!\n if self._fname is None:\n n = width-len(prefix)-15\n return '{0}{1} {2}\\n'.format(prefix, '.'*n, self._label)\n s = ''\n for i, (fval, result) in enumerate(sorted(self._decisions.items())):\n hdr = '{0}{1}={2}? '.format(prefix, self._fname, fval)\n n = width-15-len(hdr)\n s += '{0}{1} {2}\\n'.format(hdr, '.'*(n), result._label)\n if result._fname is not None and depth>1:\n s += result.pretty_format(width, prefix+' ', depth-1)\n if self._default is not None:\n n = width-len(prefix)-21\n s += '{0}else: {1} {2}\\n'.format(prefix, '.'*n, self._default._label)\n if self._default._fname is not None and depth>1:\n s += self._default.pretty_format(width, prefix+' ', depth-1)\n return s\n\n def pseudocode(self, prefix='', depth=4):\n \"\"\"\n Return a string representation of this decision tree that\n expresses the decisions it makes as a nested set of pseudocode\n if statements.\n \"\"\"\n if self._fname is None:\n return \"{0}return {1!r}\\n\".format(prefix, self._label)\n s = ''\n for (fval, result) in sorted(self._decisions.items()):\n s += '{0}if {1} == {2!r}: '.format(prefix, self._fname, fval)\n if result._fname is not None and depth>1:\n s += '\\n'+result.pseudocode(prefix+' ', depth-1)\n else:\n s += 'return {0!r}\\n'.format(result._label)\n if self._default is not None:\n if len(self._decisions) == 1:\n s += '{0}if {1} != {2!r}: '.format(prefix, self._fname,\n list(self._decisions.keys())[0])\n else:\n s += '{0}else: '.format(prefix)\n if self._default._fname is not None and depth>1:\n s += '\\n'+self._default.pseudocode(prefix+' ', depth-1)\n else:\n s += 'return {0!r}\\n'.format(self._default._label)\n return s\n\n def __str__(self):\n return self.pretty_format()\n\n @staticmethod\n def train(labeled_featuresets, entropy_cutoff=0.05, depth_cutoff=100,\n support_cutoff=10, binary=False, feature_values=None,\n verbose=False):\n \"\"\"\n :param binary: If true, then treat all feature/value pairs as\n individual binary features, rather than using a single n-way\n branch for each feature.\n \"\"\"\n # Collect a list of all feature names.\n feature_names = set()\n for featureset, label in labeled_featuresets:\n for fname in featureset:\n feature_names.add(fname)\n\n # Collect a list of the values each feature can 
take.\n if feature_values is None and binary:\n feature_values = defaultdict(set)\n for featureset, label in labeled_featuresets:\n for fname, fval in featureset.items():\n feature_values[fname].add(fval)\n\n # Start with a stump.\n if not binary:\n tree = DecisionTreeClassifier.best_stump(\n feature_names, labeled_featuresets, verbose)\n else:\n tree = DecisionTreeClassifier.best_binary_stump(\n feature_names, labeled_featuresets, feature_values, verbose)\n\n # Refine the stump.\n tree.refine(labeled_featuresets, entropy_cutoff, depth_cutoff-1,\n support_cutoff, binary, feature_values, verbose)\n\n # Return it\n return tree\n\n @staticmethod\n def leaf(labeled_featuresets):\n label = FreqDist(label for (featureset, label)\n in labeled_featuresets).max()\n return DecisionTreeClassifier(label)\n\n @staticmethod\n def stump(feature_name, labeled_featuresets):\n label = FreqDist(label for (featureset, label)\n in labeled_featuresets).max()\n\n # Find the best label for each value.\n freqs = defaultdict(FreqDist) # freq(label|value)\n for featureset, label in labeled_featuresets:\n feature_value = featureset.get(feature_name)\n freqs[feature_value][label] += 1\n\n decisions = dict((val, DecisionTreeClassifier(freqs[val].max()))\n for val in freqs)\n return DecisionTreeClassifier(label, feature_name, decisions)\n\n def refine(self, labeled_featuresets, entropy_cutoff, depth_cutoff,\n support_cutoff, binary=False, feature_values=None,\n verbose=False):\n if len(labeled_featuresets) <= support_cutoff: return\n if self._fname is None: return\n if depth_cutoff <= 0: return\n for fval in self._decisions:\n fval_featuresets = [(featureset, label) for (featureset, label)\n in labeled_featuresets\n if featureset.get(self._fname) == fval]\n\n label_freqs = FreqDist(label for (featureset, label)\n in fval_featuresets)\n if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:\n self._decisions[fval] = DecisionTreeClassifier.train(\n fval_featuresets, entropy_cutoff, depth_cutoff,\n support_cutoff, binary, feature_values, verbose)\n if self._default is not None:\n default_featuresets = [(featureset, label) for (featureset, label)\n in labeled_featuresets\n if featureset.get(self._fname) not in\n self._decisions]\n label_freqs = FreqDist(label for (featureset, label)\n in default_featuresets)\n if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:\n self._default = DecisionTreeClassifier.train(\n default_featuresets, entropy_cutoff, depth_cutoff,\n support_cutoff, binary, feature_values, verbose)\n\n @staticmethod\n def best_stump(feature_names, labeled_featuresets, verbose=False):\n best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)\n best_error = best_stump.error(labeled_featuresets)\n for fname in feature_names:\n stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)\n stump_error = stump.error(labeled_featuresets)\n if stump_error < best_error:\n best_error = stump_error\n best_stump = stump\n if verbose:\n print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \\\n (len(labeled_featuresets), best_stump._fname, best_error)))\n return best_stump\n\n @staticmethod\n def binary_stump(feature_name, feature_value, labeled_featuresets):\n label = FreqDist(label for (featureset, label)\n in labeled_featuresets).max()\n\n # Find the best label for each value.\n pos_fdist = FreqDist()\n neg_fdist = FreqDist()\n for featureset, label in labeled_featuresets:\n if featureset.get(feature_name) == feature_value:\n pos_fdist[label] += 1\n else:\n neg_fdist[label] += 1\n\n\n decisions = 
{}\n default = label\n # But hopefully we have observations!\n if pos_fdist.N() > 0:\n decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())}\n if neg_fdist.N() > 0:\n default = DecisionTreeClassifier(neg_fdist.max())\n\n return DecisionTreeClassifier(label, feature_name, decisions, default)\n\n @staticmethod\n def best_binary_stump(feature_names, labeled_featuresets, feature_values,\n verbose=False):\n best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)\n best_error = best_stump.error(labeled_featuresets)\n for fname in feature_names:\n for fval in feature_values[fname]:\n stump = DecisionTreeClassifier.binary_stump(\n fname, fval, labeled_featuresets)\n stump_error = stump.error(labeled_featuresets)\n if stump_error < best_error:\n best_error = stump_error\n best_stump = stump\n if best_stump._decisions:\n descr = '{0}={1}'.format(best_stump._fname,\n list(best_stump._decisions.keys())[0])\n else:\n descr = '(default)'\n if verbose:\n print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \\\n (len(labeled_featuresets), descr, best_error)))\n return best_stump\n\n##//////////////////////////////////////////////////////\n## Demo\n##//////////////////////////////////////////////////////\n\ndef f(x):\n return DecisionTreeClassifier.train(x, binary=True, verbose=True)\n\ndef demo():\n from nltk.classify.util import names_demo, binary_names_demo_features\n classifier = names_demo(f, #DecisionTreeClassifier.train,\n binary_names_demo_features)\n print(classifier.pp(depth=7))\n print(classifier.pseudocode(depth=7))\n\nif __name__ == '__main__':\n demo()\n\n", "path": "nltk/classify/decisiontree.py"}], "after_files": [{"content": "# Natural Language Toolkit: Decision Tree Classifiers\n#\n# Copyright (C) 2001-2017 NLTK Project\n# Author: Edward Loper <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nA classifier model that decides which label to assign to a token on\nthe basis of a tree structure, where branches correspond to conditions\non feature values, and leaves correspond to label assignments.\n\"\"\"\nfrom __future__ import print_function, unicode_literals, division\n\nfrom collections import defaultdict\n\nfrom nltk.probability import FreqDist, MLEProbDist, entropy\nfrom nltk.classify.api import ClassifierI\nfrom nltk.compat import python_2_unicode_compatible\n\n@python_2_unicode_compatible\nclass DecisionTreeClassifier(ClassifierI):\n def __init__(self, label, feature_name=None, decisions=None, default=None):\n \"\"\"\n :param label: The most likely label for tokens that reach\n this node in the decision tree. If this decision tree\n has no children, then this label will be assigned to\n any token that reaches this decision tree.\n :param feature_name: The name of the feature that this\n decision tree selects for.\n :param decisions: A dictionary mapping from feature values\n for the feature identified by ``feature_name`` to\n child decision trees.\n :param default: The child that will be used if the value of\n feature ``feature_name`` does not match any of the keys in\n ``decisions``. 
This is used when constructing binary\n decision trees.\n \"\"\"\n self._label = label\n self._fname = feature_name\n self._decisions = decisions\n self._default = default\n\n def labels(self):\n labels = [self._label]\n if self._decisions is not None:\n for dt in self._decisions.values():\n labels.extend(dt.labels())\n if self._default is not None:\n labels.extend(self._default.labels())\n return list(set(labels))\n\n def classify(self, featureset):\n # Decision leaf:\n if self._fname is None:\n return self._label\n\n # Decision tree:\n fval = featureset.get(self._fname)\n if fval in self._decisions:\n return self._decisions[fval].classify(featureset)\n elif self._default is not None:\n return self._default.classify(featureset)\n else:\n return self._label\n\n def error(self, labeled_featuresets):\n errors = 0\n for featureset, label in labeled_featuresets:\n if self.classify(featureset) != label:\n errors += 1\n return errors/len(labeled_featuresets)\n\n def pretty_format(self, width=70, prefix='', depth=4):\n \"\"\"\n Return a string containing a pretty-printed version of this\n decision tree. Each line in this string corresponds to a\n single decision tree node or leaf, and indentation is used to\n display the structure of the decision tree.\n \"\"\"\n # [xx] display default!!\n if self._fname is None:\n n = width-len(prefix)-15\n return '{0}{1} {2}\\n'.format(prefix, '.'*n, self._label)\n s = ''\n for i, (fval, result) in enumerate(sorted(self._decisions.items())):\n hdr = '{0}{1}={2}? '.format(prefix, self._fname, fval)\n n = width-15-len(hdr)\n s += '{0}{1} {2}\\n'.format(hdr, '.'*(n), result._label)\n if result._fname is not None and depth>1:\n s += result.pretty_format(width, prefix+' ', depth-1)\n if self._default is not None:\n n = width-len(prefix)-21\n s += '{0}else: {1} {2}\\n'.format(prefix, '.'*n, self._default._label)\n if self._default._fname is not None and depth>1:\n s += self._default.pretty_format(width, prefix+' ', depth-1)\n return s\n\n def pseudocode(self, prefix='', depth=4):\n \"\"\"\n Return a string representation of this decision tree that\n expresses the decisions it makes as a nested set of pseudocode\n if statements.\n \"\"\"\n if self._fname is None:\n return \"{0}return {1!r}\\n\".format(prefix, self._label)\n s = ''\n for (fval, result) in sorted(self._decisions.items()):\n s += '{0}if {1} == {2!r}: '.format(prefix, self._fname, fval)\n if result._fname is not None and depth>1:\n s += '\\n'+result.pseudocode(prefix+' ', depth-1)\n else:\n s += 'return {0!r}\\n'.format(result._label)\n if self._default is not None:\n if len(self._decisions) == 1:\n s += '{0}if {1} != {2!r}: '.format(prefix, self._fname,\n list(self._decisions.keys())[0])\n else:\n s += '{0}else: '.format(prefix)\n if self._default._fname is not None and depth>1:\n s += '\\n'+self._default.pseudocode(prefix+' ', depth-1)\n else:\n s += 'return {0!r}\\n'.format(self._default._label)\n return s\n\n def __str__(self):\n return self.pretty_format()\n\n @staticmethod\n def train(labeled_featuresets, entropy_cutoff=0.05, depth_cutoff=100,\n support_cutoff=10, binary=False, feature_values=None,\n verbose=False):\n \"\"\"\n :param binary: If true, then treat all feature/value pairs as\n individual binary features, rather than using a single n-way\n branch for each feature.\n \"\"\"\n # Collect a list of all feature names.\n feature_names = set()\n for featureset, label in labeled_featuresets:\n for fname in featureset:\n feature_names.add(fname)\n\n # Collect a list of the values each feature can 
take.\n if feature_values is None and binary:\n feature_values = defaultdict(set)\n for featureset, label in labeled_featuresets:\n for fname, fval in featureset.items():\n feature_values[fname].add(fval)\n\n # Start with a stump.\n if not binary:\n tree = DecisionTreeClassifier.best_stump(\n feature_names, labeled_featuresets, verbose)\n else:\n tree = DecisionTreeClassifier.best_binary_stump(\n feature_names, labeled_featuresets, feature_values, verbose)\n\n # Refine the stump.\n tree.refine(labeled_featuresets, entropy_cutoff, depth_cutoff-1,\n support_cutoff, binary, feature_values, verbose)\n\n # Return it\n return tree\n\n @staticmethod\n def leaf(labeled_featuresets):\n label = FreqDist(label for (featureset, label)\n in labeled_featuresets).max()\n return DecisionTreeClassifier(label)\n\n @staticmethod\n def stump(feature_name, labeled_featuresets):\n label = FreqDist(label for (featureset, label)\n in labeled_featuresets).max()\n\n # Find the best label for each value.\n freqs = defaultdict(FreqDist) # freq(label|value)\n for featureset, label in labeled_featuresets:\n feature_value = featureset.get(feature_name)\n freqs[feature_value][label] += 1\n\n decisions = dict((val, DecisionTreeClassifier(freqs[val].max()))\n for val in freqs)\n return DecisionTreeClassifier(label, feature_name, decisions)\n\n def refine(self, labeled_featuresets, entropy_cutoff, depth_cutoff,\n support_cutoff, binary=False, feature_values=None,\n verbose=False):\n if len(labeled_featuresets) <= support_cutoff: return\n if self._fname is None: return\n if depth_cutoff <= 0: return\n for fval in self._decisions:\n fval_featuresets = [(featureset, label) for (featureset, label)\n in labeled_featuresets\n if featureset.get(self._fname) == fval]\n\n label_freqs = FreqDist(label for (featureset, label)\n in fval_featuresets)\n if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:\n self._decisions[fval] = DecisionTreeClassifier.train(\n fval_featuresets, entropy_cutoff, depth_cutoff,\n support_cutoff, binary, feature_values, verbose)\n if self._default is not None:\n default_featuresets = [(featureset, label) for (featureset, label)\n in labeled_featuresets\n if featureset.get(self._fname) not in\n self._decisions]\n label_freqs = FreqDist(label for (featureset, label)\n in default_featuresets)\n if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:\n self._default = DecisionTreeClassifier.train(\n default_featuresets, entropy_cutoff, depth_cutoff,\n support_cutoff, binary, feature_values, verbose)\n\n @staticmethod\n def best_stump(feature_names, labeled_featuresets, verbose=False):\n best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)\n best_error = best_stump.error(labeled_featuresets)\n for fname in feature_names:\n stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)\n stump_error = stump.error(labeled_featuresets)\n if stump_error < best_error:\n best_error = stump_error\n best_stump = stump\n if verbose:\n print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \\\n (len(labeled_featuresets), best_stump._fname, best_error)))\n return best_stump\n\n @staticmethod\n def binary_stump(feature_name, feature_value, labeled_featuresets):\n label = FreqDist(label for (featureset, label)\n in labeled_featuresets).max()\n\n # Find the best label for each value.\n pos_fdist = FreqDist()\n neg_fdist = FreqDist()\n for featureset, label in labeled_featuresets:\n if featureset.get(feature_name) == feature_value:\n pos_fdist[label] += 1\n else:\n neg_fdist[label] += 1\n\n\n decisions = 
{}\n default = label\n # But hopefully we have observations!\n if pos_fdist.N() > 0:\n decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())}\n if neg_fdist.N() > 0:\n default = DecisionTreeClassifier(neg_fdist.max())\n\n return DecisionTreeClassifier(label, feature_name, decisions, default)\n\n @staticmethod\n def best_binary_stump(feature_names, labeled_featuresets, feature_values,\n verbose=False):\n best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)\n best_error = best_stump.error(labeled_featuresets)\n for fname in feature_names:\n for fval in feature_values[fname]:\n stump = DecisionTreeClassifier.binary_stump(\n fname, fval, labeled_featuresets)\n stump_error = stump.error(labeled_featuresets)\n if stump_error < best_error:\n best_error = stump_error\n best_stump = stump\n if verbose:\n if best_stump._decisions:\n descr = '{0}={1}'.format(best_stump._fname,\n list(best_stump._decisions.keys())[0])\n else:\n descr = '(default)'\n print(('best stump for {:6d} toks uses {:20} err={:6.4f}'.format \\\n (len(labeled_featuresets), descr, best_error)))\n return best_stump\n\n##//////////////////////////////////////////////////////\n## Demo\n##//////////////////////////////////////////////////////\n\ndef f(x):\n return DecisionTreeClassifier.train(x, binary=True, verbose=True)\n\ndef demo():\n from nltk.classify.util import names_demo, binary_names_demo_features\n classifier = names_demo(f, #DecisionTreeClassifier.train,\n binary_names_demo_features)\n print(classifier.pp(depth=7))\n print(classifier.pseudocode(depth=7))\n\nif __name__ == '__main__':\n demo()\n\n", "path": "nltk/classify/decisiontree.py"}]}
3796
234
gh_patches_debug_26146
rasdani/github-patches
git_diff
mlflow__mlflow-1383
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Avoid extensive copying of model files & artifacts in pyfunc ### Issue I was trying out mlflow to serve a ML model with large model files (totalling about 2 GB) on sagemaker. I used `mlflow.pyfunc` to load the model data etc. specified via the artifacts-dictionary. However, when I built the Docker container and ran it locally, gunicorn always timed out (due to the hard-coded 60s timeout). Observing the `/tmp` folder in the Docker container during the run as well as digging into the mlflow code, I noticed that temporary folders containing the **full** model artifacts (in my case amounting to about 2 GB) were copied **twice** per worker, while the model files were already all lying safe and sound in the `/opt/ml/model` directory in the Docker container. ### Reason The reason for this behavior is that each worker calls upon initialization `pyfunc.load_pyfunc("/opt/ml/model/")` (see `wsgi.py` in `mlflow/sagemaker/container/scoring_server`). In `pyfunc.load_pyfunc` i) the model files are "downloaded" (copied to a temporary directory in `/tmp` via `local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)`. Then, the `_load_pyfunc` in the pyfunc loader_module (`model.py`) is called. This again, ii) copies all artifact files to a temporary directory with suffix "artifacts" ### My workaround As a quick fix for myself, I i) set `local_model_path = local_file_uri_to_path(model_uri)`in `mlflow/pyfunc/__init__.py` ii) set `artifacts[saved_artifact_name] = os.path.join(model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH])` instead of `tmp_artifact_path` in `mlflow/pyfunc/model.py` and commented out the creation of `tmp_artifact_path` which involves `_copy_file_or_tree` to copy the artifact files. ### Request Since I believe I'm not the only one working with larger model artifacts, I suggest improving the handling of the model artifacts. Besides, it might be reasonable to allow for a custom gunicorn timeout in case model initialization is more involved. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlflow/pyfunc/model.py` Content: ``` 1 """ 2 The ``mlflow.pyfunc.model`` module defines logic for saving and loading custom "python_function" 3 models with a user-defined ``PythonModel`` subclass. 
4 """ 5 6 import os 7 import tempfile 8 import shutil 9 import yaml 10 from abc import ABCMeta, abstractmethod 11 12 import cloudpickle 13 14 import mlflow.pyfunc 15 import mlflow.utils 16 from mlflow.exceptions import MlflowException 17 from mlflow.models import Model 18 from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS 19 from mlflow.tracking.artifact_utils import _download_artifact_from_uri 20 from mlflow.utils.environment import _mlflow_conda_env 21 from mlflow.utils.model_utils import _get_flavor_configuration 22 from mlflow.utils.file_utils import TempDir, _copy_file_or_tree 23 24 CONFIG_KEY_ARTIFACTS = "artifacts" 25 CONFIG_KEY_ARTIFACT_RELATIVE_PATH = "path" 26 CONFIG_KEY_ARTIFACT_URI = "uri" 27 CONFIG_KEY_PYTHON_MODEL = "python_model" 28 CONFIG_KEY_CLOUDPICKLE_VERSION = "cloudpickle_version" 29 30 31 def get_default_conda_env(): 32 """ 33 :return: The default Conda environment for MLflow Models produced by calls to 34 :func:`save_model() <mlflow.pyfunc.save_model>` 35 and :func:`log_model() <mlflow.pyfunc.log_model>` when a user-defined subclass of 36 :class:`PythonModel` is provided. 37 """ 38 return _mlflow_conda_env( 39 additional_conda_deps=None, 40 additional_pip_deps=[ 41 "cloudpickle=={}".format(cloudpickle.__version__), 42 ], 43 additional_conda_channels=None) 44 45 46 class PythonModel(object): 47 """ 48 Represents a generic Python model that evaluates inputs and produces API-compatible outputs. 49 By subclassing :class:`~PythonModel`, users can create customized MLflow models with the 50 "python_function" ("pyfunc") flavor, leveraging custom inference logic and artifact 51 dependencies. 52 """ 53 __metaclass__ = ABCMeta 54 55 def load_context(self, context): 56 """ 57 Loads artifacts from the specified :class:`~PythonModelContext` that can be used by 58 :func:`~PythonModel.predict` when evaluating inputs. When loading an MLflow model with 59 :func:`~load_pyfunc`, this method is called as soon as the :class:`~PythonModel` is 60 constructed. 61 62 The same :class:`~PythonModelContext` will also be available during calls to 63 :func:`~PythonModel.predict`, but it may be more efficient to override this method 64 and load artifacts from the context at model load time. 65 66 :param context: A :class:`~PythonModelContext` instance containing artifacts that the model 67 can use to perform inference. 68 """ 69 70 @abstractmethod 71 def predict(self, context, model_input): 72 """ 73 Evaluates a pyfunc-compatible input and produces a pyfunc-compatible output. 74 For more information about the pyfunc input/output API, see the :ref:`pyfunc-inference-api`. 75 76 :param context: A :class:`~PythonModelContext` instance containing artifacts that the model 77 can use to perform inference. 78 :param model_input: A pyfunc-compatible input for the model to evaluate. 79 """ 80 81 82 class PythonModelContext(object): 83 """ 84 A collection of artifacts that a :class:`~PythonModel` can use when performing inference. 85 :class:`~PythonModelContext` objects are created *implicitly* by the 86 :func:`save_model() <mlflow.pyfunc.save_model>` and 87 :func:`log_model() <mlflow.pyfunc.log_model>` persistence methods, using the contents specified 88 by the ``artifacts`` parameter of these methods. 89 """ 90 91 def __init__(self, artifacts): 92 """ 93 :param artifacts: A dictionary of ``<name, artifact_path>`` entries, where ``artifact_path`` 94 is an absolute filesystem path to a given artifact. 
95 """ 96 self._artifacts = artifacts 97 98 @property 99 def artifacts(self): 100 """ 101 :return: A dictionary containing ``<name, artifact_path>`` entries, where ``artifact_path`` 102 is an absolute filesystem path to the artifact. 103 """ 104 return self._artifacts 105 106 107 def _save_model_with_class_artifacts_params(path, python_model, artifacts=None, conda_env=None, 108 code_paths=None, mlflow_model=Model()): 109 """ 110 :param path: The path to which to save the Python model. 111 :param python_model: An instance of a subclass of :class:`~PythonModel`. ``python_model`` 112 defines how the model loads artifacts and how it performs inference. 113 :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs 114 are resolved to absolute filesystem paths, producing a dictionary of 115 ``<name, absolute_path>`` entries. ``python_model`` can reference these 116 resolved entries as the ``artifacts`` property of the ``context`` attribute. 117 If *None*, no artifacts are added to the model. 118 :param conda_env: Either a dictionary representation of a Conda environment or the path to a 119 Conda environment yaml file. If provided, this decribes the environment 120 this model should be run in. At minimum, it should specify the dependencies 121 contained in :func:`get_default_conda_env()`. If `None`, the default 122 :func:`get_default_conda_env()` environment is added to the model. 123 :param code_paths: A list of local filesystem paths to Python file dependencies (or directories 124 containing file dependencies). These files are *prepended* to the system 125 path before the model is loaded. 126 :param mlflow_model: The model configuration to which to add the ``mlflow.pyfunc`` flavor. 127 """ 128 if os.path.exists(path): 129 raise MlflowException( 130 message="Path '{}' already exists".format(path), 131 error_code=RESOURCE_ALREADY_EXISTS) 132 os.makedirs(path) 133 134 custom_model_config_kwargs = { 135 CONFIG_KEY_CLOUDPICKLE_VERSION: cloudpickle.__version__, 136 } 137 if isinstance(python_model, PythonModel): 138 saved_python_model_subpath = "python_model.pkl" 139 with open(os.path.join(path, saved_python_model_subpath), "wb") as out: 140 cloudpickle.dump(python_model, out) 141 custom_model_config_kwargs[CONFIG_KEY_PYTHON_MODEL] = saved_python_model_subpath 142 else: 143 raise MlflowException( 144 message=("`python_model` must be a subclass of `PythonModel`. 
Instead, found an" 145 " object of type: {python_model_type}".format( 146 python_model_type=type(python_model))), 147 error_code=INVALID_PARAMETER_VALUE) 148 149 if artifacts: 150 saved_artifacts_config = {} 151 with TempDir() as tmp_artifacts_dir: 152 tmp_artifacts_config = {} 153 saved_artifacts_dir_subpath = "artifacts" 154 for artifact_name, artifact_uri in artifacts.items(): 155 tmp_artifact_path = _download_artifact_from_uri( 156 artifact_uri=artifact_uri, output_path=tmp_artifacts_dir.path()) 157 tmp_artifacts_config[artifact_name] = tmp_artifact_path 158 saved_artifact_subpath = os.path.join( 159 saved_artifacts_dir_subpath, 160 os.path.relpath(path=tmp_artifact_path, start=tmp_artifacts_dir.path())) 161 saved_artifacts_config[artifact_name] = { 162 CONFIG_KEY_ARTIFACT_RELATIVE_PATH: saved_artifact_subpath, 163 CONFIG_KEY_ARTIFACT_URI: artifact_uri, 164 } 165 166 shutil.move(tmp_artifacts_dir.path(), os.path.join(path, saved_artifacts_dir_subpath)) 167 custom_model_config_kwargs[CONFIG_KEY_ARTIFACTS] = saved_artifacts_config 168 169 conda_env_subpath = "conda.yaml" 170 if conda_env is None: 171 conda_env = get_default_conda_env() 172 elif not isinstance(conda_env, dict): 173 with open(conda_env, "r") as f: 174 conda_env = yaml.safe_load(f) 175 with open(os.path.join(path, conda_env_subpath), "w") as f: 176 yaml.safe_dump(conda_env, stream=f, default_flow_style=False) 177 178 saved_code_subpath = None 179 if code_paths is not None: 180 saved_code_subpath = "code" 181 for code_path in code_paths: 182 _copy_file_or_tree(src=code_path, dst=path, dst_dir=saved_code_subpath) 183 184 mlflow.pyfunc.add_to_model(model=mlflow_model, loader_module=__name__, code=saved_code_subpath, 185 env=conda_env_subpath, **custom_model_config_kwargs) 186 mlflow_model.save(os.path.join(path, 'MLmodel')) 187 188 189 def _load_pyfunc(model_path): 190 pyfunc_config = _get_flavor_configuration( 191 model_path=model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME) 192 193 python_model_cloudpickle_version = pyfunc_config.get(CONFIG_KEY_CLOUDPICKLE_VERSION, None) 194 if python_model_cloudpickle_version is None: 195 mlflow.pyfunc._logger.warning( 196 "The version of CloudPickle used to save the model could not be found in the MLmodel" 197 " configuration") 198 elif python_model_cloudpickle_version != cloudpickle.__version__: 199 # CloudPickle does not have a well-defined cross-version compatibility policy. Micro version 200 # releases have been known to cause incompatibilities. Therefore, we match on the full 201 # library version 202 mlflow.pyfunc._logger.warning( 203 "The version of CloudPickle that was used to save the model, `CloudPickle %s`, differs" 204 " from the version of CloudPickle that is currently running, `CloudPickle %s`, and may" 205 " be incompatible", 206 python_model_cloudpickle_version, cloudpickle.__version__) 207 208 python_model_subpath = pyfunc_config.get(CONFIG_KEY_PYTHON_MODEL, None) 209 if python_model_subpath is None: 210 raise MlflowException( 211 "Python model path was not specified in the model configuration") 212 with open(os.path.join(model_path, python_model_subpath), "rb") as f: 213 python_model = cloudpickle.load(f) 214 215 # TODO: If the longevity of the temporary directory prior becomes problematic, consider using 216 # an alternative solution. 
217 tmp_artifacts_dir_path = tempfile.mkdtemp(suffix="artifacts") 218 artifacts = {} 219 for saved_artifact_name, saved_artifact_info in\ 220 pyfunc_config.get(CONFIG_KEY_ARTIFACTS, {}).items(): 221 tmp_artifact_path = os.path.join( 222 tmp_artifacts_dir_path, 223 _copy_file_or_tree( 224 src=os.path.join( 225 model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH]), 226 dst=tmp_artifacts_dir_path, 227 dst_dir=saved_artifact_name)) 228 artifacts[saved_artifact_name] = tmp_artifact_path 229 230 context = PythonModelContext(artifacts=artifacts) 231 python_model.load_context(context=context) 232 return _PythonModelPyfuncWrapper(python_model=python_model, context=context) 233 234 235 class _PythonModelPyfuncWrapper(object): 236 """ 237 Wrapper class that creates a predict function such that 238 predict(model_input: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame) 239 """ 240 241 def __init__(self, python_model, context): 242 """ 243 :param python_model: An instance of a subclass of :class:`~PythonModel`. 244 :param context: A :class:`~PythonModelContext` instance containing artifacts that 245 ``python_model`` may use when performing inference. 246 """ 247 self.python_model = python_model 248 self.context = context 249 250 def predict(self, model_input): 251 return self.python_model.predict(self.context, model_input) 252 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlflow/pyfunc/model.py b/mlflow/pyfunc/model.py --- a/mlflow/pyfunc/model.py +++ b/mlflow/pyfunc/model.py @@ -4,7 +4,6 @@ """ import os -import tempfile import shutil import yaml from abc import ABCMeta, abstractmethod @@ -212,20 +211,11 @@ with open(os.path.join(model_path, python_model_subpath), "rb") as f: python_model = cloudpickle.load(f) - # TODO: If the longevity of the temporary directory prior becomes problematic, consider using - # an alternative solution. - tmp_artifacts_dir_path = tempfile.mkdtemp(suffix="artifacts") artifacts = {} for saved_artifact_name, saved_artifact_info in\ pyfunc_config.get(CONFIG_KEY_ARTIFACTS, {}).items(): - tmp_artifact_path = os.path.join( - tmp_artifacts_dir_path, - _copy_file_or_tree( - src=os.path.join( - model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH]), - dst=tmp_artifacts_dir_path, - dst_dir=saved_artifact_name)) - artifacts[saved_artifact_name] = tmp_artifact_path + artifacts[saved_artifact_name] = os.path.join( + model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH]) context = PythonModelContext(artifacts=artifacts) python_model.load_context(context=context)
{"golden_diff": "diff --git a/mlflow/pyfunc/model.py b/mlflow/pyfunc/model.py\n--- a/mlflow/pyfunc/model.py\n+++ b/mlflow/pyfunc/model.py\n@@ -4,7 +4,6 @@\n \"\"\"\n \n import os\n-import tempfile\n import shutil\n import yaml\n from abc import ABCMeta, abstractmethod\n@@ -212,20 +211,11 @@\n with open(os.path.join(model_path, python_model_subpath), \"rb\") as f:\n python_model = cloudpickle.load(f)\n \n- # TODO: If the longevity of the temporary directory prior becomes problematic, consider using\n- # an alternative solution.\n- tmp_artifacts_dir_path = tempfile.mkdtemp(suffix=\"artifacts\")\n artifacts = {}\n for saved_artifact_name, saved_artifact_info in\\\n pyfunc_config.get(CONFIG_KEY_ARTIFACTS, {}).items():\n- tmp_artifact_path = os.path.join(\n- tmp_artifacts_dir_path,\n- _copy_file_or_tree(\n- src=os.path.join(\n- model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH]),\n- dst=tmp_artifacts_dir_path,\n- dst_dir=saved_artifact_name))\n- artifacts[saved_artifact_name] = tmp_artifact_path\n+ artifacts[saved_artifact_name] = os.path.join(\n+ model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH])\n \n context = PythonModelContext(artifacts=artifacts)\n python_model.load_context(context=context)\n", "issue": "Avoid extensive copying of model files & artifacts in pyfunc\n### Issue\r\nI was trying out mlflow to serve a ML model with large model files (totalling about 2 GB) on sagemaker. I used `mlflow.pyfunc` to load the model data etc. specified via the artifacts-dictionary. \r\n\r\nHowever, when I built the Docker container and ran it locally, gunicorn always timed out (due to the hard-coded 60s timeout). \r\nObserving the `/tmp` folder in the Docker container during the run as well as digging into the mlflow code, I noticed that temporary folders containing the **full** model artifacts (in my case amounting to about 2 GB) were copied **twice** per worker, while the model files were already all lying safe and sound in the `/opt/ml/model` directory in the Docker container.\r\n\r\n### Reason\r\nThe reason for this behavior is that each worker calls upon initialization `pyfunc.load_pyfunc(\"/opt/ml/model/\")` (see `wsgi.py` in `mlflow/sagemaker/container/scoring_server`).\r\nIn `pyfunc.load_pyfunc`\r\ni) the model files are \"downloaded\" (copied to a temporary directory in `/tmp` via `local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)`. Then, the `_load_pyfunc` in the pyfunc loader_module (`model.py`) is called. This again,\r\nii) copies all artifact files to a temporary directory with suffix \"artifacts\"\r\n\r\n### My workaround\r\nAs a quick fix for myself, I\r\ni) set `local_model_path = local_file_uri_to_path(model_uri)`in `mlflow/pyfunc/__init__.py`\r\nii) set `artifacts[saved_artifact_name] = os.path.join(model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH])` instead of `tmp_artifact_path` in `mlflow/pyfunc/model.py` and commented out the creation of `tmp_artifact_path` which involves `_copy_file_or_tree` to copy the artifact files.\r\n\r\n### Request\r\nSince I believe I'm not the only one working with larger model artifacts, I suggest improving the handling of the model artifacts. 
Besides, it might be reasonable to allow for a custom gunicorn timeout in case model initialization is more involved.\n", "before_files": [{"content": "\"\"\"\nThe ``mlflow.pyfunc.model`` module defines logic for saving and loading custom \"python_function\"\nmodels with a user-defined ``PythonModel`` subclass.\n\"\"\"\n\nimport os\nimport tempfile\nimport shutil\nimport yaml\nfrom abc import ABCMeta, abstractmethod\n\nimport cloudpickle\n\nimport mlflow.pyfunc\nimport mlflow.utils\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom mlflow.utils.file_utils import TempDir, _copy_file_or_tree\n\nCONFIG_KEY_ARTIFACTS = \"artifacts\"\nCONFIG_KEY_ARTIFACT_RELATIVE_PATH = \"path\"\nCONFIG_KEY_ARTIFACT_URI = \"uri\"\nCONFIG_KEY_PYTHON_MODEL = \"python_model\"\nCONFIG_KEY_CLOUDPICKLE_VERSION = \"cloudpickle_version\"\n\n\ndef get_default_conda_env():\n \"\"\"\n :return: The default Conda environment for MLflow Models produced by calls to\n :func:`save_model() <mlflow.pyfunc.save_model>`\n and :func:`log_model() <mlflow.pyfunc.log_model>` when a user-defined subclass of\n :class:`PythonModel` is provided.\n \"\"\"\n return _mlflow_conda_env(\n additional_conda_deps=None,\n additional_pip_deps=[\n \"cloudpickle=={}\".format(cloudpickle.__version__),\n ],\n additional_conda_channels=None)\n\n\nclass PythonModel(object):\n \"\"\"\n Represents a generic Python model that evaluates inputs and produces API-compatible outputs.\n By subclassing :class:`~PythonModel`, users can create customized MLflow models with the\n \"python_function\" (\"pyfunc\") flavor, leveraging custom inference logic and artifact\n dependencies.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def load_context(self, context):\n \"\"\"\n Loads artifacts from the specified :class:`~PythonModelContext` that can be used by\n :func:`~PythonModel.predict` when evaluating inputs. 
When loading an MLflow model with\n :func:`~load_pyfunc`, this method is called as soon as the :class:`~PythonModel` is\n constructed.\n\n The same :class:`~PythonModelContext` will also be available during calls to\n :func:`~PythonModel.predict`, but it may be more efficient to override this method\n and load artifacts from the context at model load time.\n\n :param context: A :class:`~PythonModelContext` instance containing artifacts that the model\n can use to perform inference.\n \"\"\"\n\n @abstractmethod\n def predict(self, context, model_input):\n \"\"\"\n Evaluates a pyfunc-compatible input and produces a pyfunc-compatible output.\n For more information about the pyfunc input/output API, see the :ref:`pyfunc-inference-api`.\n\n :param context: A :class:`~PythonModelContext` instance containing artifacts that the model\n can use to perform inference.\n :param model_input: A pyfunc-compatible input for the model to evaluate.\n \"\"\"\n\n\nclass PythonModelContext(object):\n \"\"\"\n A collection of artifacts that a :class:`~PythonModel` can use when performing inference.\n :class:`~PythonModelContext` objects are created *implicitly* by the\n :func:`save_model() <mlflow.pyfunc.save_model>` and\n :func:`log_model() <mlflow.pyfunc.log_model>` persistence methods, using the contents specified\n by the ``artifacts`` parameter of these methods.\n \"\"\"\n\n def __init__(self, artifacts):\n \"\"\"\n :param artifacts: A dictionary of ``<name, artifact_path>`` entries, where ``artifact_path``\n is an absolute filesystem path to a given artifact.\n \"\"\"\n self._artifacts = artifacts\n\n @property\n def artifacts(self):\n \"\"\"\n :return: A dictionary containing ``<name, artifact_path>`` entries, where ``artifact_path``\n is an absolute filesystem path to the artifact.\n \"\"\"\n return self._artifacts\n\n\ndef _save_model_with_class_artifacts_params(path, python_model, artifacts=None, conda_env=None,\n code_paths=None, mlflow_model=Model()):\n \"\"\"\n :param path: The path to which to save the Python model.\n :param python_model: An instance of a subclass of :class:`~PythonModel`. ``python_model``\n defines how the model loads artifacts and how it performs inference.\n :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs\n are resolved to absolute filesystem paths, producing a dictionary of\n ``<name, absolute_path>`` entries. ``python_model`` can reference these\n resolved entries as the ``artifacts`` property of the ``context`` attribute.\n If *None*, no artifacts are added to the model.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If `None`, the default\n :func:`get_default_conda_env()` environment is added to the model.\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). 
These files are *prepended* to the system\n path before the model is loaded.\n :param mlflow_model: The model configuration to which to add the ``mlflow.pyfunc`` flavor.\n \"\"\"\n if os.path.exists(path):\n raise MlflowException(\n message=\"Path '{}' already exists\".format(path),\n error_code=RESOURCE_ALREADY_EXISTS)\n os.makedirs(path)\n\n custom_model_config_kwargs = {\n CONFIG_KEY_CLOUDPICKLE_VERSION: cloudpickle.__version__,\n }\n if isinstance(python_model, PythonModel):\n saved_python_model_subpath = \"python_model.pkl\"\n with open(os.path.join(path, saved_python_model_subpath), \"wb\") as out:\n cloudpickle.dump(python_model, out)\n custom_model_config_kwargs[CONFIG_KEY_PYTHON_MODEL] = saved_python_model_subpath\n else:\n raise MlflowException(\n message=(\"`python_model` must be a subclass of `PythonModel`. Instead, found an\"\n \" object of type: {python_model_type}\".format(\n python_model_type=type(python_model))),\n error_code=INVALID_PARAMETER_VALUE)\n\n if artifacts:\n saved_artifacts_config = {}\n with TempDir() as tmp_artifacts_dir:\n tmp_artifacts_config = {}\n saved_artifacts_dir_subpath = \"artifacts\"\n for artifact_name, artifact_uri in artifacts.items():\n tmp_artifact_path = _download_artifact_from_uri(\n artifact_uri=artifact_uri, output_path=tmp_artifacts_dir.path())\n tmp_artifacts_config[artifact_name] = tmp_artifact_path\n saved_artifact_subpath = os.path.join(\n saved_artifacts_dir_subpath,\n os.path.relpath(path=tmp_artifact_path, start=tmp_artifacts_dir.path()))\n saved_artifacts_config[artifact_name] = {\n CONFIG_KEY_ARTIFACT_RELATIVE_PATH: saved_artifact_subpath,\n CONFIG_KEY_ARTIFACT_URI: artifact_uri,\n }\n\n shutil.move(tmp_artifacts_dir.path(), os.path.join(path, saved_artifacts_dir_subpath))\n custom_model_config_kwargs[CONFIG_KEY_ARTIFACTS] = saved_artifacts_config\n\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n saved_code_subpath = None\n if code_paths is not None:\n saved_code_subpath = \"code\"\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=saved_code_subpath)\n\n mlflow.pyfunc.add_to_model(model=mlflow_model, loader_module=__name__, code=saved_code_subpath,\n env=conda_env_subpath, **custom_model_config_kwargs)\n mlflow_model.save(os.path.join(path, 'MLmodel'))\n\n\ndef _load_pyfunc(model_path):\n pyfunc_config = _get_flavor_configuration(\n model_path=model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)\n\n python_model_cloudpickle_version = pyfunc_config.get(CONFIG_KEY_CLOUDPICKLE_VERSION, None)\n if python_model_cloudpickle_version is None:\n mlflow.pyfunc._logger.warning(\n \"The version of CloudPickle used to save the model could not be found in the MLmodel\"\n \" configuration\")\n elif python_model_cloudpickle_version != cloudpickle.__version__:\n # CloudPickle does not have a well-defined cross-version compatibility policy. Micro version\n # releases have been known to cause incompatibilities. 
Therefore, we match on the full\n # library version\n mlflow.pyfunc._logger.warning(\n \"The version of CloudPickle that was used to save the model, `CloudPickle %s`, differs\"\n \" from the version of CloudPickle that is currently running, `CloudPickle %s`, and may\"\n \" be incompatible\",\n python_model_cloudpickle_version, cloudpickle.__version__)\n\n python_model_subpath = pyfunc_config.get(CONFIG_KEY_PYTHON_MODEL, None)\n if python_model_subpath is None:\n raise MlflowException(\n \"Python model path was not specified in the model configuration\")\n with open(os.path.join(model_path, python_model_subpath), \"rb\") as f:\n python_model = cloudpickle.load(f)\n\n # TODO: If the longevity of the temporary directory prior becomes problematic, consider using\n # an alternative solution.\n tmp_artifacts_dir_path = tempfile.mkdtemp(suffix=\"artifacts\")\n artifacts = {}\n for saved_artifact_name, saved_artifact_info in\\\n pyfunc_config.get(CONFIG_KEY_ARTIFACTS, {}).items():\n tmp_artifact_path = os.path.join(\n tmp_artifacts_dir_path,\n _copy_file_or_tree(\n src=os.path.join(\n model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH]),\n dst=tmp_artifacts_dir_path,\n dst_dir=saved_artifact_name))\n artifacts[saved_artifact_name] = tmp_artifact_path\n\n context = PythonModelContext(artifacts=artifacts)\n python_model.load_context(context=context)\n return _PythonModelPyfuncWrapper(python_model=python_model, context=context)\n\n\nclass _PythonModelPyfuncWrapper(object):\n \"\"\"\n Wrapper class that creates a predict function such that\n predict(model_input: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)\n \"\"\"\n\n def __init__(self, python_model, context):\n \"\"\"\n :param python_model: An instance of a subclass of :class:`~PythonModel`.\n :param context: A :class:`~PythonModelContext` instance containing artifacts that\n ``python_model`` may use when performing inference.\n \"\"\"\n self.python_model = python_model\n self.context = context\n\n def predict(self, model_input):\n return self.python_model.predict(self.context, model_input)\n", "path": "mlflow/pyfunc/model.py"}], "after_files": [{"content": "\"\"\"\nThe ``mlflow.pyfunc.model`` module defines logic for saving and loading custom \"python_function\"\nmodels with a user-defined ``PythonModel`` subclass.\n\"\"\"\n\nimport os\nimport shutil\nimport yaml\nfrom abc import ABCMeta, abstractmethod\n\nimport cloudpickle\n\nimport mlflow.pyfunc\nimport mlflow.utils\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom mlflow.utils.file_utils import TempDir, _copy_file_or_tree\n\nCONFIG_KEY_ARTIFACTS = \"artifacts\"\nCONFIG_KEY_ARTIFACT_RELATIVE_PATH = \"path\"\nCONFIG_KEY_ARTIFACT_URI = \"uri\"\nCONFIG_KEY_PYTHON_MODEL = \"python_model\"\nCONFIG_KEY_CLOUDPICKLE_VERSION = \"cloudpickle_version\"\n\n\ndef get_default_conda_env():\n \"\"\"\n :return: The default Conda environment for MLflow Models produced by calls to\n :func:`save_model() <mlflow.pyfunc.save_model>`\n and :func:`log_model() <mlflow.pyfunc.log_model>` when a user-defined subclass of\n :class:`PythonModel` is provided.\n \"\"\"\n return _mlflow_conda_env(\n additional_conda_deps=None,\n additional_pip_deps=[\n 
\"cloudpickle=={}\".format(cloudpickle.__version__),\n ],\n additional_conda_channels=None)\n\n\nclass PythonModel(object):\n \"\"\"\n Represents a generic Python model that evaluates inputs and produces API-compatible outputs.\n By subclassing :class:`~PythonModel`, users can create customized MLflow models with the\n \"python_function\" (\"pyfunc\") flavor, leveraging custom inference logic and artifact\n dependencies.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def load_context(self, context):\n \"\"\"\n Loads artifacts from the specified :class:`~PythonModelContext` that can be used by\n :func:`~PythonModel.predict` when evaluating inputs. When loading an MLflow model with\n :func:`~load_pyfunc`, this method is called as soon as the :class:`~PythonModel` is\n constructed.\n\n The same :class:`~PythonModelContext` will also be available during calls to\n :func:`~PythonModel.predict`, but it may be more efficient to override this method\n and load artifacts from the context at model load time.\n\n :param context: A :class:`~PythonModelContext` instance containing artifacts that the model\n can use to perform inference.\n \"\"\"\n\n @abstractmethod\n def predict(self, context, model_input):\n \"\"\"\n Evaluates a pyfunc-compatible input and produces a pyfunc-compatible output.\n For more information about the pyfunc input/output API, see the :ref:`pyfunc-inference-api`.\n\n :param context: A :class:`~PythonModelContext` instance containing artifacts that the model\n can use to perform inference.\n :param model_input: A pyfunc-compatible input for the model to evaluate.\n \"\"\"\n\n\nclass PythonModelContext(object):\n \"\"\"\n A collection of artifacts that a :class:`~PythonModel` can use when performing inference.\n :class:`~PythonModelContext` objects are created *implicitly* by the\n :func:`save_model() <mlflow.pyfunc.save_model>` and\n :func:`log_model() <mlflow.pyfunc.log_model>` persistence methods, using the contents specified\n by the ``artifacts`` parameter of these methods.\n \"\"\"\n\n def __init__(self, artifacts):\n \"\"\"\n :param artifacts: A dictionary of ``<name, artifact_path>`` entries, where ``artifact_path``\n is an absolute filesystem path to a given artifact.\n \"\"\"\n self._artifacts = artifacts\n\n @property\n def artifacts(self):\n \"\"\"\n :return: A dictionary containing ``<name, artifact_path>`` entries, where ``artifact_path``\n is an absolute filesystem path to the artifact.\n \"\"\"\n return self._artifacts\n\n\ndef _save_model_with_class_artifacts_params(path, python_model, artifacts=None, conda_env=None,\n code_paths=None, mlflow_model=Model()):\n \"\"\"\n :param path: The path to which to save the Python model.\n :param python_model: An instance of a subclass of :class:`~PythonModel`. ``python_model``\n defines how the model loads artifacts and how it performs inference.\n :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs\n are resolved to absolute filesystem paths, producing a dictionary of\n ``<name, absolute_path>`` entries. ``python_model`` can reference these\n resolved entries as the ``artifacts`` property of the ``context`` attribute.\n If *None*, no artifacts are added to the model.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decribes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. 
If `None`, the default\n :func:`get_default_conda_env()` environment is added to the model.\n :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n containing file dependencies). These files are *prepended* to the system\n path before the model is loaded.\n :param mlflow_model: The model configuration to which to add the ``mlflow.pyfunc`` flavor.\n \"\"\"\n if os.path.exists(path):\n raise MlflowException(\n message=\"Path '{}' already exists\".format(path),\n error_code=RESOURCE_ALREADY_EXISTS)\n os.makedirs(path)\n\n custom_model_config_kwargs = {\n CONFIG_KEY_CLOUDPICKLE_VERSION: cloudpickle.__version__,\n }\n if isinstance(python_model, PythonModel):\n saved_python_model_subpath = \"python_model.pkl\"\n with open(os.path.join(path, saved_python_model_subpath), \"wb\") as out:\n cloudpickle.dump(python_model, out)\n custom_model_config_kwargs[CONFIG_KEY_PYTHON_MODEL] = saved_python_model_subpath\n else:\n raise MlflowException(\n message=(\"`python_model` must be a subclass of `PythonModel`. Instead, found an\"\n \" object of type: {python_model_type}\".format(\n python_model_type=type(python_model))),\n error_code=INVALID_PARAMETER_VALUE)\n\n if artifacts:\n saved_artifacts_config = {}\n with TempDir() as tmp_artifacts_dir:\n tmp_artifacts_config = {}\n saved_artifacts_dir_subpath = \"artifacts\"\n for artifact_name, artifact_uri in artifacts.items():\n tmp_artifact_path = _download_artifact_from_uri(\n artifact_uri=artifact_uri, output_path=tmp_artifacts_dir.path())\n tmp_artifacts_config[artifact_name] = tmp_artifact_path\n saved_artifact_subpath = os.path.join(\n saved_artifacts_dir_subpath,\n os.path.relpath(path=tmp_artifact_path, start=tmp_artifacts_dir.path()))\n saved_artifacts_config[artifact_name] = {\n CONFIG_KEY_ARTIFACT_RELATIVE_PATH: saved_artifact_subpath,\n CONFIG_KEY_ARTIFACT_URI: artifact_uri,\n }\n\n shutil.move(tmp_artifacts_dir.path(), os.path.join(path, saved_artifacts_dir_subpath))\n custom_model_config_kwargs[CONFIG_KEY_ARTIFACTS] = saved_artifacts_config\n\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n saved_code_subpath = None\n if code_paths is not None:\n saved_code_subpath = \"code\"\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=saved_code_subpath)\n\n mlflow.pyfunc.add_to_model(model=mlflow_model, loader_module=__name__, code=saved_code_subpath,\n env=conda_env_subpath, **custom_model_config_kwargs)\n mlflow_model.save(os.path.join(path, 'MLmodel'))\n\n\ndef _load_pyfunc(model_path):\n pyfunc_config = _get_flavor_configuration(\n model_path=model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME)\n\n python_model_cloudpickle_version = pyfunc_config.get(CONFIG_KEY_CLOUDPICKLE_VERSION, None)\n if python_model_cloudpickle_version is None:\n mlflow.pyfunc._logger.warning(\n \"The version of CloudPickle used to save the model could not be found in the MLmodel\"\n \" configuration\")\n elif python_model_cloudpickle_version != cloudpickle.__version__:\n # CloudPickle does not have a well-defined cross-version compatibility policy. Micro version\n # releases have been known to cause incompatibilities. 
Therefore, we match on the full\n # library version\n mlflow.pyfunc._logger.warning(\n \"The version of CloudPickle that was used to save the model, `CloudPickle %s`, differs\"\n \" from the version of CloudPickle that is currently running, `CloudPickle %s`, and may\"\n \" be incompatible\",\n python_model_cloudpickle_version, cloudpickle.__version__)\n\n python_model_subpath = pyfunc_config.get(CONFIG_KEY_PYTHON_MODEL, None)\n if python_model_subpath is None:\n raise MlflowException(\n \"Python model path was not specified in the model configuration\")\n with open(os.path.join(model_path, python_model_subpath), \"rb\") as f:\n python_model = cloudpickle.load(f)\n\n artifacts = {}\n for saved_artifact_name, saved_artifact_info in\\\n pyfunc_config.get(CONFIG_KEY_ARTIFACTS, {}).items():\n artifacts[saved_artifact_name] = os.path.join(\n model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH])\n\n context = PythonModelContext(artifacts=artifacts)\n python_model.load_context(context=context)\n return _PythonModelPyfuncWrapper(python_model=python_model, context=context)\n\n\nclass _PythonModelPyfuncWrapper(object):\n \"\"\"\n Wrapper class that creates a predict function such that\n predict(model_input: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)\n \"\"\"\n\n def __init__(self, python_model, context):\n \"\"\"\n :param python_model: An instance of a subclass of :class:`~PythonModel`.\n :param context: A :class:`~PythonModelContext` instance containing artifacts that\n ``python_model`` may use when performing inference.\n \"\"\"\n self.python_model = python_model\n self.context = context\n\n def predict(self, model_input):\n return self.python_model.predict(self.context, model_input)\n", "path": "mlflow/pyfunc/model.py"}]}
3888
332
gh_patches_debug_61517
rasdani/github-patches
git_diff
open-mmlab__mmpose-271
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pylint: R1710 ```bash mmpose/apis/test.py:142:0: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements) mmpose/datasets/datasets/mesh/mesh_mix_dataset.py:38:4: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmpose/datasets/datasets/mesh/mesh_mix_dataset.py` Content: ``` 1 from abc import ABCMeta 2 3 import numpy as np 4 from torch.utils.data import Dataset 5 6 from mmpose.datasets.builder import DATASETS 7 from .mesh_base_dataset import MeshBaseDataset 8 9 10 @DATASETS.register_module() 11 class MeshMixDataset(Dataset, metaclass=ABCMeta): 12 """Mix Dataset for 3D human mesh estimation. 13 14 The dataset combines data from multiple datasets (MeshBaseDataset) and 15 sample the data from different datasets with the provided proportions. 16 The dataset loads raw features and apply specified transforms 17 to return a dict containing the image tensors and other information. 18 19 Args: 20 configs (list): List of configs for multiple datasets. 21 partition (list): Sample proportion of multiple datasets. 22 The the elements of it should be non-negative and the 23 sum of it should be 1. 24 """ 25 26 def __init__(self, configs, partition): 27 """Load data from multiple datasets.""" 28 assert min(partition) >= 0 29 assert sum(partition) == 1 30 self.partition = np.array(partition).cumsum() 31 self.datasets = [MeshBaseDataset(**cfg) for cfg in configs] 32 self.length = max(len(ds) for ds in self.datasets) 33 34 def __len__(self): 35 """Get the size of the dataset.""" 36 return self.length 37 38 def __getitem__(self, idx): 39 """Given index, sample the data from multiple datasets with the given 40 proportion.""" 41 p = np.random.rand() 42 for i in range(len(self.datasets)): 43 if p <= self.partition[i]: 44 index_new = (idx + np.random.rand()) * len( 45 self.datasets[i]) / self.length 46 index_new = int(np.round(index_new)) % (len(self.datasets[i])) 47 return self.datasets[i][index_new] 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py --- a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py +++ b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py @@ -45,3 +45,4 @@ self.datasets[i]) / self.length index_new = int(np.round(index_new)) % (len(self.datasets[i])) return self.datasets[i][index_new] + return None
{"golden_diff": "diff --git a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py\n--- a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py\n+++ b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py\n@@ -45,3 +45,4 @@\n self.datasets[i]) / self.length\n index_new = int(np.round(index_new)) % (len(self.datasets[i]))\n return self.datasets[i][index_new]\n+ return None\n", "issue": "Pylint: R1710\n```bash\r\nmmpose/apis/test.py:142:0: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements)\r\nmmpose/datasets/datasets/mesh/mesh_mix_dataset.py:38:4: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements)\r\n```\n", "before_files": [{"content": "from abc import ABCMeta\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .mesh_base_dataset import MeshBaseDataset\n\n\[email protected]_module()\nclass MeshMixDataset(Dataset, metaclass=ABCMeta):\n \"\"\"Mix Dataset for 3D human mesh estimation.\n\n The dataset combines data from multiple datasets (MeshBaseDataset) and\n sample the data from different datasets with the provided proportions.\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n configs (list): List of configs for multiple datasets.\n partition (list): Sample proportion of multiple datasets.\n The the elements of it should be non-negative and the\n sum of it should be 1.\n \"\"\"\n\n def __init__(self, configs, partition):\n \"\"\"Load data from multiple datasets.\"\"\"\n assert min(partition) >= 0\n assert sum(partition) == 1\n self.partition = np.array(partition).cumsum()\n self.datasets = [MeshBaseDataset(**cfg) for cfg in configs]\n self.length = max(len(ds) for ds in self.datasets)\n\n def __len__(self):\n \"\"\"Get the size of the dataset.\"\"\"\n return self.length\n\n def __getitem__(self, idx):\n \"\"\"Given index, sample the data from multiple datasets with the given\n proportion.\"\"\"\n p = np.random.rand()\n for i in range(len(self.datasets)):\n if p <= self.partition[i]:\n index_new = (idx + np.random.rand()) * len(\n self.datasets[i]) / self.length\n index_new = int(np.round(index_new)) % (len(self.datasets[i]))\n return self.datasets[i][index_new]\n", "path": "mmpose/datasets/datasets/mesh/mesh_mix_dataset.py"}], "after_files": [{"content": "from abc import ABCMeta\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .mesh_base_dataset import MeshBaseDataset\n\n\[email protected]_module()\nclass MeshMixDataset(Dataset, metaclass=ABCMeta):\n \"\"\"Mix Dataset for 3D human mesh estimation.\n\n The dataset combines data from multiple datasets (MeshBaseDataset) and\n sample the data from different datasets with the provided proportions.\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n configs (list): List of configs for multiple datasets.\n partition (list): Sample proportion of multiple datasets.\n The the elements of it should be non-negative and the\n sum of it should be 1.\n \"\"\"\n\n def __init__(self, configs, partition):\n \"\"\"Load data from multiple datasets.\"\"\"\n assert min(partition) >= 0\n assert sum(partition) == 1\n self.partition = 
np.array(partition).cumsum()\n self.datasets = [MeshBaseDataset(**cfg) for cfg in configs]\n self.length = max(len(ds) for ds in self.datasets)\n\n def __len__(self):\n \"\"\"Get the size of the dataset.\"\"\"\n return self.length\n\n def __getitem__(self, idx):\n \"\"\"Given index, sample the data from multiple datasets with the given\n proportion.\"\"\"\n p = np.random.rand()\n for i in range(len(self.datasets)):\n if p <= self.partition[i]:\n index_new = (idx + np.random.rand()) * len(\n self.datasets[i]) / self.length\n index_new = int(np.round(index_new)) % (len(self.datasets[i]))\n return self.datasets[i][index_new]\n return None\n", "path": "mmpose/datasets/datasets/mesh/mesh_mix_dataset.py"}]}
849
120
gh_patches_debug_4133
rasdani/github-patches
git_diff
archlinux__archinstall-2033
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Redundant profile menu Selecting the Profile option in the menu leads to a menu with the option Profile. Below is a screenshot of the menu described. ![image](https://github.com/archlinux/archinstall/assets/11915375/e2fac7f7-2ee9-4e49-a7e2-a9d4fe09fafc) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `archinstall/lib/profile/profile_menu.py` Content: ``` 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING, Any, Optional, Dict 4 5 from archinstall.default_profiles.profile import Profile, GreeterType 6 from .profile_model import ProfileConfiguration 7 from ..menu import Menu, MenuSelectionType, AbstractSubMenu, Selector 8 from ..interactions.system_conf import select_driver 9 from ..hardware import GfxDriver 10 11 if TYPE_CHECKING: 12 _: Any 13 14 15 class ProfileMenu(AbstractSubMenu): 16 def __init__( 17 self, 18 data_store: Dict[str, Any], 19 preset: Optional[ProfileConfiguration] = None 20 ): 21 if preset: 22 self._preset = preset 23 else: 24 self._preset = ProfileConfiguration() 25 26 super().__init__(data_store=data_store) 27 28 def setup_selection_menu_options(self): 29 self._menu_options['profile'] = Selector( 30 _('Profile'), 31 lambda x: self._select_profile(x), 32 display_func=lambda x: x.name if x else None, 33 preview_func=self._preview_profile, 34 default=self._preset.profile, 35 enabled=True 36 ) 37 38 self._menu_options['gfx_driver'] = Selector( 39 _('Graphics driver'), 40 lambda preset: self._select_gfx_driver(preset), 41 display_func=lambda x: x.value if x else None, 42 dependencies=['profile'], 43 default=self._preset.gfx_driver if self._preset.profile and self._preset.profile.is_graphic_driver_supported() else None, 44 enabled=self._preset.profile.is_graphic_driver_supported() if self._preset.profile else False 45 ) 46 47 self._menu_options['greeter'] = Selector( 48 _('Greeter'), 49 lambda preset: select_greeter(self._menu_options['profile'].current_selection, preset), 50 display_func=lambda x: x.value if x else None, 51 dependencies=['profile'], 52 default=self._preset.greeter if self._preset.profile and self._preset.profile.is_greeter_supported() else None, 53 enabled=self._preset.profile.is_greeter_supported() if self._preset.profile else False 54 ) 55 56 def run(self, allow_reset: bool = True) -> Optional[ProfileConfiguration]: 57 super().run(allow_reset=allow_reset) 58 59 if self._data_store.get('profile', None): 60 return ProfileConfiguration( 61 self._menu_options['profile'].current_selection, 62 self._menu_options['gfx_driver'].current_selection, 63 self._menu_options['greeter'].current_selection 64 ) 65 66 return None 67 68 def _select_profile(self, preset: Optional[Profile]) -> Optional[Profile]: 69 profile = select_profile(preset) 70 if profile is not None: 71 if not profile.is_graphic_driver_supported(): 72 self._menu_options['gfx_driver'].set_enabled(False) 73 self._menu_options['gfx_driver'].set_current_selection(None) 74 else: 75 self._menu_options['gfx_driver'].set_enabled(True) 76 self._menu_options['gfx_driver'].set_current_selection(GfxDriver.AllOpenSource) 77 78 if not profile.is_greeter_supported(): 79 self._menu_options['greeter'].set_enabled(False) 80 self._menu_options['greeter'].set_current_selection(None) 81 else: 82 self._menu_options['greeter'].set_enabled(True) 83 
self._menu_options['greeter'].set_current_selection(profile.default_greeter_type) 84 else: 85 self._menu_options['gfx_driver'].set_current_selection(None) 86 self._menu_options['greeter'].set_current_selection(None) 87 88 return profile 89 90 def _select_gfx_driver(self, preset: Optional[GfxDriver] = None) -> Optional[GfxDriver]: 91 driver = preset 92 profile: Optional[Profile] = self._menu_options['profile'].current_selection 93 94 if profile: 95 if profile.is_graphic_driver_supported(): 96 driver = select_driver(current_value=preset) 97 98 if driver and 'Sway' in profile.current_selection_names(): 99 if driver.is_nvidia(): 100 prompt = str(_('The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues, are you okay with that?')) 101 choice = Menu(prompt, Menu.yes_no(), default_option=Menu.no(), skip=False).run() 102 103 if choice.value == Menu.no(): 104 return None 105 106 return driver 107 108 def _preview_profile(self) -> Optional[str]: 109 profile: Optional[Profile] = self._menu_options['profile'].current_selection 110 111 if profile: 112 names = profile.current_selection_names() 113 return '\n'.join(names) 114 115 return None 116 117 118 def select_greeter( 119 profile: Optional[Profile] = None, 120 preset: Optional[GreeterType] = None 121 ) -> Optional[GreeterType]: 122 if not profile or profile.is_greeter_supported(): 123 title = str(_('Please chose which greeter to install')) 124 greeter_options = [greeter.value for greeter in GreeterType] 125 126 default: Optional[GreeterType] = None 127 128 if preset is not None: 129 default = preset 130 elif profile is not None: 131 default_greeter = profile.default_greeter_type 132 default = default_greeter if default_greeter else None 133 134 choice = Menu( 135 title, 136 greeter_options, 137 skip=True, 138 default_option=default.value if default else None 139 ).run() 140 141 match choice.type_: 142 case MenuSelectionType.Skip: 143 return default 144 145 return GreeterType(choice.single_value) 146 147 return None 148 149 150 def select_profile( 151 current_profile: Optional[Profile] = None, 152 title: Optional[str] = None, 153 allow_reset: bool = True, 154 multi: bool = False 155 ) -> Optional[Profile]: 156 from archinstall.lib.profile.profiles_handler import profile_handler 157 top_level_profiles = profile_handler.get_top_level_profiles() 158 159 display_title = title 160 if not display_title: 161 display_title = str(_('This is a list of pre-programmed default_profiles')) 162 163 choice = profile_handler.select_profile( 164 top_level_profiles, 165 current_profile=current_profile, 166 title=display_title, 167 allow_reset=allow_reset, 168 multi=multi 169 ) 170 171 match choice.type_: 172 case MenuSelectionType.Selection: 173 profile_selection: Profile = choice.single_value 174 select_result = profile_selection.do_on_select() 175 176 if not select_result: 177 return select_profile( 178 current_profile=current_profile, 179 title=title, 180 allow_reset=allow_reset, 181 multi=multi 182 ) 183 184 # we're going to reset the currently selected profile(s) to avoid 185 # any stale data laying around 186 match select_result: 187 case select_result.NewSelection: 188 profile_handler.reset_top_level_profiles(exclude=[profile_selection]) 189 current_profile = profile_selection 190 case select_result.ResetCurrent: 191 profile_handler.reset_top_level_profiles() 192 current_profile = None 193 case select_result.SameSelection: 194 pass 195 196 return current_profile 197 case MenuSelectionType.Reset: 198 return None 199 
case MenuSelectionType.Skip: 200 return current_profile 201 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/archinstall/lib/profile/profile_menu.py b/archinstall/lib/profile/profile_menu.py --- a/archinstall/lib/profile/profile_menu.py +++ b/archinstall/lib/profile/profile_menu.py @@ -27,7 +27,7 @@ def setup_selection_menu_options(self): self._menu_options['profile'] = Selector( - _('Profile'), + _('Type'), lambda x: self._select_profile(x), display_func=lambda x: x.name if x else None, preview_func=self._preview_profile,
{"golden_diff": "diff --git a/archinstall/lib/profile/profile_menu.py b/archinstall/lib/profile/profile_menu.py\n--- a/archinstall/lib/profile/profile_menu.py\n+++ b/archinstall/lib/profile/profile_menu.py\n@@ -27,7 +27,7 @@\n \n \tdef setup_selection_menu_options(self):\n \t\tself._menu_options['profile'] = Selector(\n-\t\t\t_('Profile'),\n+\t\t\t_('Type'),\n \t\t\tlambda x: self._select_profile(x),\n \t\t\tdisplay_func=lambda x: x.name if x else None,\n \t\t\tpreview_func=self._preview_profile,\n", "issue": "Redundant profile menu\nSelecting the Profile option in the menu leads to a menu with the option Profile. Below is a screenshot of the menu described.\r\n\r\n![image](https://github.com/archlinux/archinstall/assets/11915375/e2fac7f7-2ee9-4e49-a7e2-a9d4fe09fafc)\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Optional, Dict\n\nfrom archinstall.default_profiles.profile import Profile, GreeterType\nfrom .profile_model import ProfileConfiguration\nfrom ..menu import Menu, MenuSelectionType, AbstractSubMenu, Selector\nfrom ..interactions.system_conf import select_driver\nfrom ..hardware import GfxDriver\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nclass ProfileMenu(AbstractSubMenu):\n\tdef __init__(\n\t\tself,\n\t\tdata_store: Dict[str, Any],\n\t\tpreset: Optional[ProfileConfiguration] = None\n\t):\n\t\tif preset:\n\t\t\tself._preset = preset\n\t\telse:\n\t\t\tself._preset = ProfileConfiguration()\n\n\t\tsuper().__init__(data_store=data_store)\n\n\tdef setup_selection_menu_options(self):\n\t\tself._menu_options['profile'] = Selector(\n\t\t\t_('Profile'),\n\t\t\tlambda x: self._select_profile(x),\n\t\t\tdisplay_func=lambda x: x.name if x else None,\n\t\t\tpreview_func=self._preview_profile,\n\t\t\tdefault=self._preset.profile,\n\t\t\tenabled=True\n\t\t)\n\n\t\tself._menu_options['gfx_driver'] = Selector(\n\t\t\t_('Graphics driver'),\n\t\t\tlambda preset: self._select_gfx_driver(preset),\n\t\t\tdisplay_func=lambda x: x.value if x else None,\n\t\t\tdependencies=['profile'],\n\t\t\tdefault=self._preset.gfx_driver if self._preset.profile and self._preset.profile.is_graphic_driver_supported() else None,\n\t\t\tenabled=self._preset.profile.is_graphic_driver_supported() if self._preset.profile else False\n\t\t)\n\n\t\tself._menu_options['greeter'] = Selector(\n\t\t\t_('Greeter'),\n\t\t\tlambda preset: select_greeter(self._menu_options['profile'].current_selection, preset),\n\t\t\tdisplay_func=lambda x: x.value if x else None,\n\t\t\tdependencies=['profile'],\n\t\t\tdefault=self._preset.greeter if self._preset.profile and self._preset.profile.is_greeter_supported() else None,\n\t\t\tenabled=self._preset.profile.is_greeter_supported() if self._preset.profile else False\n\t\t)\n\n\tdef run(self, allow_reset: bool = True) -> Optional[ProfileConfiguration]:\n\t\tsuper().run(allow_reset=allow_reset)\n\n\t\tif self._data_store.get('profile', None):\n\t\t\treturn ProfileConfiguration(\n\t\t\t\tself._menu_options['profile'].current_selection,\n\t\t\t\tself._menu_options['gfx_driver'].current_selection,\n\t\t\t\tself._menu_options['greeter'].current_selection\n\t\t\t)\n\n\t\treturn None\n\n\tdef _select_profile(self, preset: Optional[Profile]) -> Optional[Profile]:\n\t\tprofile = select_profile(preset)\n\t\tif profile is not None:\n\t\t\tif not 
profile.is_graphic_driver_supported():\n\t\t\t\tself._menu_options['gfx_driver'].set_enabled(False)\n\t\t\t\tself._menu_options['gfx_driver'].set_current_selection(None)\n\t\t\telse:\n\t\t\t\tself._menu_options['gfx_driver'].set_enabled(True)\n\t\t\t\tself._menu_options['gfx_driver'].set_current_selection(GfxDriver.AllOpenSource)\n\n\t\t\tif not profile.is_greeter_supported():\n\t\t\t\tself._menu_options['greeter'].set_enabled(False)\n\t\t\t\tself._menu_options['greeter'].set_current_selection(None)\n\t\t\telse:\n\t\t\t\tself._menu_options['greeter'].set_enabled(True)\n\t\t\t\tself._menu_options['greeter'].set_current_selection(profile.default_greeter_type)\n\t\telse:\n\t\t\tself._menu_options['gfx_driver'].set_current_selection(None)\n\t\t\tself._menu_options['greeter'].set_current_selection(None)\n\n\t\treturn profile\n\n\tdef _select_gfx_driver(self, preset: Optional[GfxDriver] = None) -> Optional[GfxDriver]:\n\t\tdriver = preset\n\t\tprofile: Optional[Profile] = self._menu_options['profile'].current_selection\n\n\t\tif profile:\n\t\t\tif profile.is_graphic_driver_supported():\n\t\t\t\tdriver = select_driver(current_value=preset)\n\n\t\t\tif driver and 'Sway' in profile.current_selection_names():\n\t\t\t\tif driver.is_nvidia():\n\t\t\t\t\tprompt = str(_('The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues, are you okay with that?'))\n\t\t\t\t\tchoice = Menu(prompt, Menu.yes_no(), default_option=Menu.no(), skip=False).run()\n\n\t\t\t\t\tif choice.value == Menu.no():\n\t\t\t\t\t\treturn None\n\n\t\treturn driver\n\n\tdef _preview_profile(self) -> Optional[str]:\n\t\tprofile: Optional[Profile] = self._menu_options['profile'].current_selection\n\n\t\tif profile:\n\t\t\tnames = profile.current_selection_names()\n\t\t\treturn '\\n'.join(names)\n\n\t\treturn None\n\n\ndef select_greeter(\n\tprofile: Optional[Profile] = None,\n\tpreset: Optional[GreeterType] = None\n) -> Optional[GreeterType]:\n\tif not profile or profile.is_greeter_supported():\n\t\ttitle = str(_('Please chose which greeter to install'))\n\t\tgreeter_options = [greeter.value for greeter in GreeterType]\n\n\t\tdefault: Optional[GreeterType] = None\n\n\t\tif preset is not None:\n\t\t\tdefault = preset\n\t\telif profile is not None:\n\t\t\tdefault_greeter = profile.default_greeter_type\n\t\t\tdefault = default_greeter if default_greeter else None\n\n\t\tchoice = Menu(\n\t\t\ttitle,\n\t\t\tgreeter_options,\n\t\t\tskip=True,\n\t\t\tdefault_option=default.value if default else None\n\t\t).run()\n\n\t\tmatch choice.type_:\n\t\t\tcase MenuSelectionType.Skip:\n\t\t\t\treturn default\n\n\t\treturn GreeterType(choice.single_value)\n\n\treturn None\n\n\ndef select_profile(\n\tcurrent_profile: Optional[Profile] = None,\n\ttitle: Optional[str] = None,\n\tallow_reset: bool = True,\n\tmulti: bool = False\n) -> Optional[Profile]:\n\tfrom archinstall.lib.profile.profiles_handler import profile_handler\n\ttop_level_profiles = profile_handler.get_top_level_profiles()\n\n\tdisplay_title = title\n\tif not display_title:\n\t\tdisplay_title = str(_('This is a list of pre-programmed default_profiles'))\n\n\tchoice = profile_handler.select_profile(\n\t\ttop_level_profiles,\n\t\tcurrent_profile=current_profile,\n\t\ttitle=display_title,\n\t\tallow_reset=allow_reset,\n\t\tmulti=multi\n\t)\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection:\n\t\t\tprofile_selection: Profile = choice.single_value\n\t\t\tselect_result = profile_selection.do_on_select()\n\n\t\t\tif not select_result:\n\t\t\t\treturn 
select_profile(\n\t\t\t\t\tcurrent_profile=current_profile,\n\t\t\t\t\ttitle=title,\n\t\t\t\t\tallow_reset=allow_reset,\n\t\t\t\t\tmulti=multi\n\t\t\t\t)\n\n\t\t\t# we're going to reset the currently selected profile(s) to avoid\n\t\t\t# any stale data laying around\n\t\t\tmatch select_result:\n\t\t\t\tcase select_result.NewSelection:\n\t\t\t\t\tprofile_handler.reset_top_level_profiles(exclude=[profile_selection])\n\t\t\t\t\tcurrent_profile = profile_selection\n\t\t\t\tcase select_result.ResetCurrent:\n\t\t\t\t\tprofile_handler.reset_top_level_profiles()\n\t\t\t\t\tcurrent_profile = None\n\t\t\t\tcase select_result.SameSelection:\n\t\t\t\t\tpass\n\n\t\t\treturn current_profile\n\t\tcase MenuSelectionType.Reset:\n\t\t\treturn None\n\t\tcase MenuSelectionType.Skip:\n\t\t\treturn current_profile\n", "path": "archinstall/lib/profile/profile_menu.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Optional, Dict\n\nfrom archinstall.default_profiles.profile import Profile, GreeterType\nfrom .profile_model import ProfileConfiguration\nfrom ..menu import Menu, MenuSelectionType, AbstractSubMenu, Selector\nfrom ..interactions.system_conf import select_driver\nfrom ..hardware import GfxDriver\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nclass ProfileMenu(AbstractSubMenu):\n\tdef __init__(\n\t\tself,\n\t\tdata_store: Dict[str, Any],\n\t\tpreset: Optional[ProfileConfiguration] = None\n\t):\n\t\tif preset:\n\t\t\tself._preset = preset\n\t\telse:\n\t\t\tself._preset = ProfileConfiguration()\n\n\t\tsuper().__init__(data_store=data_store)\n\n\tdef setup_selection_menu_options(self):\n\t\tself._menu_options['profile'] = Selector(\n\t\t\t_('Type'),\n\t\t\tlambda x: self._select_profile(x),\n\t\t\tdisplay_func=lambda x: x.name if x else None,\n\t\t\tpreview_func=self._preview_profile,\n\t\t\tdefault=self._preset.profile,\n\t\t\tenabled=True\n\t\t)\n\n\t\tself._menu_options['gfx_driver'] = Selector(\n\t\t\t_('Graphics driver'),\n\t\t\tlambda preset: self._select_gfx_driver(preset),\n\t\t\tdisplay_func=lambda x: x.value if x else None,\n\t\t\tdependencies=['profile'],\n\t\t\tdefault=self._preset.gfx_driver if self._preset.profile and self._preset.profile.is_graphic_driver_supported() else None,\n\t\t\tenabled=self._preset.profile.is_graphic_driver_supported() if self._preset.profile else False\n\t\t)\n\n\t\tself._menu_options['greeter'] = Selector(\n\t\t\t_('Greeter'),\n\t\t\tlambda preset: select_greeter(self._menu_options['profile'].current_selection, preset),\n\t\t\tdisplay_func=lambda x: x.value if x else None,\n\t\t\tdependencies=['profile'],\n\t\t\tdefault=self._preset.greeter if self._preset.profile and self._preset.profile.is_greeter_supported() else None,\n\t\t\tenabled=self._preset.profile.is_greeter_supported() if self._preset.profile else False\n\t\t)\n\n\tdef run(self, allow_reset: bool = True) -> Optional[ProfileConfiguration]:\n\t\tsuper().run(allow_reset=allow_reset)\n\n\t\tif self._data_store.get('profile', None):\n\t\t\treturn ProfileConfiguration(\n\t\t\t\tself._menu_options['profile'].current_selection,\n\t\t\t\tself._menu_options['gfx_driver'].current_selection,\n\t\t\t\tself._menu_options['greeter'].current_selection\n\t\t\t)\n\n\t\treturn None\n\n\tdef _select_profile(self, preset: Optional[Profile]) -> Optional[Profile]:\n\t\tprofile = select_profile(preset)\n\t\tif profile is not None:\n\t\t\tif not 
profile.is_graphic_driver_supported():\n\t\t\t\tself._menu_options['gfx_driver'].set_enabled(False)\n\t\t\t\tself._menu_options['gfx_driver'].set_current_selection(None)\n\t\t\telse:\n\t\t\t\tself._menu_options['gfx_driver'].set_enabled(True)\n\t\t\t\tself._menu_options['gfx_driver'].set_current_selection(GfxDriver.AllOpenSource)\n\n\t\t\tif not profile.is_greeter_supported():\n\t\t\t\tself._menu_options['greeter'].set_enabled(False)\n\t\t\t\tself._menu_options['greeter'].set_current_selection(None)\n\t\t\telse:\n\t\t\t\tself._menu_options['greeter'].set_enabled(True)\n\t\t\t\tself._menu_options['greeter'].set_current_selection(profile.default_greeter_type)\n\t\telse:\n\t\t\tself._menu_options['gfx_driver'].set_current_selection(None)\n\t\t\tself._menu_options['greeter'].set_current_selection(None)\n\n\t\treturn profile\n\n\tdef _select_gfx_driver(self, preset: Optional[GfxDriver] = None) -> Optional[GfxDriver]:\n\t\tdriver = preset\n\t\tprofile: Optional[Profile] = self._menu_options['profile'].current_selection\n\n\t\tif profile:\n\t\t\tif profile.is_graphic_driver_supported():\n\t\t\t\tdriver = select_driver(current_value=preset)\n\n\t\t\tif driver and 'Sway' in profile.current_selection_names():\n\t\t\t\tif driver.is_nvidia():\n\t\t\t\t\tprompt = str(_('The proprietary Nvidia driver is not supported by Sway. It is likely that you will run into issues, are you okay with that?'))\n\t\t\t\t\tchoice = Menu(prompt, Menu.yes_no(), default_option=Menu.no(), skip=False).run()\n\n\t\t\t\t\tif choice.value == Menu.no():\n\t\t\t\t\t\treturn None\n\n\t\treturn driver\n\n\tdef _preview_profile(self) -> Optional[str]:\n\t\tprofile: Optional[Profile] = self._menu_options['profile'].current_selection\n\n\t\tif profile:\n\t\t\tnames = profile.current_selection_names()\n\t\t\treturn '\\n'.join(names)\n\n\t\treturn None\n\n\ndef select_greeter(\n\tprofile: Optional[Profile] = None,\n\tpreset: Optional[GreeterType] = None\n) -> Optional[GreeterType]:\n\tif not profile or profile.is_greeter_supported():\n\t\ttitle = str(_('Please chose which greeter to install'))\n\t\tgreeter_options = [greeter.value for greeter in GreeterType]\n\n\t\tdefault: Optional[GreeterType] = None\n\n\t\tif preset is not None:\n\t\t\tdefault = preset\n\t\telif profile is not None:\n\t\t\tdefault_greeter = profile.default_greeter_type\n\t\t\tdefault = default_greeter if default_greeter else None\n\n\t\tchoice = Menu(\n\t\t\ttitle,\n\t\t\tgreeter_options,\n\t\t\tskip=True,\n\t\t\tdefault_option=default.value if default else None\n\t\t).run()\n\n\t\tmatch choice.type_:\n\t\t\tcase MenuSelectionType.Skip:\n\t\t\t\treturn default\n\n\t\treturn GreeterType(choice.single_value)\n\n\treturn None\n\n\ndef select_profile(\n\tcurrent_profile: Optional[Profile] = None,\n\ttitle: Optional[str] = None,\n\tallow_reset: bool = True,\n\tmulti: bool = False\n) -> Optional[Profile]:\n\tfrom archinstall.lib.profile.profiles_handler import profile_handler\n\ttop_level_profiles = profile_handler.get_top_level_profiles()\n\n\tdisplay_title = title\n\tif not display_title:\n\t\tdisplay_title = str(_('This is a list of pre-programmed default_profiles'))\n\n\tchoice = profile_handler.select_profile(\n\t\ttop_level_profiles,\n\t\tcurrent_profile=current_profile,\n\t\ttitle=display_title,\n\t\tallow_reset=allow_reset,\n\t\tmulti=multi\n\t)\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection:\n\t\t\tprofile_selection: Profile = choice.single_value\n\t\t\tselect_result = profile_selection.do_on_select()\n\n\t\t\tif not select_result:\n\t\t\t\treturn 
select_profile(\n\t\t\t\t\tcurrent_profile=current_profile,\n\t\t\t\t\ttitle=title,\n\t\t\t\t\tallow_reset=allow_reset,\n\t\t\t\t\tmulti=multi\n\t\t\t\t)\n\n\t\t\t# we're going to reset the currently selected profile(s) to avoid\n\t\t\t# any stale data laying around\n\t\t\tmatch select_result:\n\t\t\t\tcase select_result.NewSelection:\n\t\t\t\t\tprofile_handler.reset_top_level_profiles(exclude=[profile_selection])\n\t\t\t\t\tcurrent_profile = profile_selection\n\t\t\t\tcase select_result.ResetCurrent:\n\t\t\t\t\tprofile_handler.reset_top_level_profiles()\n\t\t\t\t\tcurrent_profile = None\n\t\t\t\tcase select_result.SameSelection:\n\t\t\t\t\tpass\n\n\t\t\treturn current_profile\n\t\tcase MenuSelectionType.Reset:\n\t\t\treturn None\n\t\tcase MenuSelectionType.Skip:\n\t\t\treturn current_profile\n", "path": "archinstall/lib/profile/profile_menu.py"}]}
2,376
117
gh_patches_debug_16387
rasdani/github-patches
git_diff
python__python-docs-es-106
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Revisar los `:role:!key` Cuando hicimos la migración #27 aceptamos `:role:!key` como `:role:key`. La única diferencia entre ellos es que el que tiene `!` no hace un link a la referencia. Tenemos que revisar que queden consistentes nuevamente. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conf.py` Content: ``` 1 # Sphinx configuration file. 2 # 3 # - import original configurations from cpython/Doc/conf.py 4 # - append the path considering the cpython submodule is at ./cpython 5 # - create the symbolic links under ./cpython/locale/es/LC_MESSAGES 6 # - make the build to work under Read the Docs 7 # 8 # The git submodule was created using this Stack Overflow answer 9 # to fetch only the commit that I needed and avoid clonning the whole history 10 # https://stackoverflow.com/a/27445058 11 # 12 # This can be built locally using `sphinx-build` by running 13 # 14 # $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html 15 16 import sys, os, time 17 sys.path.append(os.path.abspath('cpython/Doc/tools/extensions')) 18 sys.path.append(os.path.abspath('cpython/Doc/includes')) 19 20 # Import all the Sphinx settings from cpython 21 sys.path.append(os.path.abspath('cpython/Doc')) 22 from conf import * 23 24 # Call patchlevel with the proper path to get the version from 25 # instead of hardcoding it 26 import patchlevel 27 version, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc')) 28 29 project = 'Python en Español' 30 copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y') 31 32 html_theme_path = ['cpython/Doc/tools'] 33 templates_path = ['cpython/Doc/tools/templates'] 34 html_static_path = ['cpython/Doc/tools/static'] 35 36 os.system('mkdir -p cpython/locales/es/') 37 os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES') 38 39 40 if not os.environ.get('SPHINX_GETTEXT') == 'True': 41 # Override all the files from ``.overrides`` directory 42 import glob 43 for root, dirs, files in os.walk('.overrides'): 44 for fname in files: 45 if fname == 'README.rst' and root == '.overrides': 46 continue 47 destroot = root.replace('.overrides', '').lstrip('/') 48 outputdir = os.path.join( 49 'cpython', 50 'Doc', 51 destroot, 52 fname, 53 ) 54 os.system(f'ln -nfs `pwd`/{root}/{fname} {outputdir}') 55 56 gettext_compact = False 57 locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir 58 59 60 # NOTE: Read the Docs does not support "multi document output". 61 # So, we put all the documentation as a single file for now. 62 _stdauthor = r'Guido van Rossum\\and the Python development team' 63 latex_documents = [ 64 ('contents', 'python-docs-es.tex', u'Documentación de Python en Español', 65 _stdauthor, 'manual'), 66 ] 67 68 def setup(app): 69 70 def add_contributing_banner(app, doctree): 71 """ 72 Insert a banner at the top of the index. 73 74 This way, we can easily communicate people to help with the translation, 75 pointing them to different resources. 76 """ 77 78 if app.builder.format != 'html': 79 # Do not include the banner when building with other formats 80 # (this is useful when using -b gettext) 81 return 82 83 from docutils import nodes, core 84 85 message = '¡Ayúdanos a traducir la documentación oficial de Python al Español! 
' \ 86 f'Puedes encontrar más información en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. ' \ 87 'Ayuda a acercar Python a más personas de habla hispana.' 88 89 paragraph = core.publish_doctree(message)[0] 90 banner = nodes.warning(ids=['contributing-banner']) 91 banner.append(paragraph) 92 93 for document in doctree.traverse(nodes.document): 94 document.insert(0, banner) 95 96 # Change the sourcedir programmatically because Read the Docs always call it with `.` 97 app.srcdir = 'cpython/Doc' 98 99 app.connect('doctree-read', add_contributing_banner) 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conf.py b/conf.py --- a/conf.py +++ b/conf.py @@ -11,7 +11,7 @@ # # This can be built locally using `sphinx-build` by running # -# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html +# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html import sys, os, time sys.path.append(os.path.abspath('cpython/Doc/tools/extensions')) @@ -37,6 +37,12 @@ os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES') +exclude_patterns = [ + # This file is not included and it not marked as :orphan: + 'distutils/_setuptools_disclaimer.rst', + 'README.rst', +] + if not os.environ.get('SPHINX_GETTEXT') == 'True': # Override all the files from ``.overrides`` directory import glob
{"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -11,7 +11,7 @@\n #\n # This can be built locally using `sphinx-build` by running\n #\n-# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html\n+# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html\n \n import sys, os, time\n sys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\n@@ -37,6 +37,12 @@\n os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n \n \n+exclude_patterns = [\n+ # This file is not included and it not marked as :orphan:\n+ 'distutils/_setuptools_disclaimer.rst',\n+ 'README.rst',\n+]\n+\n if not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n import glob\n", "issue": "Revisar los `:role:!key`\nCuando hicimos la migraci\u00f3n #27 aceptamos `:role:!key` como `:role:key`.\r\n\r\nLa \u00fanica diferencia entre ellos es que el que tiene `!` no hace un link a la referencia.\r\n\r\nTenemos que revisar que queden consistentes nuevamente.\n", "before_files": [{"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\n# Call patchlevel with the proper path to get the version from\n# instead of hardcoding it\nimport patchlevel\nversion, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))\n\nproject = 'Python en Espa\u00f1ol'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\n\nif not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n import glob\n for root, dirs, files in os.walk('.overrides'):\n for fname in files:\n if fname == 'README.rst' and root == '.overrides':\n continue\n destroot = root.replace('.overrides', '').lstrip('/')\n outputdir = os.path.join(\n 'cpython',\n 'Doc',\n destroot,\n fname,\n )\n os.system(f'ln -nfs `pwd`/{root}/{fname} {outputdir}')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\n\n# NOTE: Read the Docs does not support \"multi document output\".\n# So, we put all the documentation as a single file for now.\n_stdauthor = r'Guido van Rossum\\\\and the Python development team'\nlatex_documents = [\n ('contents', 'python-docs-es.tex', u'Documentaci\u00f3n de Python en Espa\u00f1ol',\n _stdauthor, 'manual'),\n]\n\ndef setup(app):\n\n def add_contributing_banner(app, doctree):\n \"\"\"\n Insert a banner at the top of the index.\n\n This way, we 
can easily communicate people to help with the translation,\n pointing them to different resources.\n \"\"\"\n\n if app.builder.format != 'html':\n # Do not include the banner when building with other formats\n # (this is useful when using -b gettext)\n return\n\n from docutils import nodes, core\n\n message = '\u00a1Ay\u00fadanos a traducir la documentaci\u00f3n oficial de Python al Espa\u00f1ol! ' \\\n f'Puedes encontrar m\u00e1s informaci\u00f3n en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. ' \\\n 'Ayuda a acercar Python a m\u00e1s personas de habla hispana.'\n\n paragraph = core.publish_doctree(message)[0]\n banner = nodes.warning(ids=['contributing-banner'])\n banner.append(paragraph)\n\n for document in doctree.traverse(nodes.document):\n document.insert(0, banner)\n\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n\n app.connect('doctree-read', add_contributing_banner)\n", "path": "conf.py"}], "after_files": [{"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\n# Call patchlevel with the proper path to get the version from\n# instead of hardcoding it\nimport patchlevel\nversion, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))\n\nproject = 'Python en Espa\u00f1ol'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\n\nexclude_patterns = [\n # This file is not included and it not marked as :orphan:\n 'distutils/_setuptools_disclaimer.rst',\n 'README.rst',\n]\n\nif not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n import glob\n for root, dirs, files in os.walk('.overrides'):\n for fname in files:\n if fname == 'README.rst' and root == '.overrides':\n continue\n destroot = root.replace('.overrides', '').lstrip('/')\n outputdir = os.path.join(\n 'cpython',\n 'Doc',\n destroot,\n fname,\n )\n os.system(f'ln -nfs `pwd`/{root}/{fname} {outputdir}')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\n\n# NOTE: Read the Docs does not support \"multi document output\".\n# So, we put all the documentation as a single file for now.\n_stdauthor = r'Guido van Rossum\\\\and the Python development team'\nlatex_documents = [\n ('contents', 'python-docs-es.tex', u'Documentaci\u00f3n de Python en Espa\u00f1ol',\n _stdauthor, 'manual'),\n]\n\ndef setup(app):\n\n def add_contributing_banner(app, 
doctree):\n \"\"\"\n Insert a banner at the top of the index.\n\n This way, we can easily communicate people to help with the translation,\n pointing them to different resources.\n \"\"\"\n\n if app.builder.format != 'html':\n # Do not include the banner when building with other formats\n # (this is useful when using -b gettext)\n return\n\n from docutils import nodes, core\n\n message = '\u00a1Ay\u00fadanos a traducir la documentaci\u00f3n oficial de Python al Espa\u00f1ol! ' \\\n f'Puedes encontrar m\u00e1s informaci\u00f3n en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. ' \\\n 'Ayuda a acercar Python a m\u00e1s personas de habla hispana.'\n\n paragraph = core.publish_doctree(message)[0]\n banner = nodes.warning(ids=['contributing-banner'])\n banner.append(paragraph)\n\n for document in doctree.traverse(nodes.document):\n document.insert(0, banner)\n\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n\n app.connect('doctree-read', add_contributing_banner)\n", "path": "conf.py"}]}
1,395
237
gh_patches_debug_30419
rasdani/github-patches
git_diff
uccser__cs-unplugged-731
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implement automatic update of .po file The .po file needs to be updated with all static strings for translation, by running the `python manage.py makemessages`. This needs to be run upon any change to templates or database content. This process should be automated on Travis to run when any such files are updated on `develop`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py` Content: ``` 1 """Script to print list of file paths of all completely translated files for a given language.""" 2 3 import os 4 import argparse 5 6 from crowdin_bot import api 7 8 SOURCE_LANGUAGE = "en" 9 10 def get_language_info(language): 11 """Get xml tree from language info api call. 12 13 Args: 14 language: (str) crowdin language code 15 16 Returns: 17 lxml.etree object 18 """ 19 return api.api_call_xml( 20 "language-status", 21 language=language 22 ) 23 24 def process_item(item, parent_path=None, csu_language_code=None): 25 """Return list of completely translated file paths in a given directory tree node. 26 27 Args: 28 item: (etree.Element): itemm node in language-status xml tree 29 (see https://support.crowdin.com/api/language-status/) 30 parent_path: (str) path to the translated file node (None if the current item is 31 the root of the directory tree). 32 csu_language_code: (str) Language code (in locale format) on CSU end 33 (may differ from crowdin language code according to language mapping 34 in yaml file) 35 36 Returns: 37 (list) list of file paths that are completely translated 38 """ 39 if item.find("node_type").text == "file": 40 filename = item.find("name").text 41 if parent_path: 42 path = os.path.join(parent_path, filename) 43 else: 44 path = filename 45 46 # Skip full translated check for *.po - they can always be included 47 if filename.endswith(".po"): 48 return [path] 49 50 if item.find("phrases").text == item.find("approved").text: 51 return [path] 52 else: 53 return [] 54 55 else: 56 inner_nodes = item.find("files") 57 dirname = item.find("name").text 58 if dirname == SOURCE_LANGUAGE: 59 dirname = csu_language_code 60 if parent_path: 61 path = os.path.join(parent_path, dirname) 62 else: 63 path = dirname 64 completed = [] 65 for inner_node in inner_nodes: 66 completed += process_item(inner_node, parent_path=path, csu_language_code=csu_language_code) 67 return completed 68 69 70 if __name__ == "__main__": 71 parser = argparse.ArgumentParser() 72 parser.add_argument('--crowdin-code', required=True, 73 help='Crowdin language code for target language') 74 parser.add_argument('--csu-code', required=True, 75 help='CSU language code for target language') 76 args = parser.parse_args() 77 lang_info = get_language_info(args.crowdin_code) 78 files = lang_info.find("files") 79 completed = [] 80 for item in files: 81 completed += process_item(item, csu_language_code=args.csu_code) 82 print('\n'.join(completed)) 83 ``` Path: `infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py` Content: ``` 1 """Script to print list of all crowdin language codes for project.""" 2 3 from crowdin_bot import api 4 5 NS_DICT = { 6 'ns': "urn:oasis:names:tc:xliff:document:1.2" 7 } 8 9 def get_project_languages(): 10 """Get list of crowdin language codes. 
11 12 Returns: 13 (list) list of project crowdin language codes 14 """ 15 info_xml = api.api_call_xml("info") 16 languages = info_xml.find('languages') 17 translatable_languages = [] 18 for language in languages: 19 # Check it's not the incontext pseudo language 20 if language.find("can_translate").text == "1": 21 translatable_languages.append(language.find('code').text) 22 return translatable_languages 23 24 if __name__ == "__main__": 25 print('\n'.join(get_project_languages())) 26 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py --- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py +++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py @@ -43,9 +43,9 @@ else: path = filename - # Skip full translated check for *.po - they can always be included + # Skip *.po - they are handled separately if filename.endswith(".po"): - return [path] + return [] if item.find("phrases").text == item.find("approved").text: return [path] @@ -79,4 +79,5 @@ completed = [] for item in files: completed += process_item(item, csu_language_code=args.csu_code) - print('\n'.join(completed)) + for path in completed: + print(path) diff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py --- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py +++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py @@ -12,14 +12,14 @@ Returns: (list) list of project crowdin language codes """ - info_xml = api.api_call_xml("info") - languages = info_xml.find('languages') - translatable_languages = [] - for language in languages: - # Check it's not the incontext pseudo language - if language.find("can_translate").text == "1": - translatable_languages.append(language.find('code').text) - return translatable_languages + active_languages = [] + trans_status = api.api_call_json("status") + for language in trans_status: + # Check language has actually had some translation done + if int(language["words_approved"]) > 0: + active_languages.append(language["code"]) + return active_languages if __name__ == "__main__": - print('\n'.join(get_project_languages())) + for language in get_project_languages(): + print(language)
{"golden_diff": "diff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py\n--- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py\n+++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py\n@@ -43,9 +43,9 @@\n else:\n path = filename\n \n- # Skip full translated check for *.po - they can always be included\n+ # Skip *.po - they are handled separately\n if filename.endswith(\".po\"):\n- return [path]\n+ return []\n \n if item.find(\"phrases\").text == item.find(\"approved\").text:\n return [path]\n@@ -79,4 +79,5 @@\n completed = []\n for item in files:\n completed += process_item(item, csu_language_code=args.csu_code)\n- print('\\n'.join(completed))\n+ for path in completed:\n+ print(path)\ndiff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py\n--- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py\n+++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py\n@@ -12,14 +12,14 @@\n Returns:\n (list) list of project crowdin language codes\n \"\"\"\n- info_xml = api.api_call_xml(\"info\")\n- languages = info_xml.find('languages')\n- translatable_languages = []\n- for language in languages:\n- # Check it's not the incontext pseudo language\n- if language.find(\"can_translate\").text == \"1\":\n- translatable_languages.append(language.find('code').text)\n- return translatable_languages\n+ active_languages = []\n+ trans_status = api.api_call_json(\"status\")\n+ for language in trans_status:\n+ # Check language has actually had some translation done\n+ if int(language[\"words_approved\"]) > 0:\n+ active_languages.append(language[\"code\"])\n+ return active_languages\n \n if __name__ == \"__main__\":\n- print('\\n'.join(get_project_languages()))\n+ for language in get_project_languages():\n+ print(language)\n", "issue": "Implement automatic update of .po file \nThe .po file needs to be updated with all static strings for translation, by running the `python manage.py makemessages`. 
This needs to be run upon any change to templates or database content.\r\n\r\nThis process should be automated on Travis to run when any such files are updated on `develop`.\n", "before_files": [{"content": "\"\"\"Script to print list of file paths of all completely translated files for a given language.\"\"\"\n\nimport os\nimport argparse\n\nfrom crowdin_bot import api\n\nSOURCE_LANGUAGE = \"en\"\n\ndef get_language_info(language):\n \"\"\"Get xml tree from language info api call.\n\n Args:\n language: (str) crowdin language code\n\n Returns:\n lxml.etree object\n \"\"\"\n return api.api_call_xml(\n \"language-status\",\n language=language\n )\n\ndef process_item(item, parent_path=None, csu_language_code=None):\n \"\"\"Return list of completely translated file paths in a given directory tree node.\n\n Args:\n item: (etree.Element): itemm node in language-status xml tree\n (see https://support.crowdin.com/api/language-status/)\n parent_path: (str) path to the translated file node (None if the current item is\n the root of the directory tree).\n csu_language_code: (str) Language code (in locale format) on CSU end\n (may differ from crowdin language code according to language mapping\n in yaml file)\n\n Returns:\n (list) list of file paths that are completely translated\n \"\"\"\n if item.find(\"node_type\").text == \"file\":\n filename = item.find(\"name\").text\n if parent_path:\n path = os.path.join(parent_path, filename)\n else:\n path = filename\n\n # Skip full translated check for *.po - they can always be included\n if filename.endswith(\".po\"):\n return [path]\n\n if item.find(\"phrases\").text == item.find(\"approved\").text:\n return [path]\n else:\n return []\n\n else:\n inner_nodes = item.find(\"files\")\n dirname = item.find(\"name\").text\n if dirname == SOURCE_LANGUAGE:\n dirname = csu_language_code\n if parent_path:\n path = os.path.join(parent_path, dirname)\n else:\n path = dirname\n completed = []\n for inner_node in inner_nodes:\n completed += process_item(inner_node, parent_path=path, csu_language_code=csu_language_code)\n return completed\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--crowdin-code', required=True,\n help='Crowdin language code for target language')\n parser.add_argument('--csu-code', required=True,\n help='CSU language code for target language')\n args = parser.parse_args()\n lang_info = get_language_info(args.crowdin_code)\n files = lang_info.find(\"files\")\n completed = []\n for item in files:\n completed += process_item(item, csu_language_code=args.csu_code)\n print('\\n'.join(completed))\n", "path": "infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py"}, {"content": "\"\"\"Script to print list of all crowdin language codes for project.\"\"\"\n\nfrom crowdin_bot import api\n\nNS_DICT = {\n 'ns': \"urn:oasis:names:tc:xliff:document:1.2\"\n}\n\ndef get_project_languages():\n \"\"\"Get list of crowdin language codes.\n\n Returns:\n (list) list of project crowdin language codes\n \"\"\"\n info_xml = api.api_call_xml(\"info\")\n languages = info_xml.find('languages')\n translatable_languages = []\n for language in languages:\n # Check it's not the incontext pseudo language\n if language.find(\"can_translate\").text == \"1\":\n translatable_languages.append(language.find('code').text)\n return translatable_languages\n\nif __name__ == \"__main__\":\n print('\\n'.join(get_project_languages()))\n", "path": 
"infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py"}], "after_files": [{"content": "\"\"\"Script to print list of file paths of all completely translated files for a given language.\"\"\"\n\nimport os\nimport argparse\n\nfrom crowdin_bot import api\n\nSOURCE_LANGUAGE = \"en\"\n\ndef get_language_info(language):\n \"\"\"Get xml tree from language info api call.\n\n Args:\n language: (str) crowdin language code\n\n Returns:\n lxml.etree object\n \"\"\"\n return api.api_call_xml(\n \"language-status\",\n language=language\n )\n\ndef process_item(item, parent_path=None, csu_language_code=None):\n \"\"\"Return list of completely translated file paths in a given directory tree node.\n\n Args:\n item: (etree.Element): itemm node in language-status xml tree\n (see https://support.crowdin.com/api/language-status/)\n parent_path: (str) path to the translated file node (None if the current item is\n the root of the directory tree).\n csu_language_code: (str) Language code (in locale format) on CSU end\n (may differ from crowdin language code according to language mapping\n in yaml file)\n\n Returns:\n (list) list of file paths that are completely translated\n \"\"\"\n if item.find(\"node_type\").text == \"file\":\n filename = item.find(\"name\").text\n if parent_path:\n path = os.path.join(parent_path, filename)\n else:\n path = filename\n\n # Skip *.po - they are handled separately\n if filename.endswith(\".po\"):\n return []\n\n if item.find(\"phrases\").text == item.find(\"approved\").text:\n return [path]\n else:\n return []\n\n else:\n inner_nodes = item.find(\"files\")\n dirname = item.find(\"name\").text\n if dirname == SOURCE_LANGUAGE:\n dirname = csu_language_code\n if parent_path:\n path = os.path.join(parent_path, dirname)\n else:\n path = dirname\n completed = []\n for inner_node in inner_nodes:\n completed += process_item(inner_node, parent_path=path, csu_language_code=csu_language_code)\n return completed\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--crowdin-code', required=True,\n help='Crowdin language code for target language')\n parser.add_argument('--csu-code', required=True,\n help='CSU language code for target language')\n args = parser.parse_args()\n lang_info = get_language_info(args.crowdin_code)\n files = lang_info.find(\"files\")\n completed = []\n for item in files:\n completed += process_item(item, csu_language_code=args.csu_code)\n for path in completed:\n print(path)\n", "path": "infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py"}, {"content": "\"\"\"Script to print list of all crowdin language codes for project.\"\"\"\n\nfrom crowdin_bot import api\n\nNS_DICT = {\n 'ns': \"urn:oasis:names:tc:xliff:document:1.2\"\n}\n\ndef get_project_languages():\n \"\"\"Get list of crowdin language codes.\n\n Returns:\n (list) list of project crowdin language codes\n \"\"\"\n active_languages = []\n trans_status = api.api_call_json(\"status\")\n for language in trans_status:\n # Check language has actually had some translation done\n if int(language[\"words_approved\"]) > 0:\n active_languages.append(language[\"code\"])\n return active_languages\n\nif __name__ == \"__main__\":\n for language in get_project_languages():\n print(language)\n", "path": "infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py"}]}
1,341
558
gh_patches_debug_7227
rasdani/github-patches
git_diff
huggingface__huggingface_hub-1629
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AttributeError: type object 'tqdm' has no attribute '_lock' ### Describe the bug Getting a tqdm issue when writing a Dask dataframe to the hub. Similar to huggingface/datasets#6066. Using latest Datasets version doesn't seem to resolve it ### Steps to reproduce the bug This is a minimal reproducer: ``` import dask.dataframe as dd import pandas as pd import random import huggingface_hub data = {"number": [random.randint(0,10) for _ in range(1000)]} df = pd.DataFrame.from_dict(data) dataframe = dd.from_pandas(df, npartitions=1) dataframe = dataframe.repartition(npartitions=2) repo_id = "nielsr/test-dask" repo_path = f"hf://datasets/{repo_id}" huggingface_hub.create_repo(repo_id=repo_id, repo_type="dataset", exist_ok=True) dd.to_parquet(dataframe, path=f"{repo_path}/data") ``` Note: I'm intentionally repartioning the Dask dataframe to 2 partitions, as it does work when only having one partition. ### Expected behavior Would expect to write to the hub without any problem. ### Environment info Datasets version 2.14.4 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/huggingface_hub/utils/tqdm.py` Content: ``` 1 #!/usr/bin/env python 2 # coding=utf-8 3 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License 16 """Utility helpers to handle progress bars in `huggingface_hub`. 17 18 Example: 19 1. Use `huggingface_hub.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`. 20 2. To disable progress bars, either use `disable_progress_bars()` helper or set the 21 environment variable `HF_HUB_DISABLE_PROGRESS_BARS` to 1. 22 3. To re-enable progress bars, use `enable_progress_bars()`. 23 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`. 24 25 NOTE: Environment variable `HF_HUB_DISABLE_PROGRESS_BARS` has the priority. 26 27 Example: 28 ```py 29 from huggingface_hub.utils import ( 30 are_progress_bars_disabled, 31 disable_progress_bars, 32 enable_progress_bars, 33 tqdm, 34 ) 35 36 # Disable progress bars globally 37 disable_progress_bars() 38 39 # Use as normal `tqdm` 40 for _ in tqdm(range(5)): 41 do_something() 42 43 # Still not showing progress bars, as `disable=False` is overwritten to `True`. 44 for _ in tqdm(range(5), disable=False): 45 do_something() 46 47 are_progress_bars_disabled() # True 48 49 # Re-enable progress bars globally 50 enable_progress_bars() 51 52 # Progress bar will be shown ! 
53 for _ in tqdm(range(5)): 54 do_something() 55 ``` 56 """ 57 import io 58 import warnings 59 from contextlib import contextmanager 60 from pathlib import Path 61 from typing import Iterator, Optional, Union 62 63 from tqdm.auto import tqdm as old_tqdm 64 65 from ..constants import HF_HUB_DISABLE_PROGRESS_BARS 66 67 68 # `HF_HUB_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_hub_progress_bars_disabled` 69 # is a `bool`. If `HF_HUB_DISABLE_PROGRESS_BARS` is set to True or False, it has priority. 70 # If `HF_HUB_DISABLE_PROGRESS_BARS` is None, it means the user have not set the 71 # environment variable and is free to enable/disable progress bars programmatically. 72 # TL;DR: env variable has priority over code. 73 # 74 # By default, progress bars are enabled. 75 _hf_hub_progress_bars_disabled: bool = HF_HUB_DISABLE_PROGRESS_BARS or False 76 77 78 def disable_progress_bars() -> None: 79 """ 80 Disable globally progress bars used in `huggingface_hub` except if `HF_HUB_DISABLE_PROGRESS_BARS` environment 81 variable has been set. 82 83 Use [`~utils.enable_progress_bars`] to re-enable them. 84 """ 85 if HF_HUB_DISABLE_PROGRESS_BARS is False: 86 warnings.warn( 87 "Cannot disable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=0` is set and has" 88 " priority." 89 ) 90 return 91 global _hf_hub_progress_bars_disabled 92 _hf_hub_progress_bars_disabled = True 93 94 95 def enable_progress_bars() -> None: 96 """ 97 Enable globally progress bars used in `huggingface_hub` except if `HF_HUB_DISABLE_PROGRESS_BARS` environment 98 variable has been set. 99 100 Use [`~utils.disable_progress_bars`] to disable them. 101 """ 102 if HF_HUB_DISABLE_PROGRESS_BARS is True: 103 warnings.warn( 104 "Cannot enable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=1` is set and has" 105 " priority." 106 ) 107 return 108 global _hf_hub_progress_bars_disabled 109 _hf_hub_progress_bars_disabled = False 110 111 112 def are_progress_bars_disabled() -> bool: 113 """Return whether progress bars are globally disabled or not. 114 115 Progress bars used in `huggingface_hub` can be enable or disabled globally using [`~utils.enable_progress_bars`] 116 and [`~utils.disable_progress_bars`] or by setting `HF_HUB_DISABLE_PROGRESS_BARS` as environment variable. 117 """ 118 global _hf_hub_progress_bars_disabled 119 return _hf_hub_progress_bars_disabled 120 121 122 class tqdm(old_tqdm): 123 """ 124 Class to override `disable` argument in case progress bars are globally disabled. 125 126 Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324. 127 """ 128 129 def __init__(self, *args, **kwargs): 130 if are_progress_bars_disabled(): 131 kwargs["disable"] = True 132 super().__init__(*args, **kwargs) 133 134 135 @contextmanager 136 def tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]: 137 """ 138 Open a file as binary and wrap the `read` method to display a progress bar when it's streamed. 139 140 First implemented in `transformers` in 2019 but removed when switched to git-lfs. Used in `huggingface_hub` to show 141 progress bar when uploading an LFS file to the Hub. See github.com/huggingface/transformers/pull/2078#discussion_r354739608 142 for implementation details. 143 144 Note: currently implementation handles only files stored on disk as it is the most common use case. Could be 145 extended to stream any `BinaryIO` object but we might have to debug some corner cases. 
146 147 Example: 148 ```py 149 >>> with tqdm_stream_file("config.json") as f: 150 >>> requests.put(url, data=f) 151 config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s] 152 ``` 153 """ 154 if isinstance(path, str): 155 path = Path(path) 156 157 with path.open("rb") as f: 158 total_size = path.stat().st_size 159 pbar = tqdm( 160 unit="B", 161 unit_scale=True, 162 total=total_size, 163 initial=0, 164 desc=path.name, 165 ) 166 167 f_read = f.read 168 169 def _inner_read(size: Optional[int] = -1) -> bytes: 170 data = f_read(size) 171 pbar.update(len(data)) 172 return data 173 174 f.read = _inner_read # type: ignore 175 176 yield f 177 178 pbar.close() 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/huggingface_hub/utils/tqdm.py b/src/huggingface_hub/utils/tqdm.py --- a/src/huggingface_hub/utils/tqdm.py +++ b/src/huggingface_hub/utils/tqdm.py @@ -131,6 +131,14 @@ kwargs["disable"] = True super().__init__(*args, **kwargs) + def __delattr__(self, attr: str) -> None: + """Fix for https://github.com/huggingface/huggingface_hub/issues/1603""" + try: + super().__delattr__(attr) + except AttributeError: + if attr != "_lock": + raise + @contextmanager def tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]:
{"golden_diff": "diff --git a/src/huggingface_hub/utils/tqdm.py b/src/huggingface_hub/utils/tqdm.py\n--- a/src/huggingface_hub/utils/tqdm.py\n+++ b/src/huggingface_hub/utils/tqdm.py\n@@ -131,6 +131,14 @@\n kwargs[\"disable\"] = True\n super().__init__(*args, **kwargs)\n \n+ def __delattr__(self, attr: str) -> None:\n+ \"\"\"Fix for https://github.com/huggingface/huggingface_hub/issues/1603\"\"\"\n+ try:\n+ super().__delattr__(attr)\n+ except AttributeError:\n+ if attr != \"_lock\":\n+ raise\n+\n \n @contextmanager\n def tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]:\n", "issue": "AttributeError: type object 'tqdm' has no attribute '_lock'\n### Describe the bug\r\n\r\nGetting a tqdm issue when writing a Dask dataframe to the hub.\r\n\r\nSimilar to huggingface/datasets#6066. Using latest Datasets version doesn't seem to resolve it\r\n\r\n### Steps to reproduce the bug\r\n\r\nThis is a minimal reproducer:\r\n```\r\nimport dask.dataframe as dd\r\nimport pandas as pd\r\nimport random\r\n\r\nimport huggingface_hub\r\n\r\ndata = {\"number\": [random.randint(0,10) for _ in range(1000)]}\r\ndf = pd.DataFrame.from_dict(data)\r\ndataframe = dd.from_pandas(df, npartitions=1)\r\ndataframe = dataframe.repartition(npartitions=2)\r\n\r\nrepo_id = \"nielsr/test-dask\"\r\nrepo_path = f\"hf://datasets/{repo_id}\"\r\nhuggingface_hub.create_repo(repo_id=repo_id, repo_type=\"dataset\", exist_ok=True)\r\ndd.to_parquet(dataframe, path=f\"{repo_path}/data\")\r\n```\r\nNote: I'm intentionally repartioning the Dask dataframe to 2 partitions, as it does work when only having one partition. \r\n\r\n### Expected behavior\r\n\r\nWould expect to write to the hub without any problem.\r\n\r\n### Environment info\r\n\r\nDatasets version 2.14.4\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\"Utility helpers to handle progress bars in `huggingface_hub`.\n\nExample:\n 1. Use `huggingface_hub.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.\n 2. To disable progress bars, either use `disable_progress_bars()` helper or set the\n environment variable `HF_HUB_DISABLE_PROGRESS_BARS` to 1.\n 3. To re-enable progress bars, use `enable_progress_bars()`.\n 4. 
To check whether progress bars are disabled, use `are_progress_bars_disabled()`.\n\nNOTE: Environment variable `HF_HUB_DISABLE_PROGRESS_BARS` has the priority.\n\nExample:\n ```py\n from huggingface_hub.utils import (\n are_progress_bars_disabled,\n disable_progress_bars,\n enable_progress_bars,\n tqdm,\n )\n\n # Disable progress bars globally\n disable_progress_bars()\n\n # Use as normal `tqdm`\n for _ in tqdm(range(5)):\n do_something()\n\n # Still not showing progress bars, as `disable=False` is overwritten to `True`.\n for _ in tqdm(range(5), disable=False):\n do_something()\n\n are_progress_bars_disabled() # True\n\n # Re-enable progress bars globally\n enable_progress_bars()\n\n # Progress bar will be shown !\n for _ in tqdm(range(5)):\n do_something()\n ```\n\"\"\"\nimport io\nimport warnings\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Iterator, Optional, Union\n\nfrom tqdm.auto import tqdm as old_tqdm\n\nfrom ..constants import HF_HUB_DISABLE_PROGRESS_BARS\n\n\n# `HF_HUB_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_hub_progress_bars_disabled`\n# is a `bool`. If `HF_HUB_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.\n# If `HF_HUB_DISABLE_PROGRESS_BARS` is None, it means the user have not set the\n# environment variable and is free to enable/disable progress bars programmatically.\n# TL;DR: env variable has priority over code.\n#\n# By default, progress bars are enabled.\n_hf_hub_progress_bars_disabled: bool = HF_HUB_DISABLE_PROGRESS_BARS or False\n\n\ndef disable_progress_bars() -> None:\n \"\"\"\n Disable globally progress bars used in `huggingface_hub` except if `HF_HUB_DISABLE_PROGRESS_BARS` environment\n variable has been set.\n\n Use [`~utils.enable_progress_bars`] to re-enable them.\n \"\"\"\n if HF_HUB_DISABLE_PROGRESS_BARS is False:\n warnings.warn(\n \"Cannot disable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=0` is set and has\"\n \" priority.\"\n )\n return\n global _hf_hub_progress_bars_disabled\n _hf_hub_progress_bars_disabled = True\n\n\ndef enable_progress_bars() -> None:\n \"\"\"\n Enable globally progress bars used in `huggingface_hub` except if `HF_HUB_DISABLE_PROGRESS_BARS` environment\n variable has been set.\n\n Use [`~utils.disable_progress_bars`] to disable them.\n \"\"\"\n if HF_HUB_DISABLE_PROGRESS_BARS is True:\n warnings.warn(\n \"Cannot enable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=1` is set and has\"\n \" priority.\"\n )\n return\n global _hf_hub_progress_bars_disabled\n _hf_hub_progress_bars_disabled = False\n\n\ndef are_progress_bars_disabled() -> bool:\n \"\"\"Return whether progress bars are globally disabled or not.\n\n Progress bars used in `huggingface_hub` can be enable or disabled globally using [`~utils.enable_progress_bars`]\n and [`~utils.disable_progress_bars`] or by setting `HF_HUB_DISABLE_PROGRESS_BARS` as environment variable.\n \"\"\"\n global _hf_hub_progress_bars_disabled\n return _hf_hub_progress_bars_disabled\n\n\nclass tqdm(old_tqdm):\n \"\"\"\n Class to override `disable` argument in case progress bars are globally disabled.\n\n Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if are_progress_bars_disabled():\n kwargs[\"disable\"] = True\n super().__init__(*args, **kwargs)\n\n\n@contextmanager\ndef tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]:\n \"\"\"\n Open a file as binary and wrap the `read` method to display 
a progress bar when it's streamed.\n\n First implemented in `transformers` in 2019 but removed when switched to git-lfs. Used in `huggingface_hub` to show\n progress bar when uploading an LFS file to the Hub. See github.com/huggingface/transformers/pull/2078#discussion_r354739608\n for implementation details.\n\n Note: currently implementation handles only files stored on disk as it is the most common use case. Could be\n extended to stream any `BinaryIO` object but we might have to debug some corner cases.\n\n Example:\n ```py\n >>> with tqdm_stream_file(\"config.json\") as f:\n >>> requests.put(url, data=f)\n config.json: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 8.19k/8.19k [00:02<00:00, 3.72kB/s]\n ```\n \"\"\"\n if isinstance(path, str):\n path = Path(path)\n\n with path.open(\"rb\") as f:\n total_size = path.stat().st_size\n pbar = tqdm(\n unit=\"B\",\n unit_scale=True,\n total=total_size,\n initial=0,\n desc=path.name,\n )\n\n f_read = f.read\n\n def _inner_read(size: Optional[int] = -1) -> bytes:\n data = f_read(size)\n pbar.update(len(data))\n return data\n\n f.read = _inner_read # type: ignore\n\n yield f\n\n pbar.close()\n", "path": "src/huggingface_hub/utils/tqdm.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\"Utility helpers to handle progress bars in `huggingface_hub`.\n\nExample:\n 1. Use `huggingface_hub.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.\n 2. To disable progress bars, either use `disable_progress_bars()` helper or set the\n environment variable `HF_HUB_DISABLE_PROGRESS_BARS` to 1.\n 3. To re-enable progress bars, use `enable_progress_bars()`.\n 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.\n\nNOTE: Environment variable `HF_HUB_DISABLE_PROGRESS_BARS` has the priority.\n\nExample:\n ```py\n from huggingface_hub.utils import (\n are_progress_bars_disabled,\n disable_progress_bars,\n enable_progress_bars,\n tqdm,\n )\n\n # Disable progress bars globally\n disable_progress_bars()\n\n # Use as normal `tqdm`\n for _ in tqdm(range(5)):\n do_something()\n\n # Still not showing progress bars, as `disable=False` is overwritten to `True`.\n for _ in tqdm(range(5), disable=False):\n do_something()\n\n are_progress_bars_disabled() # True\n\n # Re-enable progress bars globally\n enable_progress_bars()\n\n # Progress bar will be shown !\n for _ in tqdm(range(5)):\n do_something()\n ```\n\"\"\"\nimport io\nimport warnings\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Iterator, Optional, Union\n\nfrom tqdm.auto import tqdm as old_tqdm\n\nfrom ..constants import HF_HUB_DISABLE_PROGRESS_BARS\n\n\n# `HF_HUB_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_hub_progress_bars_disabled`\n# is a `bool`. 
If `HF_HUB_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.\n# If `HF_HUB_DISABLE_PROGRESS_BARS` is None, it means the user have not set the\n# environment variable and is free to enable/disable progress bars programmatically.\n# TL;DR: env variable has priority over code.\n#\n# By default, progress bars are enabled.\n_hf_hub_progress_bars_disabled: bool = HF_HUB_DISABLE_PROGRESS_BARS or False\n\n\ndef disable_progress_bars() -> None:\n \"\"\"\n Disable globally progress bars used in `huggingface_hub` except if `HF_HUB_DISABLE_PROGRESS_BARS` environment\n variable has been set.\n\n Use [`~utils.enable_progress_bars`] to re-enable them.\n \"\"\"\n if HF_HUB_DISABLE_PROGRESS_BARS is False:\n warnings.warn(\n \"Cannot disable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=0` is set and has\"\n \" priority.\"\n )\n return\n global _hf_hub_progress_bars_disabled\n _hf_hub_progress_bars_disabled = True\n\n\ndef enable_progress_bars() -> None:\n \"\"\"\n Enable globally progress bars used in `huggingface_hub` except if `HF_HUB_DISABLE_PROGRESS_BARS` environment\n variable has been set.\n\n Use [`~utils.disable_progress_bars`] to disable them.\n \"\"\"\n if HF_HUB_DISABLE_PROGRESS_BARS is True:\n warnings.warn(\n \"Cannot enable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=1` is set and has\"\n \" priority.\"\n )\n return\n global _hf_hub_progress_bars_disabled\n _hf_hub_progress_bars_disabled = False\n\n\ndef are_progress_bars_disabled() -> bool:\n \"\"\"Return whether progress bars are globally disabled or not.\n\n Progress bars used in `huggingface_hub` can be enable or disabled globally using [`~utils.enable_progress_bars`]\n and [`~utils.disable_progress_bars`] or by setting `HF_HUB_DISABLE_PROGRESS_BARS` as environment variable.\n \"\"\"\n global _hf_hub_progress_bars_disabled\n return _hf_hub_progress_bars_disabled\n\n\nclass tqdm(old_tqdm):\n \"\"\"\n Class to override `disable` argument in case progress bars are globally disabled.\n\n Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if are_progress_bars_disabled():\n kwargs[\"disable\"] = True\n super().__init__(*args, **kwargs)\n\n def __delattr__(self, attr: str) -> None:\n \"\"\"Fix for https://github.com/huggingface/huggingface_hub/issues/1603\"\"\"\n try:\n super().__delattr__(attr)\n except AttributeError:\n if attr != \"_lock\":\n raise\n\n\n@contextmanager\ndef tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]:\n \"\"\"\n Open a file as binary and wrap the `read` method to display a progress bar when it's streamed.\n\n First implemented in `transformers` in 2019 but removed when switched to git-lfs. Used in `huggingface_hub` to show\n progress bar when uploading an LFS file to the Hub. See github.com/huggingface/transformers/pull/2078#discussion_r354739608\n for implementation details.\n\n Note: currently implementation handles only files stored on disk as it is the most common use case. 
Could be\n extended to stream any `BinaryIO` object but we might have to debug some corner cases.\n\n Example:\n ```py\n >>> with tqdm_stream_file(\"config.json\") as f:\n >>> requests.put(url, data=f)\n config.json: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 8.19k/8.19k [00:02<00:00, 3.72kB/s]\n ```\n \"\"\"\n if isinstance(path, str):\n path = Path(path)\n\n with path.open(\"rb\") as f:\n total_size = path.stat().st_size\n pbar = tqdm(\n unit=\"B\",\n unit_scale=True,\n total=total_size,\n initial=0,\n desc=path.name,\n )\n\n f_read = f.read\n\n def _inner_read(size: Optional[int] = -1) -> bytes:\n data = f_read(size)\n pbar.update(len(data))\n return data\n\n f.read = _inner_read # type: ignore\n\n yield f\n\n pbar.close()\n", "path": "src/huggingface_hub/utils/tqdm.py"}]}
2493
179
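The golden diff in the row above amounts to a single `__delattr__` override on the vendored `tqdm` subclass: deleting the internal `_lock` attribute is allowed to fail silently (the race is tracked in huggingface_hub issue #1603, which the diff's docstring references), while any other missing attribute still raises. A minimal standalone sketch of that pattern, assuming only that the `tqdm` package is installed — the class name and the `_lock` special case come from the diff; the demo loop at the bottom is purely illustrative:

```python
from tqdm.auto import tqdm as old_tqdm


class tqdm(old_tqdm):
    """Progress bar that tolerates a spurious missing ``_lock`` on teardown."""

    def __delattr__(self, attr: str) -> None:
        try:
            super().__delattr__(attr)
        except AttributeError:
            # "_lock" may already be gone (e.g. concurrent workers closing bars);
            # any other missing attribute is still a real error and is re-raised.
            if attr != "_lock":
                raise


if __name__ == "__main__":
    for _ in tqdm(range(3)):
        pass
```

Because the exception is swallowed only for `_lock`, the override does not mask unrelated attribute bugs.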
gh_patches_debug_3174
rasdani/github-patches
git_diff
pre-commit__pre-commit-1614
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can't use ruby 2.7.1 on MacOS Hi, Bumping my ruby hooks to version 2.7.1 worked fine for me on Ubuntu but doesn't work for my colleagues using MacOS, is there something to do about bumping rbenv archives ? Thanks --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/make_archives.py` Content: ``` 1 import argparse 2 import os.path 3 import tarfile 4 from typing import Optional 5 from typing import Sequence 6 7 from pre_commit import output 8 from pre_commit.util import cmd_output_b 9 from pre_commit.util import rmtree 10 from pre_commit.util import tmpdir 11 12 13 # This is a script for generating the tarred resources for git repo 14 # dependencies. Currently it's just for "vendoring" ruby support packages. 15 16 17 REPOS = ( 18 ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'), 19 ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'), 20 ( 21 'ruby-download', 22 'git://github.com/garnieretienne/rvm-download', 23 '09bd7c6', 24 ), 25 ) 26 27 28 def make_archive(name: str, repo: str, ref: str, destdir: str) -> str: 29 """Makes an archive of a repository in the given destdir. 30 31 :param text name: Name to give the archive. For instance foo. The file 32 that is created will be called foo.tar.gz. 33 :param text repo: Repository to clone. 34 :param text ref: Tag/SHA/branch to check out. 35 :param text destdir: Directory to place archives in. 36 """ 37 output_path = os.path.join(destdir, f'{name}.tar.gz') 38 with tmpdir() as tempdir: 39 # Clone the repository to the temporary directory 40 cmd_output_b('git', 'clone', repo, tempdir) 41 cmd_output_b('git', 'checkout', ref, cwd=tempdir) 42 43 # We don't want the '.git' directory 44 # It adds a bunch of size to the archive and we don't use it at 45 # runtime 46 rmtree(os.path.join(tempdir, '.git')) 47 48 with tarfile.open(output_path, 'w|gz') as tf: 49 tf.add(tempdir, name) 50 51 return output_path 52 53 54 def main(argv: Optional[Sequence[str]] = None) -> int: 55 parser = argparse.ArgumentParser() 56 parser.add_argument('--dest', default='pre_commit/resources') 57 args = parser.parse_args(argv) 58 for archive_name, repo, ref in REPOS: 59 output.write_line(f'Making {archive_name}.tar.gz for {repo}@{ref}') 60 make_archive(archive_name, repo, ref, args.dest) 61 return 0 62 63 64 if __name__ == '__main__': 65 exit(main()) 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/make_archives.py b/pre_commit/make_archives.py --- a/pre_commit/make_archives.py +++ b/pre_commit/make_archives.py @@ -15,8 +15,8 @@ REPOS = ( - ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'), - ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'), + ('rbenv', 'git://github.com/rbenv/rbenv', '0843745'), + ('ruby-build', 'git://github.com/rbenv/ruby-build', '258455e'), ( 'ruby-download', 'git://github.com/garnieretienne/rvm-download',
{"golden_diff": "diff --git a/pre_commit/make_archives.py b/pre_commit/make_archives.py\n--- a/pre_commit/make_archives.py\n+++ b/pre_commit/make_archives.py\n@@ -15,8 +15,8 @@\n \n \n REPOS = (\n- ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'),\n- ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'),\n+ ('rbenv', 'git://github.com/rbenv/rbenv', '0843745'),\n+ ('ruby-build', 'git://github.com/rbenv/ruby-build', '258455e'),\n (\n 'ruby-download',\n 'git://github.com/garnieretienne/rvm-download',\n", "issue": "Can't use ruby 2.7.1 on MacOS\nHi, \r\n\r\nBumping my ruby hooks to version 2.7.1 worked fine for me on Ubuntu but doesn't work for my colleagues using MacOS, is there something to do about bumping rbenv archives ? \r\n\r\nThanks\n", "before_files": [{"content": "import argparse\nimport os.path\nimport tarfile\nfrom typing import Optional\nfrom typing import Sequence\n\nfrom pre_commit import output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\nfrom pre_commit.util import tmpdir\n\n\n# This is a script for generating the tarred resources for git repo\n# dependencies. Currently it's just for \"vendoring\" ruby support packages.\n\n\nREPOS = (\n ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'),\n ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'),\n (\n 'ruby-download',\n 'git://github.com/garnieretienne/rvm-download',\n '09bd7c6',\n ),\n)\n\n\ndef make_archive(name: str, repo: str, ref: str, destdir: str) -> str:\n \"\"\"Makes an archive of a repository in the given destdir.\n\n :param text name: Name to give the archive. For instance foo. The file\n that is created will be called foo.tar.gz.\n :param text repo: Repository to clone.\n :param text ref: Tag/SHA/branch to check out.\n :param text destdir: Directory to place archives in.\n \"\"\"\n output_path = os.path.join(destdir, f'{name}.tar.gz')\n with tmpdir() as tempdir:\n # Clone the repository to the temporary directory\n cmd_output_b('git', 'clone', repo, tempdir)\n cmd_output_b('git', 'checkout', ref, cwd=tempdir)\n\n # We don't want the '.git' directory\n # It adds a bunch of size to the archive and we don't use it at\n # runtime\n rmtree(os.path.join(tempdir, '.git'))\n\n with tarfile.open(output_path, 'w|gz') as tf:\n tf.add(tempdir, name)\n\n return output_path\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dest', default='pre_commit/resources')\n args = parser.parse_args(argv)\n for archive_name, repo, ref in REPOS:\n output.write_line(f'Making {archive_name}.tar.gz for {repo}@{ref}')\n make_archive(archive_name, repo, ref, args.dest)\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/make_archives.py"}], "after_files": [{"content": "import argparse\nimport os.path\nimport tarfile\nfrom typing import Optional\nfrom typing import Sequence\n\nfrom pre_commit import output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\nfrom pre_commit.util import tmpdir\n\n\n# This is a script for generating the tarred resources for git repo\n# dependencies. 
Currently it's just for \"vendoring\" ruby support packages.\n\n\nREPOS = (\n ('rbenv', 'git://github.com/rbenv/rbenv', '0843745'),\n ('ruby-build', 'git://github.com/rbenv/ruby-build', '258455e'),\n (\n 'ruby-download',\n 'git://github.com/garnieretienne/rvm-download',\n '09bd7c6',\n ),\n)\n\n\ndef make_archive(name: str, repo: str, ref: str, destdir: str) -> str:\n \"\"\"Makes an archive of a repository in the given destdir.\n\n :param text name: Name to give the archive. For instance foo. The file\n that is created will be called foo.tar.gz.\n :param text repo: Repository to clone.\n :param text ref: Tag/SHA/branch to check out.\n :param text destdir: Directory to place archives in.\n \"\"\"\n output_path = os.path.join(destdir, f'{name}.tar.gz')\n with tmpdir() as tempdir:\n # Clone the repository to the temporary directory\n cmd_output_b('git', 'clone', repo, tempdir)\n cmd_output_b('git', 'checkout', ref, cwd=tempdir)\n\n # We don't want the '.git' directory\n # It adds a bunch of size to the archive and we don't use it at\n # runtime\n rmtree(os.path.join(tempdir, '.git'))\n\n with tarfile.open(output_path, 'w|gz') as tf:\n tf.add(tempdir, name)\n\n return output_path\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dest', default='pre_commit/resources')\n args = parser.parse_args(argv)\n for archive_name, repo, ref in REPOS:\n output.write_line(f'Making {archive_name}.tar.gz for {repo}@{ref}')\n make_archive(archive_name, repo, ref, args.dest)\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/make_archives.py"}]}
994
186
gh_patches_debug_6635
rasdani/github-patches
git_diff
voxel51__fiftyone-252
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docs table of contents spacing issue on small screens On small screens, the table of contents has too much space below it; it should be tight with the bottom of the contents so that the main content is visible: <img width="717" alt="Screen Shot 2020-07-15 at 3 45 52 PM" src="https://user-images.githubusercontent.com/25985824/87589202-e2d04600-c6b2-11ea-8f24-d3e14ec4cc7e.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/conf.py` Content: ``` 1 """ 2 Sphinx configuration file. 3 4 For a full list of available options, see: 5 https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 | Copyright 2017-2020, Voxel51, Inc. 8 | `voxel51.com <https://voxel51.com/>`_ 9 | 10 """ 11 12 import fiftyone.constants as foc 13 14 15 # -- Path setup -------------------------------------------------------------- 16 17 # If extensions (or modules to document with autodoc) are in another directory, 18 # add these directories to sys.path here. If the directory is relative to the 19 # documentation root, use os.path.abspath to make it absolute, like shown here. 20 # 21 22 # -- Project information ----------------------------------------------------- 23 24 project = "FiftyOne" 25 copyright = foc.COPYRIGHT 26 author = foc.AUTHOR 27 release = foc.VERSION 28 29 30 # -- General configuration --------------------------------------------------- 31 32 # Add any Sphinx extension module names here, as strings. They can be 33 # extensions coming with Sphinx (named "sphinx.ext.*") or your custom 34 # ones. 35 extensions = [ 36 "sphinx.ext.autodoc", 37 "sphinx.ext.intersphinx", 38 "sphinx.ext.napoleon", 39 "sphinx.ext.autosectionlabel", 40 "m2r", 41 "nbsphinx", 42 "sphinx_tabs.tabs", 43 "sphinx_copybutton", 44 ] 45 46 # Types of class members to generate documentation for. 47 autodoc_default_options = {"members": True, "inherited-members": True} 48 autodoc_inherit_docstrings = True 49 autodoc_member_order = "bysource" 50 autoclass_content = "class" 51 52 # Add any paths that contain templates here, relative to this directory. 53 templates_path = ["_templates"] 54 55 # The suffix(es) of source filenames. 56 # You can specify multiple suffix as a list of strings. 57 source_suffix = [".rst", ".md"] 58 59 # Parse relative links to MD files into ref and doc directives. 60 m2r_parse_relative_links = True 61 62 # List of patterns, relative to source directory, that match files and 63 # directories to ignore when looking for source files. 64 # This pattern also affects html_static_path and html_extra_path. 65 exclude_patterns = [] 66 67 # Disable nbshinx loading require.js - this breaks the pytorch theme's 68 # scrolling handling, and we don't appear to have any notebook content that 69 # requires it 70 nbsphinx_requirejs_path = "" 71 72 # Adds a link to download the notebook to the built HTML 73 nbsphinx_prolog = """ 74 75 .. note:: 76 77 Download notebook: 78 :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>` 79 80 """ 81 82 # -- Options for HTML output ------------------------------------------------- 83 84 # The theme to use for HTML and HTML Help pages. See the documentation for 85 # a list of builtin themes. 
86 # 87 html_theme = "pytorch_sphinx_theme" 88 html_theme_path = ["../theme"] 89 html_theme_options = { 90 "pytorch_project": "docs", 91 } 92 93 # Add any paths that contain custom static files (such as style sheets) here, 94 # relative to this directory. They are copied after the builtin static files, 95 # so a file named "default.css" will overwrite the builtin "default.css". 96 html_static_path = ["_static"] 97 98 # These paths are either relative to html_static_path 99 # or fully qualified paths (eg. https://...) 100 html_css_files = ["css/voxel51-website.css", "css/custom.css"] 101 html_js_files = ["js/voxel51-website.js", "js/custom.js"] 102 103 html_context = { 104 "address_main_line1": "410 N 4th Ave, 3rd Floor", 105 "address_main_line2": "Ann Arbor, MI 48104", 106 "phone_main": "+1 734-489-1134", 107 "email_info": "[email protected]", 108 "link_blog": "https://blog.voxel51.com/", 109 "link_careers": "https://voxel51.com/careers/", 110 "link_contactus": "mailto:[email protected]?subject=[Voxel51]%20Contact%20us", 111 "link_demo": "https://voxel51.com/demo/", 112 "link_docs_fiftyone": "https://voxel51.com/docs/fiftyone/", 113 "link_fiftyone": "https://voxel51.com/fiftyone/", 114 "link_github": "https://github.com/", 115 "link_home": "https://voxel51.com/", 116 "link_linkedin": "https://www.linkedin.com/in/", 117 "link_ourstory": "https://voxel51.com/ourstory/", 118 "link_pdi": "https://pdi.voxel51.com/", 119 "link_platform": "https://voxel51.com/platform/", 120 "link_platform_login": "https://console.voxel51.com/login", 121 "link_press": "https://voxel51.com/press/", 122 "link_privacypolicy": "https://voxel51.com/privacy/", 123 "link_schedulecall": "mailto:[email protected]?subject=[Voxel51]%20Schedule%20a%20call", 124 "link_scheduledemo": "https://meetings.hubspot.com/michael908", 125 "link_scoop_demo": "https://demo.voxel51.com", 126 "link_scoop_login": "https://scoop.voxel51.com/", 127 "link_status": "https://status.voxel51.com/", 128 "link_termsofservice": "https://voxel51.com/terms/", 129 "link_twitter": "https://twitter.com/", 130 "link_usecase_advertising": "https://voxel51.com/usecases/advertising/", 131 "link_usecase_auto": "https://voxel51.com/usecases/automotive/", 132 "link_usecase_research": "https://voxel51.com/usecases/research/", 133 "link_usecases": "https://voxel51.com/usecases/", 134 "link_usecases_entry": "https://voxel51.com/usecases/automotive/", 135 "link_voxel51_facebook": "https://www.facebook.com/voxel51/", 136 "link_voxel51_github": "https://github.com/voxel51/", 137 "link_voxel51_linkedin": "https://www.linkedin.com/company/voxel51/", 138 "link_voxel51_twitter": "https://twitter.com/voxel51", 139 } 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -100,6 +100,10 @@ html_css_files = ["css/voxel51-website.css", "css/custom.css"] html_js_files = ["js/voxel51-website.js", "js/custom.js"] +# Prevent RST source files from being included in output +html_copy_source = False + +# Links - copied from website config html_context = { "address_main_line1": "410 N 4th Ave, 3rd Floor", "address_main_line2": "Ann Arbor, MI 48104",
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -100,6 +100,10 @@\n html_css_files = [\"css/voxel51-website.css\", \"css/custom.css\"]\n html_js_files = [\"js/voxel51-website.js\", \"js/custom.js\"]\n \n+# Prevent RST source files from being included in output\n+html_copy_source = False\n+\n+# Links - copied from website config\n html_context = {\n \"address_main_line1\": \"410 N 4th Ave, 3rd Floor\",\n \"address_main_line2\": \"Ann Arbor, MI 48104\",\n", "issue": "Docs table of contents spacing issue on small screens\nOn small screens, the table of contents has too much space below it; it should be tight with the bottom of the contents so that the main content is visible:\r\n\r\n<img width=\"717\" alt=\"Screen Shot 2020-07-15 at 3 45 52 PM\" src=\"https://user-images.githubusercontent.com/25985824/87589202-e2d04600-c6b2-11ea-8f24-d3e14ec4cc7e.png\">\r\n\n", "before_files": [{"content": "\"\"\"\nSphinx configuration file.\n\nFor a full list of available options, see:\nhttps://www.sphinx-doc.org/en/master/usage/configuration.html\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n\nimport fiftyone.constants as foc\n\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\n# -- Project information -----------------------------------------------------\n\nproject = \"FiftyOne\"\ncopyright = foc.COPYRIGHT\nauthor = foc.AUTHOR\nrelease = foc.VERSION\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosectionlabel\",\n \"m2r\",\n \"nbsphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinx_copybutton\",\n]\n\n# Types of class members to generate documentation for.\nautodoc_default_options = {\"members\": True, \"inherited-members\": True}\nautodoc_inherit_docstrings = True\nautodoc_member_order = \"bysource\"\nautoclass_content = \"class\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of strings.\nsource_suffix = [\".rst\", \".md\"]\n\n# Parse relative links to MD files into ref and doc directives.\nm2r_parse_relative_links = True\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# Disable nbshinx loading require.js - this breaks the pytorch theme's\n# scrolling handling, and we don't appear to have any notebook content that\n# requires it\nnbsphinx_requirejs_path = \"\"\n\n# Adds a link to download the notebook to the built HTML\nnbsphinx_prolog = \"\"\"\n\n.. 
note::\n\n Download notebook:\n :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`\n\n\"\"\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [\"../theme\"]\nhtml_theme_options = {\n \"pytorch_project\": \"docs\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. https://...)\nhtml_css_files = [\"css/voxel51-website.css\", \"css/custom.css\"]\nhtml_js_files = [\"js/voxel51-website.js\", \"js/custom.js\"]\n\nhtml_context = {\n \"address_main_line1\": \"410 N 4th Ave, 3rd Floor\",\n \"address_main_line2\": \"Ann Arbor, MI 48104\",\n \"phone_main\": \"+1 734-489-1134\",\n \"email_info\": \"[email protected]\",\n \"link_blog\": \"https://blog.voxel51.com/\",\n \"link_careers\": \"https://voxel51.com/careers/\",\n \"link_contactus\": \"mailto:[email protected]?subject=[Voxel51]%20Contact%20us\",\n \"link_demo\": \"https://voxel51.com/demo/\",\n \"link_docs_fiftyone\": \"https://voxel51.com/docs/fiftyone/\",\n \"link_fiftyone\": \"https://voxel51.com/fiftyone/\",\n \"link_github\": \"https://github.com/\",\n \"link_home\": \"https://voxel51.com/\",\n \"link_linkedin\": \"https://www.linkedin.com/in/\",\n \"link_ourstory\": \"https://voxel51.com/ourstory/\",\n \"link_pdi\": \"https://pdi.voxel51.com/\",\n \"link_platform\": \"https://voxel51.com/platform/\",\n \"link_platform_login\": \"https://console.voxel51.com/login\",\n \"link_press\": \"https://voxel51.com/press/\",\n \"link_privacypolicy\": \"https://voxel51.com/privacy/\",\n \"link_schedulecall\": \"mailto:[email protected]?subject=[Voxel51]%20Schedule%20a%20call\",\n \"link_scheduledemo\": \"https://meetings.hubspot.com/michael908\",\n \"link_scoop_demo\": \"https://demo.voxel51.com\",\n \"link_scoop_login\": \"https://scoop.voxel51.com/\",\n \"link_status\": \"https://status.voxel51.com/\",\n \"link_termsofservice\": \"https://voxel51.com/terms/\",\n \"link_twitter\": \"https://twitter.com/\",\n \"link_usecase_advertising\": \"https://voxel51.com/usecases/advertising/\",\n \"link_usecase_auto\": \"https://voxel51.com/usecases/automotive/\",\n \"link_usecase_research\": \"https://voxel51.com/usecases/research/\",\n \"link_usecases\": \"https://voxel51.com/usecases/\",\n \"link_usecases_entry\": \"https://voxel51.com/usecases/automotive/\",\n \"link_voxel51_facebook\": \"https://www.facebook.com/voxel51/\",\n \"link_voxel51_github\": \"https://github.com/voxel51/\",\n \"link_voxel51_linkedin\": \"https://www.linkedin.com/company/voxel51/\",\n \"link_voxel51_twitter\": \"https://twitter.com/voxel51\",\n}\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "\"\"\"\nSphinx configuration file.\n\nFor a full list of available options, see:\nhttps://www.sphinx-doc.org/en/master/usage/configuration.html\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n\nimport fiftyone.constants as foc\n\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in 
another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\n# -- Project information -----------------------------------------------------\n\nproject = \"FiftyOne\"\ncopyright = foc.COPYRIGHT\nauthor = foc.AUTHOR\nrelease = foc.VERSION\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosectionlabel\",\n \"m2r\",\n \"nbsphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinx_copybutton\",\n]\n\n# Types of class members to generate documentation for.\nautodoc_default_options = {\"members\": True, \"inherited-members\": True}\nautodoc_inherit_docstrings = True\nautodoc_member_order = \"bysource\"\nautoclass_content = \"class\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of strings.\nsource_suffix = [\".rst\", \".md\"]\n\n# Parse relative links to MD files into ref and doc directives.\nm2r_parse_relative_links = True\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# Disable nbshinx loading require.js - this breaks the pytorch theme's\n# scrolling handling, and we don't appear to have any notebook content that\n# requires it\nnbsphinx_requirejs_path = \"\"\n\n# Adds a link to download the notebook to the built HTML\nnbsphinx_prolog = \"\"\"\n\n.. note::\n\n Download notebook:\n :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`\n\n\"\"\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [\"../theme\"]\nhtml_theme_options = {\n \"pytorch_project\": \"docs\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. 
https://...)\nhtml_css_files = [\"css/voxel51-website.css\", \"css/custom.css\"]\nhtml_js_files = [\"js/voxel51-website.js\", \"js/custom.js\"]\n\n# Prevent RST source files from being included in output\nhtml_copy_source = False\n\n# Links - copied from website config\nhtml_context = {\n \"address_main_line1\": \"410 N 4th Ave, 3rd Floor\",\n \"address_main_line2\": \"Ann Arbor, MI 48104\",\n \"phone_main\": \"+1 734-489-1134\",\n \"email_info\": \"[email protected]\",\n \"link_blog\": \"https://blog.voxel51.com/\",\n \"link_careers\": \"https://voxel51.com/careers/\",\n \"link_contactus\": \"mailto:[email protected]?subject=[Voxel51]%20Contact%20us\",\n \"link_demo\": \"https://voxel51.com/demo/\",\n \"link_docs_fiftyone\": \"https://voxel51.com/docs/fiftyone/\",\n \"link_fiftyone\": \"https://voxel51.com/fiftyone/\",\n \"link_github\": \"https://github.com/\",\n \"link_home\": \"https://voxel51.com/\",\n \"link_linkedin\": \"https://www.linkedin.com/in/\",\n \"link_ourstory\": \"https://voxel51.com/ourstory/\",\n \"link_pdi\": \"https://pdi.voxel51.com/\",\n \"link_platform\": \"https://voxel51.com/platform/\",\n \"link_platform_login\": \"https://console.voxel51.com/login\",\n \"link_press\": \"https://voxel51.com/press/\",\n \"link_privacypolicy\": \"https://voxel51.com/privacy/\",\n \"link_schedulecall\": \"mailto:[email protected]?subject=[Voxel51]%20Schedule%20a%20call\",\n \"link_scheduledemo\": \"https://meetings.hubspot.com/michael908\",\n \"link_scoop_demo\": \"https://demo.voxel51.com\",\n \"link_scoop_login\": \"https://scoop.voxel51.com/\",\n \"link_status\": \"https://status.voxel51.com/\",\n \"link_termsofservice\": \"https://voxel51.com/terms/\",\n \"link_twitter\": \"https://twitter.com/\",\n \"link_usecase_advertising\": \"https://voxel51.com/usecases/advertising/\",\n \"link_usecase_auto\": \"https://voxel51.com/usecases/automotive/\",\n \"link_usecase_research\": \"https://voxel51.com/usecases/research/\",\n \"link_usecases\": \"https://voxel51.com/usecases/\",\n \"link_usecases_entry\": \"https://voxel51.com/usecases/automotive/\",\n \"link_voxel51_facebook\": \"https://www.facebook.com/voxel51/\",\n \"link_voxel51_github\": \"https://github.com/voxel51/\",\n \"link_voxel51_linkedin\": \"https://www.linkedin.com/company/voxel51/\",\n \"link_voxel51_twitter\": \"https://twitter.com/voxel51\",\n}\n", "path": "docs/source/conf.py"}]}
2139
157
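For context on the Sphinx option the golden diff above introduces: `html_copy_source = False` stops Sphinx from copying the reST sources into the built HTML (the `_sources/` directory), which is what the diff's "Prevent RST source files from being included in output" comment refers to. A small conf.py sketch showing where the setting sits — the neighbouring option names are taken from the diff, and this is an excerpt, not the project's full configuration:

```python
# Sketch of the relevant section of a Sphinx conf.py (pattern from the diff above).
html_static_path = ["_static"]

# Stylesheets and scripts served alongside the theme.
html_css_files = ["css/voxel51-website.css", "css/custom.css"]
html_js_files = ["js/voxel51-website.js", "js/custom.js"]

# Do not copy the .rst sources into the build output (_sources/),
# which keeps the generated site smaller.
html_copy_source = False
```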
gh_patches_debug_9467
rasdani/github-patches
git_diff
aws__aws-cli-4036
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- EKS update-config not working when updating cluster I am not sure if by design. In case it isnt, executing: ``` aws eks update-kubeconfig --name dev-tools --kubeconfig ~/.kube/dev-tools ``` Yields into following the error message: ``` Cluster status not active ``` When the cluster is updating: `"status": "UPDATING",`. **Steps:** 1. create cluster with older version 2. update-cluster to newer version 3. execute `aws eks update-kubeconfig` 4. error ------ After looking into the source code it looks like this is because it only checks for the status "Active". Everything else is not allowed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awscli/customizations/eks/update_kubeconfig.py` Content: ``` 1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 14 import os 15 import logging 16 17 from botocore.compat import OrderedDict 18 19 from awscli.customizations.commands import BasicCommand 20 from awscli.customizations.utils import uni_print 21 from awscli.customizations.eks.exceptions import EKSClusterError 22 from awscli.customizations.eks.kubeconfig import (Kubeconfig, 23 KubeconfigError, 24 KubeconfigLoader, 25 KubeconfigWriter, 26 KubeconfigValidator, 27 KubeconfigAppender) 28 from awscli.customizations.eks.ordered_yaml import ordered_yaml_dump 29 30 LOG = logging.getLogger(__name__) 31 32 DEFAULT_PATH = os.path.expanduser("~/.kube/config") 33 34 # Use the endpoint for kubernetes 1.10 35 # To get the most recent endpoint we will need to 36 # Do a check on the cluster's version number 37 API_VERSION = "client.authentication.k8s.io/v1alpha1" 38 39 class UpdateKubeconfigCommand(BasicCommand): 40 NAME = 'update-kubeconfig' 41 42 DESCRIPTION = BasicCommand.FROM_FILE( 43 'eks', 44 'update-kubeconfig', 45 '_description.rst' 46 ) 47 48 ARG_TABLE = [ 49 { 50 'name': 'name', 51 'help_text': ("The name of the cluster for which " 52 "to create a kubeconfig entry. " 53 "This cluster must exist in your account and in the " 54 "specified or configured default Region " 55 "for your AWS CLI installation."), 56 'required': True 57 }, 58 { 59 'name': 'kubeconfig', 60 'help_text': ("Optionally specify a kubeconfig file to append " 61 "with your configuration. " 62 "By default, the configuration is written to the " 63 "first file path in the KUBECONFIG " 64 "environment variable (if it is set) " 65 "or the default kubeconfig path (.kube/config) " 66 "in your home directory."), 67 'required': False 68 }, 69 { 70 'name': 'role-arn', 71 'help_text': ("To assume a role for cluster authentication, " 72 "specify an IAM role ARN with this option. 
" 73 "For example, if you created a cluster " 74 "while assuming an IAM role, " 75 "then you must also assume that role to " 76 "connect to the cluster the first time."), 77 'required': False 78 }, 79 { 80 'name': 'dry-run', 81 'action': 'store_true', 82 'default': False, 83 'help_text': ("Print the merged kubeconfig to stdout instead of " 84 "writing it to the specified file."), 85 'required': False 86 }, 87 { 88 'name': 'verbose', 89 'action': 'store_true', 90 'default': False, 91 'help_text': ("Print more detailed output " 92 "when writing to the kubeconfig file, " 93 "including the appended entries.") 94 }, 95 { 96 'name': 'alias', 97 'help_text': ("Alias for the cluster context name. " 98 "Defaults to match cluster ARN."), 99 'required': False 100 } 101 ] 102 103 def _display_entries(self, entries): 104 """ 105 Display entries in yaml format 106 107 :param entries: a list of OrderedDicts to be printed 108 :type entries: list 109 """ 110 uni_print("Entries:\n\n") 111 for entry in entries: 112 uni_print(ordered_yaml_dump(entry)) 113 uni_print("\n") 114 115 def _run_main(self, parsed_args, parsed_globals): 116 client = EKSClient(self._session, 117 parsed_args.name, 118 parsed_args.role_arn, 119 parsed_globals) 120 new_cluster_dict = client.get_cluster_entry() 121 new_user_dict = client.get_user_entry() 122 123 config_selector = KubeconfigSelector( 124 os.environ.get("KUBECONFIG", ""), 125 parsed_args.kubeconfig 126 ) 127 config = config_selector.choose_kubeconfig( 128 new_cluster_dict["name"] 129 ) 130 updating_existing = config.has_cluster(new_cluster_dict["name"]) 131 appender = KubeconfigAppender() 132 new_context_dict = appender.insert_cluster_user_pair(config, 133 new_cluster_dict, 134 new_user_dict, 135 parsed_args.alias) 136 137 if parsed_args.dry_run: 138 uni_print(config.dump_content()) 139 else: 140 writer = KubeconfigWriter() 141 writer.write_kubeconfig(config) 142 143 if updating_existing: 144 uni_print("Updated context {0} in {1}\n".format( 145 new_context_dict["name"], config.path 146 )) 147 else: 148 uni_print("Added new context {0} to {1}\n".format( 149 new_context_dict["name"], config.path 150 )) 151 152 if parsed_args.verbose: 153 self._display_entries([ 154 new_context_dict, 155 new_user_dict, 156 new_cluster_dict 157 ]) 158 159 160 161 class KubeconfigSelector(object): 162 163 def __init__(self, env_variable, path_in, validator=None, 164 loader=None): 165 """ 166 Parse KUBECONFIG into a list of absolute paths. 167 Also replace the empty list with DEFAULT_PATH 168 169 :param env_variable: KUBECONFIG as a long string 170 :type env_variable: string 171 172 :param path_in: The path passed in through the CLI 173 :type path_in: string or None 174 """ 175 if validator is None: 176 validator = KubeconfigValidator() 177 self._validator = validator 178 179 if loader is None: 180 loader = KubeconfigLoader(validator) 181 self._loader = loader 182 183 if path_in is not None: 184 # Override environment variable 185 self._paths = [self._expand_path(path_in)] 186 else: 187 # Get the list of paths from the environment variable 188 if env_variable == "": 189 env_variable = DEFAULT_PATH 190 self._paths = [self._expand_path(element) 191 for element in env_variable.split(os.pathsep) 192 if len(element.strip()) > 0] 193 if len(self._paths) == 0: 194 self._paths = [DEFAULT_PATH] 195 196 def choose_kubeconfig(self, cluster_name): 197 """ 198 Choose which kubeconfig file to read from. 199 If name is already an entry in one of the $KUBECONFIG files, 200 choose that one. 
201 Otherwise choose the first file. 202 203 :param cluster_name: The name of the cluster which is going to be added 204 :type cluster_name: String 205 206 :return: a chosen Kubeconfig based on above rules 207 :rtype: Kubeconfig 208 """ 209 # Search for an existing entry to update 210 for candidate_path in self._paths: 211 try: 212 loaded_config = self._loader.load_kubeconfig(candidate_path) 213 214 if loaded_config.has_cluster(cluster_name): 215 LOG.debug("Found entry to update at {0}".format( 216 candidate_path 217 )) 218 return loaded_config 219 except KubeconfigError as e: 220 LOG.warning("Passing {0}:{1}".format(candidate_path, e)) 221 222 # No entry was found, use the first file in KUBECONFIG 223 # 224 # Note: This could raise KubeconfigErrors if paths[0] is corrupted 225 return self._loader.load_kubeconfig(self._paths[0]) 226 227 def _expand_path(self, path): 228 """ A helper to expand a path to a full absolute path. """ 229 return os.path.abspath(os.path.expanduser(path)) 230 231 232 class EKSClient(object): 233 def __init__(self, session, cluster_name, role_arn, parsed_globals=None): 234 self._session = session 235 self._cluster_name = cluster_name 236 self._role_arn = role_arn 237 self._cluster_description = None 238 self._globals = parsed_globals 239 240 def _get_cluster_description(self): 241 """ 242 Use an eks describe-cluster call to get the cluster description 243 Cache the response in self._cluster_description. 244 describe-cluster will only be called once. 245 """ 246 if self._cluster_description is None: 247 if self._globals is None: 248 client = self._session.create_client("eks") 249 else: 250 client = self._session.create_client( 251 "eks", 252 region_name=self._globals.region, 253 endpoint_url=self._globals.endpoint_url, 254 verify=self._globals.verify_ssl 255 ) 256 full_description = client.describe_cluster(name=self._cluster_name) 257 self._cluster_description = full_description["cluster"] 258 259 if "status" not in self._cluster_description: 260 raise EKSClusterError("Cluster not found") 261 if self._cluster_description["status"] != "ACTIVE": 262 raise EKSClusterError("Cluster status not active") 263 264 return self._cluster_description 265 266 def get_cluster_entry(self): 267 """ 268 Return a cluster entry generated using 269 the previously obtained description. 270 """ 271 272 cert_data = self._get_cluster_description().get("certificateAuthority", 273 {"data": ""})["data"] 274 endpoint = self._get_cluster_description().get("endpoint") 275 arn = self._get_cluster_description().get("arn") 276 277 return OrderedDict([ 278 ("cluster", OrderedDict([ 279 ("certificate-authority-data", cert_data), 280 ("server", endpoint) 281 ])), 282 ("name", arn) 283 ]) 284 285 def get_user_entry(self): 286 """ 287 Return a user entry generated using 288 the previously obtained description. 
289 """ 290 291 region = self._get_cluster_description().get("arn").split(":")[3] 292 293 generated_user = OrderedDict([ 294 ("name", self._get_cluster_description().get("arn", "")), 295 ("user", OrderedDict([ 296 ("exec", OrderedDict([ 297 ("apiVersion", API_VERSION), 298 ("args", 299 [ 300 "--region", 301 region, 302 "eks", 303 "get-token", 304 "--cluster-name", 305 self._cluster_name, 306 ]), 307 ("command", "aws") 308 ])) 309 ])) 310 ]) 311 312 if self._role_arn is not None: 313 generated_user["user"]["exec"]["args"].extend([ 314 "--role", 315 self._role_arn 316 ]) 317 318 if self._session.profile: 319 generated_user["user"]["exec"]["env"] = [OrderedDict([ 320 ("name", "AWS_PROFILE"), 321 ("value", self._session.profile) 322 ])] 323 324 return generated_user 325 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py --- a/awscli/customizations/eks/update_kubeconfig.py +++ b/awscli/customizations/eks/update_kubeconfig.py @@ -258,8 +258,10 @@ if "status" not in self._cluster_description: raise EKSClusterError("Cluster not found") - if self._cluster_description["status"] != "ACTIVE": - raise EKSClusterError("Cluster status not active") + if self._cluster_description["status"] not in ["ACTIVE", "UPDATING"]: + raise EKSClusterError("Cluster status is {0}".format( + self._cluster_description["status"] + )) return self._cluster_description
{"golden_diff": "diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py\n--- a/awscli/customizations/eks/update_kubeconfig.py\n+++ b/awscli/customizations/eks/update_kubeconfig.py\n@@ -258,8 +258,10 @@\n \n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n- if self._cluster_description[\"status\"] != \"ACTIVE\":\n- raise EKSClusterError(\"Cluster status not active\")\n+ if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n+ raise EKSClusterError(\"Cluster status is {0}\".format(\n+ self._cluster_description[\"status\"]\n+ ))\n \n return self._cluster_description\n", "issue": "EKS update-config not working when updating cluster\nI am not sure if by design. In case it isnt, executing:\r\n\r\n```\r\naws eks update-kubeconfig --name dev-tools --kubeconfig ~/.kube/dev-tools\r\n```\r\n\r\nYields into following the error message:\r\n```\r\nCluster status not active\r\n```\r\nWhen the cluster is updating: `\"status\": \"UPDATING\",`.\r\n\r\n\r\n**Steps:**\r\n1. create cluster with older version\r\n2. update-cluster to newer version\r\n3. execute `aws eks update-kubeconfig`\r\n4. error\r\n\r\n------\r\nAfter looking into the source code it looks like this is because it only checks for the status \"Active\". Everything else is not allowed.\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. 
\"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. \"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n },\n {\n 'name': 'alias',\n 'help_text': (\"Alias for the cluster context name. \"\n \"Defaults to match cluster ARN.\"),\n 'required': False\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\"\n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict,\n parsed_args.alias)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n 
self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] != \"ACTIVE\":\n raise EKSClusterError(\"Cluster status not active\")\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n\n region = self._get_cluster_description().get(\"arn\").split(\":\")[3]\n\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"--region\",\n region,\n \"eks\",\n \"get-token\",\n \"--cluster-name\",\n self._cluster_name,\n ]),\n (\"command\", \"aws\")\n ]))\n ]))\n ])\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"--role\",\n self._role_arn\n ])\n\n if self._session.profile:\n 
generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n (\"name\", \"AWS_PROFILE\"),\n (\"value\", self._session.profile)\n ])]\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. \"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. \"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n },\n {\n 'name': 'alias',\n 'help_text': (\"Alias for the cluster context name. 
\"\n \"Defaults to match cluster ARN.\"),\n 'required': False\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\"\n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict,\n parsed_args.alias)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full 
absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n\n region = self._get_cluster_description().get(\"arn\").split(\":\")[3]\n\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"--region\",\n region,\n \"eks\",\n \"get-token\",\n \"--cluster-name\",\n self._cluster_name,\n ]),\n (\"command\", \"aws\")\n ]))\n ]))\n ])\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"--role\",\n self._role_arn\n ])\n\n if self._session.profile:\n generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n (\"name\", \"AWS_PROFILE\"),\n (\"value\", self._session.profile)\n ])]\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}]}
3,589
184
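The awscli record above reduces to a single relaxed status check (accept `UPDATING` as well as `ACTIVE`, and report the real status on failure). A minimal sketch of that check follows; the exception class, function name, and response shape are assumptions standing in for the actual awscli `EKSClient` code, not the real implementation.

```python
# Minimal sketch of the relaxed EKS status check from the record above.
# EKSClusterError, validate_cluster and the describe-cluster response shape
# are placeholders, not the actual awscli implementation.


class EKSClusterError(Exception):
    pass


ALLOWED_STATUSES = ("ACTIVE", "UPDATING")


def validate_cluster(cluster_description):
    """Return the description, or raise if the cluster is missing or unusable."""
    if "status" not in cluster_description:
        raise EKSClusterError("Cluster not found")
    status = cluster_description["status"]
    if status not in ALLOWED_STATUSES:
        # Surface the actual status instead of a generic "not active" message.
        raise EKSClusterError("Cluster status is {0}".format(status))
    return cluster_description


# Example: a cluster mid-upgrade is now accepted instead of rejected.
validate_cluster({"status": "UPDATING", "name": "dev-tools"})
```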
gh_patches_debug_42726
rasdani/github-patches
git_diff
plotly__dash-1745
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Split out Dash CI only requirements into `requires-ci.txt`? Both `pip install dash[dev]` and `pip install dash[testing]` are extra public Dash features (respectively for building Dash components, and using the shipped Dash app test utilities). Looking at [`requires-dev.txt`](https://github.com/plotly/dash/blob/dev/requires-dev.txt) and [`requires-testing.txt`](https://github.com/plotly/dash/blob/dev/requires-testing.txt), there appears to have leaked https://github.com/plotly/dash CI/test suite dependencies into both files (i.e. dependencies not used by the features in `dash[dev]` and `dash[testing]`). Latest one introduced is [`flaky`](https://github.com/plotly/dash/pull/1741). Split out Dash CI only dependencies to a new file? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dash/long_callback/managers/diskcache_manager.py` Content: ``` 1 from . import BaseLongCallbackManager 2 3 _pending_value = "__$pending__" 4 5 6 class DiskcacheLongCallbackManager(BaseLongCallbackManager): 7 def __init__(self, cache, cache_by=None, expire=None): 8 """ 9 Long callback manager that runs callback logic in a subprocess and stores 10 results on disk using diskcache 11 12 :param cache: 13 A diskcache.Cache or diskcache.FanoutCache instance. See the diskcache 14 documentation for information on configuration options. 15 :param cache_by: 16 A list of zero-argument functions. When provided, caching is enabled and 17 the return values of these functions are combined with the callback 18 function's input arguments and source code to generate cache keys. 19 :param expire: 20 If provided, a cache entry will be removed when it has not been accessed 21 for ``expire`` seconds. If not provided, the lifetime of cache entries 22 is determined by the default behavior of the ``cache`` instance. 23 """ 24 try: 25 import diskcache # pylint: disable=import-outside-toplevel 26 import psutil # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable,import-error 27 import multiprocess # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable 28 except ImportError as missing_imports: 29 raise ImportError( 30 """\ 31 DiskcacheLongCallbackManager requires the multiprocess, diskcache, and psutil packages 32 which can be installed using pip... 33 34 $ pip install multiprocess diskcache psutil 35 36 or conda. 
37 38 $ conda install -c conda-forge multiprocess diskcache psutil\n""" 39 ) from missing_imports 40 41 if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)): 42 raise ValueError("First argument must be a diskcache.Cache object") 43 super().__init__(cache_by) 44 self.handle = cache 45 self.expire = expire 46 47 def terminate_job(self, job): 48 import psutil # pylint: disable=import-outside-toplevel,import-error 49 50 if job is None: 51 return 52 53 # Use diskcache transaction so multiple process don't try to kill the 54 # process at the same time 55 with self.handle.transact(): 56 if psutil.pid_exists(job): 57 process = psutil.Process(job) 58 59 for proc in process.children(recursive=True): 60 try: 61 proc.kill() 62 except psutil.NoSuchProcess: 63 pass 64 65 try: 66 process.kill() 67 except psutil.NoSuchProcess: 68 pass 69 70 try: 71 process.wait(0.5) 72 except (psutil.TimeoutExpired, psutil.NoSuchProcess): 73 pass 74 75 def terminate_unhealthy_job(self, job): 76 import psutil # pylint: disable=import-outside-toplevel,import-error 77 78 if job and psutil.pid_exists(job): 79 if not self.job_running(job): 80 self.terminate_job(job) 81 return True 82 83 return False 84 85 def job_running(self, job): 86 import psutil # pylint: disable=import-outside-toplevel,import-error 87 88 if job and psutil.pid_exists(job): 89 proc = psutil.Process(job) 90 return proc.status() != psutil.STATUS_ZOMBIE 91 return False 92 93 def make_job_fn(self, fn, progress, args_deps): 94 return _make_job_fn(fn, self.handle, progress, args_deps) 95 96 def clear_cache_entry(self, key): 97 self.handle.delete(key) 98 99 def call_job_fn(self, key, job_fn, args): 100 # pylint: disable-next=import-outside-toplevel,no-name-in-module,import-error 101 from multiprocess import Process 102 103 # pylint: disable-next=not-callable 104 proc = Process(target=job_fn, args=(key, self._make_progress_key(key), args)) 105 proc.start() 106 return proc.pid 107 108 def get_progress(self, key): 109 progress_key = self._make_progress_key(key) 110 return self.handle.get(progress_key) 111 112 def result_ready(self, key): 113 return self.handle.get(key) is not None 114 115 def get_result(self, key, job): 116 # Get result value 117 result = self.handle.get(key) 118 if result is None: 119 return None 120 121 # Clear result if not caching 122 if self.cache_by is None: 123 self.clear_cache_entry(key) 124 else: 125 if self.expire: 126 self.handle.touch(key, expire=self.expire) 127 128 self.clear_cache_entry(self._make_progress_key(key)) 129 130 self.terminate_job(job) 131 return result 132 133 134 def _make_job_fn(fn, cache, progress, args_deps): 135 def job_fn(result_key, progress_key, user_callback_args): 136 def _set_progress(progress_value): 137 cache.set(progress_key, progress_value) 138 139 maybe_progress = [_set_progress] if progress else [] 140 if isinstance(args_deps, dict): 141 user_callback_output = fn(*maybe_progress, **user_callback_args) 142 elif isinstance(args_deps, (list, tuple)): 143 user_callback_output = fn(*maybe_progress, *user_callback_args) 144 else: 145 user_callback_output = fn(*maybe_progress, user_callback_args) 146 cache.set(result_key, user_callback_output) 147 148 return job_fn 149 ``` Path: `dash/long_callback/managers/celery_manager.py` Content: ``` 1 import json 2 import inspect 3 import hashlib 4 5 from _plotly_utils.utils import PlotlyJSONEncoder 6 from dash.long_callback.managers import BaseLongCallbackManager 7 8 9 class CeleryLongCallbackManager(BaseLongCallbackManager): 10 def __init__(self, celery_app, 
cache_by=None, expire=None): 11 """ 12 Long callback manager that runs callback logic on a celery task queue, 13 and stores results using a celery result backend. 14 15 :param celery_app: 16 A celery.Celery application instance that must be configured with a 17 result backend. See the celery documentation for information on 18 configuration options. 19 :param cache_by: 20 A list of zero-argument functions. When provided, caching is enabled and 21 the return values of these functions are combined with the callback 22 function's input arguments and source code to generate cache keys. 23 :param expire: 24 If provided, a cache entry will be removed when it has not been accessed 25 for ``expire`` seconds. If not provided, the lifetime of cache entries 26 is determined by the default behavior of the celery result backend. 27 """ 28 import celery # pylint: disable=import-outside-toplevel,import-error 29 from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error 30 DisabledBackend, 31 ) 32 33 if not isinstance(celery_app, celery.Celery): 34 raise ValueError("First argument must be a celery.Celery object") 35 36 if isinstance(celery_app.backend, DisabledBackend): 37 raise ValueError("Celery instance must be configured with a result backend") 38 39 super().__init__(cache_by) 40 self.handle = celery_app 41 self.expire = expire 42 43 def terminate_job(self, job): 44 if job is None: 45 return 46 47 self.handle.control.terminate(job) 48 49 def terminate_unhealthy_job(self, job): 50 task = self.get_task(job) 51 if task and task.status in ("FAILURE", "REVOKED"): 52 return self.terminate_job(job) 53 return False 54 55 def job_running(self, job): 56 future = self.get_task(job) 57 return future and future.status in ( 58 "PENDING", 59 "RECEIVED", 60 "STARTED", 61 "RETRY", 62 "PROGRESS", 63 ) 64 65 def make_job_fn(self, fn, progress, args_deps): 66 return _make_job_fn(fn, self.handle, progress, args_deps) 67 68 def get_task(self, job): 69 if job: 70 return self.handle.AsyncResult(job) 71 72 return None 73 74 def clear_cache_entry(self, key): 75 self.handle.backend.delete(key) 76 77 def call_job_fn(self, key, job_fn, args): 78 task = job_fn.delay(key, self._make_progress_key(key), args) 79 return task.task_id 80 81 def get_progress(self, key): 82 progress_key = self._make_progress_key(key) 83 progress_data = self.handle.backend.get(progress_key) 84 if progress_data: 85 return json.loads(progress_data) 86 87 return None 88 89 def result_ready(self, key): 90 return self.handle.backend.get(key) is not None 91 92 def get_result(self, key, job): 93 # Get result value 94 result = self.handle.backend.get(key) 95 if result is None: 96 return None 97 98 result = json.loads(result) 99 100 # Clear result if not caching 101 if self.cache_by is None: 102 self.clear_cache_entry(key) 103 else: 104 if self.expire: 105 # Set/update expiration time 106 self.handle.backend.expire(key, self.expire) 107 self.clear_cache_entry(self._make_progress_key(key)) 108 109 self.terminate_job(job) 110 return result 111 112 113 def _make_job_fn(fn, celery_app, progress, args_deps): 114 cache = celery_app.backend 115 116 # Hash function source and module to create a unique (but stable) celery task name 117 fn_source = inspect.getsource(fn) 118 fn_str = fn_source 119 fn_hash = hashlib.sha1(fn_str.encode("utf-8")).hexdigest() 120 121 @celery_app.task(name=f"long_callback_{fn_hash}") 122 def job_fn(result_key, progress_key, user_callback_args, fn=fn): 123 def _set_progress(progress_value): 124 cache.set(progress_key, 
json.dumps(progress_value, cls=PlotlyJSONEncoder)) 125 126 maybe_progress = [_set_progress] if progress else [] 127 if isinstance(args_deps, dict): 128 user_callback_output = fn(*maybe_progress, **user_callback_args) 129 elif isinstance(args_deps, (list, tuple)): 130 user_callback_output = fn(*maybe_progress, *user_callback_args) 131 else: 132 user_callback_output = fn(*maybe_progress, user_callback_args) 133 134 cache.set(result_key, json.dumps(user_callback_output, cls=PlotlyJSONEncoder)) 135 136 return job_fn 137 ``` Path: `setup.py` Content: ``` 1 import io 2 from setuptools import setup, find_packages 3 4 main_ns = {} 5 exec(open("dash/version.py").read(), main_ns) # pylint: disable=exec-used, consider-using-with 6 7 8 def read_req_file(req_type): 9 with open("requires-{}.txt".format(req_type)) as fp: 10 requires = (line.strip() for line in fp) 11 return [req for req in requires if req and not req.startswith("#")] 12 13 14 setup( 15 name="dash", 16 version=main_ns["__version__"], 17 author="Chris Parmer", 18 author_email="[email protected]", 19 packages=find_packages(exclude=["tests*"]), 20 include_package_data=True, 21 license="MIT", 22 description=( 23 "A Python framework for building reactive web-apps. " 24 "Developed by Plotly." 25 ), 26 long_description=io.open("README.md", encoding="utf-8").read(), # pylint: disable=consider-using-with 27 long_description_content_type="text/markdown", 28 install_requires=read_req_file("install"), 29 python_requires=">=3.6", 30 extras_require={ 31 "dev": read_req_file("dev"), 32 "testing": read_req_file("testing"), 33 }, 34 entry_points={ 35 "console_scripts": [ 36 "dash-generate-components = " 37 "dash.development.component_generator:cli", 38 "renderer = dash.development.build_process:renderer", 39 "dash-update-components = dash.development.update_components:cli" 40 ], 41 "pytest11": ["dash = dash.testing.plugin"], 42 }, 43 url="https://plotly.com/dash", 44 classifiers=[ 45 "Development Status :: 5 - Production/Stable", 46 "Environment :: Web Environment", 47 "Framework :: Dash", 48 "Framework :: Flask", 49 "Intended Audience :: Developers", 50 "Intended Audience :: Education", 51 "Intended Audience :: Financial and Insurance Industry", 52 "Intended Audience :: Healthcare Industry", 53 "Intended Audience :: Manufacturing", 54 "Intended Audience :: Science/Research", 55 "License :: OSI Approved :: MIT License", 56 "Programming Language :: Python", 57 "Programming Language :: Python :: 3", 58 "Programming Language :: Python :: 3.6", 59 "Programming Language :: Python :: 3.7", 60 "Programming Language :: Python :: 3.8", 61 "Programming Language :: Python :: 3.9", 62 "Topic :: Database :: Front-Ends", 63 "Topic :: Office/Business :: Financial :: Spreadsheet", 64 "Topic :: Scientific/Engineering :: Visualization", 65 "Topic :: Software Development :: Libraries :: Application Frameworks", 66 "Topic :: Software Development :: Widget Sets", 67 ], 68 ) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dash/long_callback/managers/celery_manager.py b/dash/long_callback/managers/celery_manager.py --- a/dash/long_callback/managers/celery_manager.py +++ b/dash/long_callback/managers/celery_manager.py @@ -25,10 +25,18 @@ for ``expire`` seconds. If not provided, the lifetime of cache entries is determined by the default behavior of the celery result backend. """ - import celery # pylint: disable=import-outside-toplevel,import-error - from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error - DisabledBackend, - ) + try: + import celery # pylint: disable=import-outside-toplevel,import-error + from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error + DisabledBackend, + ) + except ImportError as missing_imports: + raise ImportError( + """\ +CeleryLongCallbackManager requires extra dependencies which can be installed doing + + $ pip install "dash[celery]"\n""" + ) from missing_imports if not isinstance(celery_app, celery.Celery): raise ValueError("First argument must be a celery.Celery object") diff --git a/dash/long_callback/managers/diskcache_manager.py b/dash/long_callback/managers/diskcache_manager.py --- a/dash/long_callback/managers/diskcache_manager.py +++ b/dash/long_callback/managers/diskcache_manager.py @@ -4,14 +4,15 @@ class DiskcacheLongCallbackManager(BaseLongCallbackManager): - def __init__(self, cache, cache_by=None, expire=None): + def __init__(self, cache=None, cache_by=None, expire=None): """ Long callback manager that runs callback logic in a subprocess and stores results on disk using diskcache :param cache: A diskcache.Cache or diskcache.FanoutCache instance. See the diskcache - documentation for information on configuration options. + documentation for information on configuration options. If not provided, + a diskcache.Cache instance will be created with default values. :param cache_by: A list of zero-argument functions. When provided, caching is enabled and the return values of these functions are combined with the callback @@ -28,20 +29,22 @@ except ImportError as missing_imports: raise ImportError( """\ -DiskcacheLongCallbackManager requires the multiprocess, diskcache, and psutil packages -which can be installed using pip... +DiskcacheLongCallbackManager requires extra dependencies which can be installed doing - $ pip install multiprocess diskcache psutil - -or conda. - - $ conda install -c conda-forge multiprocess diskcache psutil\n""" + $ pip install "dash[diskcache]"\n""" ) from missing_imports - if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)): - raise ValueError("First argument must be a diskcache.Cache object") + if cache is None: + self.handle = diskcache.Cache() + else: + if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)): + raise ValueError( + "First argument must be a diskcache.Cache " + "or diskcache.FanoutCache object" + ) + self.handle = cache + super().__init__(cache_by) - self.handle = cache self.expire = expire def terminate_job(self, job): diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,8 +28,11 @@ install_requires=read_req_file("install"), python_requires=">=3.6", extras_require={ + "ci": read_req_file("ci"), "dev": read_req_file("dev"), "testing": read_req_file("testing"), + "celery": read_req_file("celery"), + "diskcache": read_req_file("diskcache"), }, entry_points={ "console_scripts": [
{"golden_diff": "diff --git a/dash/long_callback/managers/celery_manager.py b/dash/long_callback/managers/celery_manager.py\n--- a/dash/long_callback/managers/celery_manager.py\n+++ b/dash/long_callback/managers/celery_manager.py\n@@ -25,10 +25,18 @@\n for ``expire`` seconds. If not provided, the lifetime of cache entries\n is determined by the default behavior of the celery result backend.\n \"\"\"\n- import celery # pylint: disable=import-outside-toplevel,import-error\n- from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error\n- DisabledBackend,\n- )\n+ try:\n+ import celery # pylint: disable=import-outside-toplevel,import-error\n+ from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error\n+ DisabledBackend,\n+ )\n+ except ImportError as missing_imports:\n+ raise ImportError(\n+ \"\"\"\\\n+CeleryLongCallbackManager requires extra dependencies which can be installed doing\n+\n+ $ pip install \"dash[celery]\"\\n\"\"\"\n+ ) from missing_imports\n \n if not isinstance(celery_app, celery.Celery):\n raise ValueError(\"First argument must be a celery.Celery object\")\ndiff --git a/dash/long_callback/managers/diskcache_manager.py b/dash/long_callback/managers/diskcache_manager.py\n--- a/dash/long_callback/managers/diskcache_manager.py\n+++ b/dash/long_callback/managers/diskcache_manager.py\n@@ -4,14 +4,15 @@\n \n \n class DiskcacheLongCallbackManager(BaseLongCallbackManager):\n- def __init__(self, cache, cache_by=None, expire=None):\n+ def __init__(self, cache=None, cache_by=None, expire=None):\n \"\"\"\n Long callback manager that runs callback logic in a subprocess and stores\n results on disk using diskcache\n \n :param cache:\n A diskcache.Cache or diskcache.FanoutCache instance. See the diskcache\n- documentation for information on configuration options.\n+ documentation for information on configuration options. If not provided,\n+ a diskcache.Cache instance will be created with default values.\n :param cache_by:\n A list of zero-argument functions. 
When provided, caching is enabled and\n the return values of these functions are combined with the callback\n@@ -28,20 +29,22 @@\n except ImportError as missing_imports:\n raise ImportError(\n \"\"\"\\\n-DiskcacheLongCallbackManager requires the multiprocess, diskcache, and psutil packages\n-which can be installed using pip...\n+DiskcacheLongCallbackManager requires extra dependencies which can be installed doing\n \n- $ pip install multiprocess diskcache psutil\n-\n-or conda.\n-\n- $ conda install -c conda-forge multiprocess diskcache psutil\\n\"\"\"\n+ $ pip install \"dash[diskcache]\"\\n\"\"\"\n ) from missing_imports\n \n- if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)):\n- raise ValueError(\"First argument must be a diskcache.Cache object\")\n+ if cache is None:\n+ self.handle = diskcache.Cache()\n+ else:\n+ if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)):\n+ raise ValueError(\n+ \"First argument must be a diskcache.Cache \"\n+ \"or diskcache.FanoutCache object\"\n+ )\n+ self.handle = cache\n+\n super().__init__(cache_by)\n- self.handle = cache\n self.expire = expire\n \n def terminate_job(self, job):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,8 +28,11 @@\n install_requires=read_req_file(\"install\"),\n python_requires=\">=3.6\",\n extras_require={\n+ \"ci\": read_req_file(\"ci\"),\n \"dev\": read_req_file(\"dev\"),\n \"testing\": read_req_file(\"testing\"),\n+ \"celery\": read_req_file(\"celery\"),\n+ \"diskcache\": read_req_file(\"diskcache\"),\n },\n entry_points={\n \"console_scripts\": [\n", "issue": "Split out Dash CI only requirements into `requires-ci.txt`?\nBoth `pip install dash[dev]` and `pip install dash[testing]` are extra public Dash features (respectively for building Dash components, and using the shipped Dash app test utilities).\r\n\r\nLooking at [`requires-dev.txt`](https://github.com/plotly/dash/blob/dev/requires-dev.txt) and [`requires-testing.txt`](https://github.com/plotly/dash/blob/dev/requires-testing.txt), there appears to have leaked https://github.com/plotly/dash CI/test suite dependencies into both files (i.e. dependencies not used by the features in `dash[dev]` and `dash[testing]`). Latest one introduced is [`flaky`](https://github.com/plotly/dash/pull/1741).\r\n\r\nSplit out Dash CI only dependencies to a new file?\n", "before_files": [{"content": "from . import BaseLongCallbackManager\n\n_pending_value = \"__$pending__\"\n\n\nclass DiskcacheLongCallbackManager(BaseLongCallbackManager):\n def __init__(self, cache, cache_by=None, expire=None):\n \"\"\"\n Long callback manager that runs callback logic in a subprocess and stores\n results on disk using diskcache\n\n :param cache:\n A diskcache.Cache or diskcache.FanoutCache instance. See the diskcache\n documentation for information on configuration options.\n :param cache_by:\n A list of zero-argument functions. When provided, caching is enabled and\n the return values of these functions are combined with the callback\n function's input arguments and source code to generate cache keys.\n :param expire:\n If provided, a cache entry will be removed when it has not been accessed\n for ``expire`` seconds. 
If not provided, the lifetime of cache entries\n is determined by the default behavior of the ``cache`` instance.\n \"\"\"\n try:\n import diskcache # pylint: disable=import-outside-toplevel\n import psutil # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable,import-error\n import multiprocess # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable\n except ImportError as missing_imports:\n raise ImportError(\n \"\"\"\\\nDiskcacheLongCallbackManager requires the multiprocess, diskcache, and psutil packages\nwhich can be installed using pip...\n\n $ pip install multiprocess diskcache psutil\n\nor conda.\n\n $ conda install -c conda-forge multiprocess diskcache psutil\\n\"\"\"\n ) from missing_imports\n\n if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)):\n raise ValueError(\"First argument must be a diskcache.Cache object\")\n super().__init__(cache_by)\n self.handle = cache\n self.expire = expire\n\n def terminate_job(self, job):\n import psutil # pylint: disable=import-outside-toplevel,import-error\n\n if job is None:\n return\n\n # Use diskcache transaction so multiple process don't try to kill the\n # process at the same time\n with self.handle.transact():\n if psutil.pid_exists(job):\n process = psutil.Process(job)\n\n for proc in process.children(recursive=True):\n try:\n proc.kill()\n except psutil.NoSuchProcess:\n pass\n\n try:\n process.kill()\n except psutil.NoSuchProcess:\n pass\n\n try:\n process.wait(0.5)\n except (psutil.TimeoutExpired, psutil.NoSuchProcess):\n pass\n\n def terminate_unhealthy_job(self, job):\n import psutil # pylint: disable=import-outside-toplevel,import-error\n\n if job and psutil.pid_exists(job):\n if not self.job_running(job):\n self.terminate_job(job)\n return True\n\n return False\n\n def job_running(self, job):\n import psutil # pylint: disable=import-outside-toplevel,import-error\n\n if job and psutil.pid_exists(job):\n proc = psutil.Process(job)\n return proc.status() != psutil.STATUS_ZOMBIE\n return False\n\n def make_job_fn(self, fn, progress, args_deps):\n return _make_job_fn(fn, self.handle, progress, args_deps)\n\n def clear_cache_entry(self, key):\n self.handle.delete(key)\n\n def call_job_fn(self, key, job_fn, args):\n # pylint: disable-next=import-outside-toplevel,no-name-in-module,import-error\n from multiprocess import Process\n\n # pylint: disable-next=not-callable\n proc = Process(target=job_fn, args=(key, self._make_progress_key(key), args))\n proc.start()\n return proc.pid\n\n def get_progress(self, key):\n progress_key = self._make_progress_key(key)\n return self.handle.get(progress_key)\n\n def result_ready(self, key):\n return self.handle.get(key) is not None\n\n def get_result(self, key, job):\n # Get result value\n result = self.handle.get(key)\n if result is None:\n return None\n\n # Clear result if not caching\n if self.cache_by is None:\n self.clear_cache_entry(key)\n else:\n if self.expire:\n self.handle.touch(key, expire=self.expire)\n\n self.clear_cache_entry(self._make_progress_key(key))\n\n self.terminate_job(job)\n return result\n\n\ndef _make_job_fn(fn, cache, progress, args_deps):\n def job_fn(result_key, progress_key, user_callback_args):\n def _set_progress(progress_value):\n cache.set(progress_key, progress_value)\n\n maybe_progress = [_set_progress] if progress else []\n if isinstance(args_deps, dict):\n user_callback_output = fn(*maybe_progress, **user_callback_args)\n elif isinstance(args_deps, (list, tuple)):\n user_callback_output 
= fn(*maybe_progress, *user_callback_args)\n else:\n user_callback_output = fn(*maybe_progress, user_callback_args)\n cache.set(result_key, user_callback_output)\n\n return job_fn\n", "path": "dash/long_callback/managers/diskcache_manager.py"}, {"content": "import json\nimport inspect\nimport hashlib\n\nfrom _plotly_utils.utils import PlotlyJSONEncoder\nfrom dash.long_callback.managers import BaseLongCallbackManager\n\n\nclass CeleryLongCallbackManager(BaseLongCallbackManager):\n def __init__(self, celery_app, cache_by=None, expire=None):\n \"\"\"\n Long callback manager that runs callback logic on a celery task queue,\n and stores results using a celery result backend.\n\n :param celery_app:\n A celery.Celery application instance that must be configured with a\n result backend. See the celery documentation for information on\n configuration options.\n :param cache_by:\n A list of zero-argument functions. When provided, caching is enabled and\n the return values of these functions are combined with the callback\n function's input arguments and source code to generate cache keys.\n :param expire:\n If provided, a cache entry will be removed when it has not been accessed\n for ``expire`` seconds. If not provided, the lifetime of cache entries\n is determined by the default behavior of the celery result backend.\n \"\"\"\n import celery # pylint: disable=import-outside-toplevel,import-error\n from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error\n DisabledBackend,\n )\n\n if not isinstance(celery_app, celery.Celery):\n raise ValueError(\"First argument must be a celery.Celery object\")\n\n if isinstance(celery_app.backend, DisabledBackend):\n raise ValueError(\"Celery instance must be configured with a result backend\")\n\n super().__init__(cache_by)\n self.handle = celery_app\n self.expire = expire\n\n def terminate_job(self, job):\n if job is None:\n return\n\n self.handle.control.terminate(job)\n\n def terminate_unhealthy_job(self, job):\n task = self.get_task(job)\n if task and task.status in (\"FAILURE\", \"REVOKED\"):\n return self.terminate_job(job)\n return False\n\n def job_running(self, job):\n future = self.get_task(job)\n return future and future.status in (\n \"PENDING\",\n \"RECEIVED\",\n \"STARTED\",\n \"RETRY\",\n \"PROGRESS\",\n )\n\n def make_job_fn(self, fn, progress, args_deps):\n return _make_job_fn(fn, self.handle, progress, args_deps)\n\n def get_task(self, job):\n if job:\n return self.handle.AsyncResult(job)\n\n return None\n\n def clear_cache_entry(self, key):\n self.handle.backend.delete(key)\n\n def call_job_fn(self, key, job_fn, args):\n task = job_fn.delay(key, self._make_progress_key(key), args)\n return task.task_id\n\n def get_progress(self, key):\n progress_key = self._make_progress_key(key)\n progress_data = self.handle.backend.get(progress_key)\n if progress_data:\n return json.loads(progress_data)\n\n return None\n\n def result_ready(self, key):\n return self.handle.backend.get(key) is not None\n\n def get_result(self, key, job):\n # Get result value\n result = self.handle.backend.get(key)\n if result is None:\n return None\n\n result = json.loads(result)\n\n # Clear result if not caching\n if self.cache_by is None:\n self.clear_cache_entry(key)\n else:\n if self.expire:\n # Set/update expiration time\n self.handle.backend.expire(key, self.expire)\n self.clear_cache_entry(self._make_progress_key(key))\n\n self.terminate_job(job)\n return result\n\n\ndef _make_job_fn(fn, celery_app, progress, args_deps):\n cache = 
celery_app.backend\n\n # Hash function source and module to create a unique (but stable) celery task name\n fn_source = inspect.getsource(fn)\n fn_str = fn_source\n fn_hash = hashlib.sha1(fn_str.encode(\"utf-8\")).hexdigest()\n\n @celery_app.task(name=f\"long_callback_{fn_hash}\")\n def job_fn(result_key, progress_key, user_callback_args, fn=fn):\n def _set_progress(progress_value):\n cache.set(progress_key, json.dumps(progress_value, cls=PlotlyJSONEncoder))\n\n maybe_progress = [_set_progress] if progress else []\n if isinstance(args_deps, dict):\n user_callback_output = fn(*maybe_progress, **user_callback_args)\n elif isinstance(args_deps, (list, tuple)):\n user_callback_output = fn(*maybe_progress, *user_callback_args)\n else:\n user_callback_output = fn(*maybe_progress, user_callback_args)\n\n cache.set(result_key, json.dumps(user_callback_output, cls=PlotlyJSONEncoder))\n\n return job_fn\n", "path": "dash/long_callback/managers/celery_manager.py"}, {"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\").read(), main_ns) # pylint: disable=exec-used, consider-using-with\n\n\ndef read_req_file(req_type):\n with open(\"requires-{}.txt\".format(req_type)) as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"Chris Parmer\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. \"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(), # pylint: disable=consider-using-with\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n python_requires=\">=3.6\",\n extras_require={\n \"dev\": read_req_file(\"dev\"),\n \"testing\": read_req_file(\"testing\"),\n },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components = \"\n \"dash.development.component_generator:cli\",\n \"renderer = dash.development.build_process:renderer\",\n \"dash-update-components = dash.development.update_components:cli\"\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plotly.com/dash\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Dash\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from . 
import BaseLongCallbackManager\n\n_pending_value = \"__$pending__\"\n\n\nclass DiskcacheLongCallbackManager(BaseLongCallbackManager):\n def __init__(self, cache=None, cache_by=None, expire=None):\n \"\"\"\n Long callback manager that runs callback logic in a subprocess and stores\n results on disk using diskcache\n\n :param cache:\n A diskcache.Cache or diskcache.FanoutCache instance. See the diskcache\n documentation for information on configuration options. If not provided,\n a diskcache.Cache instance will be created with default values.\n :param cache_by:\n A list of zero-argument functions. When provided, caching is enabled and\n the return values of these functions are combined with the callback\n function's input arguments and source code to generate cache keys.\n :param expire:\n If provided, a cache entry will be removed when it has not been accessed\n for ``expire`` seconds. If not provided, the lifetime of cache entries\n is determined by the default behavior of the ``cache`` instance.\n \"\"\"\n try:\n import diskcache # pylint: disable=import-outside-toplevel\n import psutil # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable,import-error\n import multiprocess # noqa: F401,E402 pylint: disable=import-outside-toplevel,unused-import,unused-variable\n except ImportError as missing_imports:\n raise ImportError(\n \"\"\"\\\nDiskcacheLongCallbackManager requires extra dependencies which can be installed doing\n\n $ pip install \"dash[diskcache]\"\\n\"\"\"\n ) from missing_imports\n\n if cache is None:\n self.handle = diskcache.Cache()\n else:\n if not isinstance(cache, (diskcache.Cache, diskcache.FanoutCache)):\n raise ValueError(\n \"First argument must be a diskcache.Cache \"\n \"or diskcache.FanoutCache object\"\n )\n self.handle = cache\n\n super().__init__(cache_by)\n self.expire = expire\n\n def terminate_job(self, job):\n import psutil # pylint: disable=import-outside-toplevel,import-error\n\n if job is None:\n return\n\n # Use diskcache transaction so multiple process don't try to kill the\n # process at the same time\n with self.handle.transact():\n if psutil.pid_exists(job):\n process = psutil.Process(job)\n\n for proc in process.children(recursive=True):\n try:\n proc.kill()\n except psutil.NoSuchProcess:\n pass\n\n try:\n process.kill()\n except psutil.NoSuchProcess:\n pass\n\n try:\n process.wait(0.5)\n except (psutil.TimeoutExpired, psutil.NoSuchProcess):\n pass\n\n def terminate_unhealthy_job(self, job):\n import psutil # pylint: disable=import-outside-toplevel,import-error\n\n if job and psutil.pid_exists(job):\n if not self.job_running(job):\n self.terminate_job(job)\n return True\n\n return False\n\n def job_running(self, job):\n import psutil # pylint: disable=import-outside-toplevel,import-error\n\n if job and psutil.pid_exists(job):\n proc = psutil.Process(job)\n return proc.status() != psutil.STATUS_ZOMBIE\n return False\n\n def make_job_fn(self, fn, progress, args_deps):\n return _make_job_fn(fn, self.handle, progress, args_deps)\n\n def clear_cache_entry(self, key):\n self.handle.delete(key)\n\n def call_job_fn(self, key, job_fn, args):\n # pylint: disable-next=import-outside-toplevel,no-name-in-module,import-error\n from multiprocess import Process\n\n # pylint: disable-next=not-callable\n proc = Process(target=job_fn, args=(key, self._make_progress_key(key), args))\n proc.start()\n return proc.pid\n\n def get_progress(self, key):\n progress_key = self._make_progress_key(key)\n return self.handle.get(progress_key)\n\n def 
result_ready(self, key):\n return self.handle.get(key) is not None\n\n def get_result(self, key, job):\n # Get result value\n result = self.handle.get(key)\n if result is None:\n return None\n\n # Clear result if not caching\n if self.cache_by is None:\n self.clear_cache_entry(key)\n else:\n if self.expire:\n self.handle.touch(key, expire=self.expire)\n\n self.clear_cache_entry(self._make_progress_key(key))\n\n self.terminate_job(job)\n return result\n\n\ndef _make_job_fn(fn, cache, progress, args_deps):\n def job_fn(result_key, progress_key, user_callback_args):\n def _set_progress(progress_value):\n cache.set(progress_key, progress_value)\n\n maybe_progress = [_set_progress] if progress else []\n if isinstance(args_deps, dict):\n user_callback_output = fn(*maybe_progress, **user_callback_args)\n elif isinstance(args_deps, (list, tuple)):\n user_callback_output = fn(*maybe_progress, *user_callback_args)\n else:\n user_callback_output = fn(*maybe_progress, user_callback_args)\n cache.set(result_key, user_callback_output)\n\n return job_fn\n", "path": "dash/long_callback/managers/diskcache_manager.py"}, {"content": "import json\nimport inspect\nimport hashlib\n\nfrom _plotly_utils.utils import PlotlyJSONEncoder\nfrom dash.long_callback.managers import BaseLongCallbackManager\n\n\nclass CeleryLongCallbackManager(BaseLongCallbackManager):\n def __init__(self, celery_app, cache_by=None, expire=None):\n \"\"\"\n Long callback manager that runs callback logic on a celery task queue,\n and stores results using a celery result backend.\n\n :param celery_app:\n A celery.Celery application instance that must be configured with a\n result backend. See the celery documentation for information on\n configuration options.\n :param cache_by:\n A list of zero-argument functions. When provided, caching is enabled and\n the return values of these functions are combined with the callback\n function's input arguments and source code to generate cache keys.\n :param expire:\n If provided, a cache entry will be removed when it has not been accessed\n for ``expire`` seconds. 
If not provided, the lifetime of cache entries\n is determined by the default behavior of the celery result backend.\n \"\"\"\n try:\n import celery # pylint: disable=import-outside-toplevel,import-error\n from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error\n DisabledBackend,\n )\n except ImportError as missing_imports:\n raise ImportError(\n \"\"\"\\\nCeleryLongCallbackManager requires extra dependencies which can be installed doing\n\n $ pip install \"dash[celery]\"\\n\"\"\"\n ) from missing_imports\n\n if not isinstance(celery_app, celery.Celery):\n raise ValueError(\"First argument must be a celery.Celery object\")\n\n if isinstance(celery_app.backend, DisabledBackend):\n raise ValueError(\"Celery instance must be configured with a result backend\")\n\n super().__init__(cache_by)\n self.handle = celery_app\n self.expire = expire\n\n def terminate_job(self, job):\n if job is None:\n return\n\n self.handle.control.terminate(job)\n\n def terminate_unhealthy_job(self, job):\n task = self.get_task(job)\n if task and task.status in (\"FAILURE\", \"REVOKED\"):\n return self.terminate_job(job)\n return False\n\n def job_running(self, job):\n future = self.get_task(job)\n return future and future.status in (\n \"PENDING\",\n \"RECEIVED\",\n \"STARTED\",\n \"RETRY\",\n \"PROGRESS\",\n )\n\n def make_job_fn(self, fn, progress, args_deps):\n return _make_job_fn(fn, self.handle, progress, args_deps)\n\n def get_task(self, job):\n if job:\n return self.handle.AsyncResult(job)\n\n return None\n\n def clear_cache_entry(self, key):\n self.handle.backend.delete(key)\n\n def call_job_fn(self, key, job_fn, args):\n task = job_fn.delay(key, self._make_progress_key(key), args)\n return task.task_id\n\n def get_progress(self, key):\n progress_key = self._make_progress_key(key)\n progress_data = self.handle.backend.get(progress_key)\n if progress_data:\n return json.loads(progress_data)\n\n return None\n\n def result_ready(self, key):\n return self.handle.backend.get(key) is not None\n\n def get_result(self, key, job):\n # Get result value\n result = self.handle.backend.get(key)\n if result is None:\n return None\n\n result = json.loads(result)\n\n # Clear result if not caching\n if self.cache_by is None:\n self.clear_cache_entry(key)\n else:\n if self.expire:\n # Set/update expiration time\n self.handle.backend.expire(key, self.expire)\n self.clear_cache_entry(self._make_progress_key(key))\n\n self.terminate_job(job)\n return result\n\n\ndef _make_job_fn(fn, celery_app, progress, args_deps):\n cache = celery_app.backend\n\n # Hash function source and module to create a unique (but stable) celery task name\n fn_source = inspect.getsource(fn)\n fn_str = fn_source\n fn_hash = hashlib.sha1(fn_str.encode(\"utf-8\")).hexdigest()\n\n @celery_app.task(name=f\"long_callback_{fn_hash}\")\n def job_fn(result_key, progress_key, user_callback_args, fn=fn):\n def _set_progress(progress_value):\n cache.set(progress_key, json.dumps(progress_value, cls=PlotlyJSONEncoder))\n\n maybe_progress = [_set_progress] if progress else []\n if isinstance(args_deps, dict):\n user_callback_output = fn(*maybe_progress, **user_callback_args)\n elif isinstance(args_deps, (list, tuple)):\n user_callback_output = fn(*maybe_progress, *user_callback_args)\n else:\n user_callback_output = fn(*maybe_progress, user_callback_args)\n\n cache.set(result_key, json.dumps(user_callback_output, cls=PlotlyJSONEncoder))\n\n return job_fn\n", "path": "dash/long_callback/managers/celery_manager.py"}, {"content": "import 
io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\").read(), main_ns) # pylint: disable=exec-used, consider-using-with\n\n\ndef read_req_file(req_type):\n with open(\"requires-{}.txt\".format(req_type)) as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"Chris Parmer\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. \"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(), # pylint: disable=consider-using-with\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n python_requires=\">=3.6\",\n extras_require={\n \"ci\": read_req_file(\"ci\"),\n \"dev\": read_req_file(\"dev\"),\n \"testing\": read_req_file(\"testing\"),\n \"celery\": read_req_file(\"celery\"),\n \"diskcache\": read_req_file(\"diskcache\"),\n },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components = \"\n \"dash.development.component_generator:cli\",\n \"renderer = dash.development.build_process:renderer\",\n \"dash-update-components = dash.development.update_components:cli\"\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plotly.com/dash\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Dash\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n)\n", "path": "setup.py"}]}
3,989
933
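The manager code visible in the record tail above stores progress and result payloads in the Celery result backend with `cache.set(...)` and reads them back with `cache.get(...)`. Below is a minimal, self-contained sketch of that key/value pattern, assuming a Redis-style result backend; the broker/backend URLs, task name and key names are illustrative stand-ins, not values taken from the record.

```python
# Sketch only: use a Celery app's result backend as a small key/value cache
# for progress and result JSON, mirroring the cache.set/cache.get calls in
# the long-callback manager above. Assumes a key/value backend (e.g. Redis).
import json

from celery import Celery

celery_app = Celery(__name__,
                    broker="redis://localhost:6379/0",    # assumed URLs
                    backend="redis://localhost:6379/1")
cache = celery_app.backend


@celery_app.task(name="long_callback_example")
def job_fn(result_key, progress_key, n):
    for i in range(n):
        # Write intermediate progress so a poller can read it later.
        cache.set(progress_key, json.dumps([i, n]))
    cache.set(result_key, json.dumps({"done": n}))


def get_progress(progress_key):
    raw = cache.get(progress_key)
    return json.loads(raw) if raw else None
```

Keeping progress data in the same backend as task results means the same `expire`/`delete` calls seen in the manager can manage both kinds of entries.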
gh_patches_debug_22127
rasdani/github-patches
git_diff
ansible__ansible-35005
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- /tmp directory being used <!--- Verify first that your issue/request is not already reported on GitHub. Also test if the latest release, and master branch are affected too. --> ##### ISSUE TYPE <!--- Pick one below and delete the rest --> - Bug Report ##### COMPONENT NAME <!--- Name of the module, plugin, task or feature Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path --> template and potentially more ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes below --> ``` $ ansible --version ansible 2.2.1.0 config file = /opt/****/ansible.cfg configured module search path = [ '/home/****/ansible'] ``` ##### CONFIGURATION <!--- If using Ansible 2.4 or above, paste the results of "ansible-config dump --only-changed" Otherwise, mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables). --> ``` $ cat ansible.cfg | grep tmp local_tmp = /opt/***/.ansible/tmp remote_tmp = /opt/***/.ansible/tmp $ ``` ##### OS / ENVIRONMENT <!--- Mention the OS you are running Ansible from, and the OS you are managing, or say "N/A" for anything that is not platform-specific. Also mention the specific version of what you are trying to control, e.g. if this is a network bug the version of firmware on the network device. --> ``` $ cat /etc/os-release NAME="Red Hat Enterprise Linux Server" VERSION="7.3 (Maipo)" ID="rhel" ID_LIKE="fedora" VERSION_ID="7.3" PRETTY_NAME="Red Hat Enterprise Linux" ANSI_COLOR="0;31" CPE_NAME="cpe:/o:redhat:enterprise_linux:7.3:GA:server" HOME_URL="https://www.redhat.com/" BUG_REPORT_URL="https://bugzilla.redhat.com/" REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7" REDHAT_BUGZILLA_PRODUCT_VERSION=7.3 REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" REDHAT_SUPPORT_PRODUCT_VERSION="7.3" ``` ##### SUMMARY <!--- Explain the problem briefly --> Files are writing to /tmp when remote_tmp and local_tmp is defined ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: "{{ site }} - GENERATE JSON REPORT PER SITE STATS ONLY" template: src: "{{ role_path }}/templates/reports/sites/partial_table.j2" dest: '{{ output_dir }}/reports/html/partial/{{ site }}.partial' ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> ``` localhost : ok=360 changed=29 unreachable=0 failed=0 ``` ##### ACTUAL RESULTS <!--- What actually happened? If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes below --> ``` TASK [reports : *** - GENERATE JSON REPORT PER SITE STATS ONLY] **************** fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "[Errno 13] Permission denied: '/tmp/tmpuv61yn'"} PLAY RECAP ********************************************************************* localhost : ok=360 changed=29 unreachable=0 failed=1 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `lib/ansible/plugins/action/template.py` Content: ``` 1 # (c) 2015, Michael DeHaan <[email protected]> 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 17 from __future__ import (absolute_import, division, print_function) 18 __metaclass__ = type 19 20 import os 21 import shutil 22 import tempfile 23 24 from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail 25 from ansible.module_utils._text import to_bytes, to_text 26 from ansible.module_utils.parsing.convert_bool import boolean 27 from ansible.plugins.action import ActionBase 28 from ansible.template import generate_ansible_template_vars 29 30 31 class ActionModule(ActionBase): 32 33 TRANSFERS_FILES = True 34 DEFAULT_NEWLINE_SEQUENCE = "\n" 35 36 def run(self, tmp=None, task_vars=None): 37 ''' handler for template operations ''' 38 39 if task_vars is None: 40 task_vars = dict() 41 42 result = super(ActionModule, self).run(tmp, task_vars) 43 44 source = self._task.args.get('src', None) 45 dest = self._task.args.get('dest', None) 46 force = boolean(self._task.args.get('force', True), strict=False) 47 follow = boolean(self._task.args.get('follow', False), strict=False) 48 state = self._task.args.get('state', None) 49 newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE) 50 variable_start_string = self._task.args.get('variable_start_string', None) 51 variable_end_string = self._task.args.get('variable_end_string', None) 52 block_start_string = self._task.args.get('block_start_string', None) 53 block_end_string = self._task.args.get('block_end_string', None) 54 trim_blocks = self._task.args.get('trim_blocks', None) 55 56 wrong_sequences = ["\\n", "\\r", "\\r\\n"] 57 allowed_sequences = ["\n", "\r", "\r\n"] 58 59 # We need to convert unescaped sequences to proper escaped sequences for Jinja2 60 if newline_sequence in wrong_sequences: 61 newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)] 62 63 try: 64 if state is not None: 65 raise AnsibleActionFail("'state' cannot be specified on a template") 66 elif source is None or dest is None: 67 raise AnsibleActionFail("src and dest are required") 68 elif newline_sequence not in allowed_sequences: 69 raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n") 70 else: 71 try: 72 source = self._find_needle('templates', source) 73 except AnsibleError as e: 74 raise AnsibleActionFail(to_text(e)) 75 76 # Get vault decrypted tmp file 77 try: 78 tmp_source = self._loader.get_real_file(source) 79 except AnsibleFileNotFound as e: 80 raise AnsibleActionFail("could not find src=%s, %s" % (source, to_text(e))) 81 82 # template the source data locally & get ready to transfer 83 try: 84 with open(tmp_source, 'r') as f: 85 template_data = to_text(f.read()) 86 87 # set jinja2 internal search path for includes 88 searchpath = 
task_vars.get('ansible_search_path', []) 89 searchpath.extend([self._loader._basedir, os.path.dirname(source)]) 90 91 # We want to search into the 'templates' subdir of each search path in 92 # addition to our original search paths. 93 newsearchpath = [] 94 for p in searchpath: 95 newsearchpath.append(os.path.join(p, 'templates')) 96 newsearchpath.append(p) 97 searchpath = newsearchpath 98 99 self._templar.environment.loader.searchpath = searchpath 100 self._templar.environment.newline_sequence = newline_sequence 101 if block_start_string is not None: 102 self._templar.environment.block_start_string = block_start_string 103 if block_end_string is not None: 104 self._templar.environment.block_end_string = block_end_string 105 if variable_start_string is not None: 106 self._templar.environment.variable_start_string = variable_start_string 107 if variable_end_string is not None: 108 self._templar.environment.variable_end_string = variable_end_string 109 if trim_blocks is not None: 110 self._templar.environment.trim_blocks = bool(trim_blocks) 111 112 # add ansible 'template' vars 113 temp_vars = task_vars.copy() 114 temp_vars.update(generate_ansible_template_vars(source)) 115 116 old_vars = self._templar._available_variables 117 self._templar.set_available_variables(temp_vars) 118 resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False) 119 self._templar.set_available_variables(old_vars) 120 except AnsibleAction: 121 raise 122 except Exception as e: 123 raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e))) 124 finally: 125 self._loader.cleanup_tmp_file(tmp_source) 126 127 new_task = self._task.copy() 128 new_task.args.pop('newline_sequence', None) 129 new_task.args.pop('block_start_string', None) 130 new_task.args.pop('block_end_string', None) 131 new_task.args.pop('variable_start_string', None) 132 new_task.args.pop('variable_end_string', None) 133 new_task.args.pop('trim_blocks', None) 134 try: 135 tempdir = tempfile.mkdtemp() 136 result_file = os.path.join(tempdir, os.path.basename(source)) 137 with open(result_file, 'wb') as f: 138 f.write(to_bytes(resultant, errors='surrogate_or_strict')) 139 140 new_task.args.update( 141 dict( 142 src=result_file, 143 dest=dest, 144 follow=follow, 145 ), 146 ) 147 copy_action = self._shared_loader_obj.action_loader.get('copy', 148 task=new_task, 149 connection=self._connection, 150 play_context=self._play_context, 151 loader=self._loader, 152 templar=self._templar, 153 shared_loader_obj=self._shared_loader_obj) 154 result.update(copy_action.run(task_vars=task_vars)) 155 finally: 156 shutil.rmtree(tempdir) 157 except AnsibleAction as e: 158 result.update(e.result) 159 finally: 160 self._remove_tmp_path(self._connection._shell.tempdir) 161 162 return result 163 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -21,6 +21,7 @@ import shutil import tempfile +from ansible import constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail from ansible.module_utils._text import to_bytes, to_text from ansible.module_utils.parsing.convert_bool import boolean @@ -132,7 +133,7 @@ new_task.args.pop('variable_end_string', None) new_task.args.pop('trim_blocks', None) try: - tempdir = tempfile.mkdtemp() + tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP) result_file = os.path.join(tempdir, os.path.basename(source)) with open(result_file, 'wb') as f: f.write(to_bytes(resultant, errors='surrogate_or_strict'))
{"golden_diff": "diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py\n--- a/lib/ansible/plugins/action/template.py\n+++ b/lib/ansible/plugins/action/template.py\n@@ -21,6 +21,7 @@\n import shutil\n import tempfile\n \n+from ansible import constants as C\n from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail\n from ansible.module_utils._text import to_bytes, to_text\n from ansible.module_utils.parsing.convert_bool import boolean\n@@ -132,7 +133,7 @@\n new_task.args.pop('variable_end_string', None)\n new_task.args.pop('trim_blocks', None)\n try:\n- tempdir = tempfile.mkdtemp()\n+ tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)\n result_file = os.path.join(tempdir, os.path.basename(source))\n with open(result_file, 'wb') as f:\n f.write(to_bytes(resultant, errors='surrogate_or_strict'))\n", "issue": "/tmp directory being used\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!---\r\nName of the module, plugin, task or feature\r\nDo not include extra details here, e.g. \"vyos_command\" not \"the network module vyos_command\" or the full path\r\n-->\r\ntemplate and potentially more\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n$ ansible --version\r\nansible 2.2.1.0\r\n config file = /opt/****/ansible.cfg\r\n configured module search path = [ '/home/****/ansible']\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nIf using Ansible 2.4 or above, paste the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n-->\r\n```\r\n$ cat ansible.cfg | grep tmp\r\nlocal_tmp = /opt/***/.ansible/tmp\r\nremote_tmp = /opt/***/.ansible/tmp\r\n$\r\n\r\n```\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. 
if this is a network bug the version of firmware on the network device.\r\n-->\r\n```\r\n$ cat /etc/os-release\r\nNAME=\"Red Hat Enterprise Linux Server\"\r\nVERSION=\"7.3 (Maipo)\"\r\nID=\"rhel\"\r\nID_LIKE=\"fedora\"\r\nVERSION_ID=\"7.3\"\r\nPRETTY_NAME=\"Red Hat Enterprise Linux\"\r\nANSI_COLOR=\"0;31\"\r\nCPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.3:GA:server\"\r\nHOME_URL=\"https://www.redhat.com/\"\r\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\r\n\r\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\r\nREDHAT_BUGZILLA_PRODUCT_VERSION=7.3\r\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\r\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7.3\"\r\n\r\n```\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\nFiles are writing to /tmp when remote_tmp and local_tmp is defined\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n\r\n\r\n```yaml\r\n\r\n - name: \"{{ site }} - GENERATE JSON REPORT PER SITE STATS ONLY\"\r\n template:\r\n src: \"{{ role_path }}/templates/reports/sites/partial_table.j2\"\r\n dest: '{{ output_dir }}/reports/html/partial/{{ site }}.partial'\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\n```\r\nlocalhost : ok=360 changed=29 unreachable=0 failed=0\r\n```\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\nTASK [reports : *** - GENERATE JSON REPORT PER SITE STATS ONLY] ****************\r\nfatal: [localhost]: FAILED! => {\"changed\": false, \"failed\": true, \"msg\": \"[Errno 13] Permission denied: '/tmp/tmpuv61yn'\"}\r\n\r\nPLAY RECAP *********************************************************************\r\nlocalhost : ok=360 changed=29 unreachable=0 failed=1\r\n```\r\n\r\n\n", "before_files": [{"content": "# (c) 2015, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport shutil\nimport tempfile\n\nfrom ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail\nfrom ansible.module_utils._text import to_bytes, to_text\nfrom ansible.module_utils.parsing.convert_bool import boolean\nfrom ansible.plugins.action import ActionBase\nfrom ansible.template import generate_ansible_template_vars\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = True\n DEFAULT_NEWLINE_SEQUENCE = \"\\n\"\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for template operations '''\n\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n\n source = self._task.args.get('src', None)\n dest = self._task.args.get('dest', None)\n force = boolean(self._task.args.get('force', True), strict=False)\n follow = boolean(self._task.args.get('follow', False), strict=False)\n state = self._task.args.get('state', None)\n newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)\n variable_start_string = self._task.args.get('variable_start_string', None)\n variable_end_string = self._task.args.get('variable_end_string', None)\n block_start_string = self._task.args.get('block_start_string', None)\n block_end_string = self._task.args.get('block_end_string', None)\n trim_blocks = self._task.args.get('trim_blocks', None)\n\n wrong_sequences = [\"\\\\n\", \"\\\\r\", \"\\\\r\\\\n\"]\n allowed_sequences = [\"\\n\", \"\\r\", \"\\r\\n\"]\n\n # We need to convert unescaped sequences to proper escaped sequences for Jinja2\n if newline_sequence in wrong_sequences:\n newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]\n\n try:\n if state is not None:\n raise AnsibleActionFail(\"'state' cannot be specified on a template\")\n elif source is None or dest is None:\n raise AnsibleActionFail(\"src and dest are required\")\n elif newline_sequence not in allowed_sequences:\n raise AnsibleActionFail(\"newline_sequence needs to be one of: \\n, \\r or \\r\\n\")\n else:\n try:\n source = self._find_needle('templates', source)\n except AnsibleError as e:\n raise AnsibleActionFail(to_text(e))\n\n # Get vault decrypted tmp file\n try:\n tmp_source = self._loader.get_real_file(source)\n except AnsibleFileNotFound as e:\n raise AnsibleActionFail(\"could not find src=%s, %s\" % (source, to_text(e)))\n\n # template the source data locally & get ready to transfer\n try:\n with open(tmp_source, 'r') as f:\n template_data = to_text(f.read())\n\n # set jinja2 internal search path for includes\n searchpath = task_vars.get('ansible_search_path', [])\n searchpath.extend([self._loader._basedir, os.path.dirname(source)])\n\n # We want to search into the 'templates' subdir of each search path in\n # addition to our original search paths.\n newsearchpath = []\n for p in searchpath:\n newsearchpath.append(os.path.join(p, 'templates'))\n newsearchpath.append(p)\n searchpath = newsearchpath\n\n self._templar.environment.loader.searchpath = searchpath\n self._templar.environment.newline_sequence = newline_sequence\n if block_start_string is not None:\n self._templar.environment.block_start_string = block_start_string\n if block_end_string is not None:\n self._templar.environment.block_end_string = block_end_string\n if variable_start_string is not None:\n self._templar.environment.variable_start_string = variable_start_string\n if 
variable_end_string is not None:\n self._templar.environment.variable_end_string = variable_end_string\n if trim_blocks is not None:\n self._templar.environment.trim_blocks = bool(trim_blocks)\n\n # add ansible 'template' vars\n temp_vars = task_vars.copy()\n temp_vars.update(generate_ansible_template_vars(source))\n\n old_vars = self._templar._available_variables\n self._templar.set_available_variables(temp_vars)\n resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)\n self._templar.set_available_variables(old_vars)\n except AnsibleAction:\n raise\n except Exception as e:\n raise AnsibleActionFail(\"%s: %s\" % (type(e).__name__, to_text(e)))\n finally:\n self._loader.cleanup_tmp_file(tmp_source)\n\n new_task = self._task.copy()\n new_task.args.pop('newline_sequence', None)\n new_task.args.pop('block_start_string', None)\n new_task.args.pop('block_end_string', None)\n new_task.args.pop('variable_start_string', None)\n new_task.args.pop('variable_end_string', None)\n new_task.args.pop('trim_blocks', None)\n try:\n tempdir = tempfile.mkdtemp()\n result_file = os.path.join(tempdir, os.path.basename(source))\n with open(result_file, 'wb') as f:\n f.write(to_bytes(resultant, errors='surrogate_or_strict'))\n\n new_task.args.update(\n dict(\n src=result_file,\n dest=dest,\n follow=follow,\n ),\n )\n copy_action = self._shared_loader_obj.action_loader.get('copy',\n task=new_task,\n connection=self._connection,\n play_context=self._play_context,\n loader=self._loader,\n templar=self._templar,\n shared_loader_obj=self._shared_loader_obj)\n result.update(copy_action.run(task_vars=task_vars))\n finally:\n shutil.rmtree(tempdir)\n except AnsibleAction as e:\n result.update(e.result)\n finally:\n self._remove_tmp_path(self._connection._shell.tempdir)\n\n return result\n", "path": "lib/ansible/plugins/action/template.py"}], "after_files": [{"content": "# (c) 2015, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport shutil\nimport tempfile\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail\nfrom ansible.module_utils._text import to_bytes, to_text\nfrom ansible.module_utils.parsing.convert_bool import boolean\nfrom ansible.plugins.action import ActionBase\nfrom ansible.template import generate_ansible_template_vars\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = True\n DEFAULT_NEWLINE_SEQUENCE = \"\\n\"\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for template operations '''\n\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n\n source = self._task.args.get('src', None)\n dest = self._task.args.get('dest', None)\n force = boolean(self._task.args.get('force', True), strict=False)\n follow = boolean(self._task.args.get('follow', False), strict=False)\n state = self._task.args.get('state', None)\n newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)\n variable_start_string = self._task.args.get('variable_start_string', None)\n variable_end_string = self._task.args.get('variable_end_string', None)\n block_start_string = self._task.args.get('block_start_string', None)\n block_end_string = self._task.args.get('block_end_string', None)\n trim_blocks = self._task.args.get('trim_blocks', None)\n\n wrong_sequences = [\"\\\\n\", \"\\\\r\", \"\\\\r\\\\n\"]\n allowed_sequences = [\"\\n\", \"\\r\", \"\\r\\n\"]\n\n # We need to convert unescaped sequences to proper escaped sequences for Jinja2\n if newline_sequence in wrong_sequences:\n newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]\n\n try:\n if state is not None:\n raise AnsibleActionFail(\"'state' cannot be specified on a template\")\n elif source is None or dest is None:\n raise AnsibleActionFail(\"src and dest are required\")\n elif newline_sequence not in allowed_sequences:\n raise AnsibleActionFail(\"newline_sequence needs to be one of: \\n, \\r or \\r\\n\")\n else:\n try:\n source = self._find_needle('templates', source)\n except AnsibleError as e:\n raise AnsibleActionFail(to_text(e))\n\n # Get vault decrypted tmp file\n try:\n tmp_source = self._loader.get_real_file(source)\n except AnsibleFileNotFound as e:\n raise AnsibleActionFail(\"could not find src=%s, %s\" % (source, to_text(e)))\n\n # template the source data locally & get ready to transfer\n try:\n with open(tmp_source, 'r') as f:\n template_data = to_text(f.read())\n\n # set jinja2 internal search path for includes\n searchpath = task_vars.get('ansible_search_path', [])\n searchpath.extend([self._loader._basedir, os.path.dirname(source)])\n\n # We want to search into the 'templates' subdir of each search path in\n # addition to our original search paths.\n newsearchpath = []\n for p in searchpath:\n newsearchpath.append(os.path.join(p, 'templates'))\n newsearchpath.append(p)\n searchpath = newsearchpath\n\n self._templar.environment.loader.searchpath = searchpath\n self._templar.environment.newline_sequence = newline_sequence\n if block_start_string is not None:\n self._templar.environment.block_start_string = block_start_string\n if block_end_string is not None:\n self._templar.environment.block_end_string = block_end_string\n if variable_start_string is not None:\n self._templar.environment.variable_start_string = 
variable_start_string\n if variable_end_string is not None:\n self._templar.environment.variable_end_string = variable_end_string\n if trim_blocks is not None:\n self._templar.environment.trim_blocks = bool(trim_blocks)\n\n # add ansible 'template' vars\n temp_vars = task_vars.copy()\n temp_vars.update(generate_ansible_template_vars(source))\n\n old_vars = self._templar._available_variables\n self._templar.set_available_variables(temp_vars)\n resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)\n self._templar.set_available_variables(old_vars)\n except AnsibleAction:\n raise\n except Exception as e:\n raise AnsibleActionFail(\"%s: %s\" % (type(e).__name__, to_text(e)))\n finally:\n self._loader.cleanup_tmp_file(tmp_source)\n\n new_task = self._task.copy()\n new_task.args.pop('newline_sequence', None)\n new_task.args.pop('block_start_string', None)\n new_task.args.pop('block_end_string', None)\n new_task.args.pop('variable_start_string', None)\n new_task.args.pop('variable_end_string', None)\n new_task.args.pop('trim_blocks', None)\n try:\n tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)\n result_file = os.path.join(tempdir, os.path.basename(source))\n with open(result_file, 'wb') as f:\n f.write(to_bytes(resultant, errors='surrogate_or_strict'))\n\n new_task.args.update(\n dict(\n src=result_file,\n dest=dest,\n follow=follow,\n ),\n )\n copy_action = self._shared_loader_obj.action_loader.get('copy',\n task=new_task,\n connection=self._connection,\n play_context=self._play_context,\n loader=self._loader,\n templar=self._templar,\n shared_loader_obj=self._shared_loader_obj)\n result.update(copy_action.run(task_vars=task_vars))\n finally:\n shutil.rmtree(tempdir)\n except AnsibleAction as e:\n result.update(e.result)\n finally:\n self._remove_tmp_path(self._connection._shell.tempdir)\n\n return result\n", "path": "lib/ansible/plugins/action/template.py"}]}
2,948
220
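The golden diff in the record above replaces the bare `tempfile.mkdtemp()` call in the template action plugin with `tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)`, so the rendered template is staged under the configured `local_tmp` rather than the system `/tmp`. A small sketch of that pattern follows; `LOCAL_TMP` is an assumed stand-in for the Ansible constant, and the file name and contents are illustrative.

```python
# Sketch of the fix pattern: create scratch directories under a configured
# local tmp path instead of the interpreter's default temp location.
import os
import shutil
import tempfile

LOCAL_TMP = os.path.expanduser("~/.ansible/tmp")   # assumed configured value
os.makedirs(LOCAL_TMP, exist_ok=True)

tempdir = tempfile.mkdtemp(dir=LOCAL_TMP)          # honors the configured dir
try:
    result_file = os.path.join(tempdir, "rendered.partial")
    with open(result_file, "wb") as f:
        f.write(b"rendered template contents\n")
finally:
    shutil.rmtree(tempdir)                         # clean up, as the plugin does
```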
gh_patches_debug_23808
rasdani/github-patches
git_diff
statsmodels__statsmodels-6969
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ENH/BUG iqr is not scaled for normal distribution https://www.statsmodels.org/stable/_modules/statsmodels/tools/eval_measures.html#iqr computes raw IQR, I thought we have scaling to normal distribution as in `robust.scale.mad` (iqr is now also available in scipy) code search finds adjustment or usage like `iqr = (q75 - q25) / 1.349` I never remember: are mad and iqr scale for variance or standard deviation (sqrt or not) and there is a bug in axis handling ! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `statsmodels/robust/scale.py` Content: ``` 1 """ 2 Support and standalone functions for Robust Linear Models 3 4 References 5 ---------- 6 PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981. 7 8 R Venables, B Ripley. 'Modern Applied Statistics in S' 9 Springer, New York, 2002. 10 """ 11 import numpy as np 12 from scipy.stats import norm as Gaussian 13 from . import norms 14 from statsmodels.tools import tools 15 from statsmodels.tools.validation import array_like, float_like 16 17 18 def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median): 19 # c \approx .6745 20 """ 21 The Median Absolute Deviation along given axis of an array 22 23 Parameters 24 ---------- 25 a : array_like 26 Input array. 27 c : float, optional 28 The normalization constant. Defined as scipy.stats.norm.ppf(3/4.), 29 which is approximately .6745. 30 axis : int, optional 31 The default is 0. Can also be None. 32 center : callable or float 33 If a callable is provided, such as the default `np.median` then it 34 is expected to be called center(a). The axis argument will be applied 35 via np.apply_over_axes. Otherwise, provide a float. 36 37 Returns 38 ------- 39 mad : float 40 `mad` = median(abs(`a` - center))/`c` 41 """ 42 a = array_like(a, 'a', ndim=None) 43 c = float_like(c, 'c') 44 if callable(center) and a.size: 45 center = np.apply_over_axes(center, a, axis) 46 else: 47 center = 0.0 48 49 return np.median((np.abs(a-center)) / c, axis=axis) 50 51 52 class Huber(object): 53 """ 54 Huber's proposal 2 for estimating location and scale jointly. 55 56 Parameters 57 ---------- 58 c : float, optional 59 Threshold used in threshold for chi=psi**2. Default value is 1.5. 60 tol : float, optional 61 Tolerance for convergence. Default value is 1e-08. 62 maxiter : int, optional0 63 Maximum number of iterations. Default value is 30. 64 norm : statsmodels.robust.norms.RobustNorm, optional 65 A robust norm used in M estimator of location. If None, 66 the location estimator defaults to a one-step 67 fixed point version of the M-estimator using Huber's T. 68 69 call 70 Return joint estimates of Huber's scale and location. 71 72 Examples 73 -------- 74 >>> import numpy as np 75 >>> import statsmodels.api as sm 76 >>> chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03, 77 ... 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7, 78 ... 
3.77, 5.28, 28.95]) 79 >>> sm.robust.scale.huber(chem_data) 80 (array(3.2054980819923693), array(0.67365260010478967)) 81 """ 82 83 def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None): 84 self.c = c 85 self.maxiter = maxiter 86 self.tol = tol 87 self.norm = norm 88 tmp = 2 * Gaussian.cdf(c) - 1 89 self.gamma = tmp + c**2 * (1 - tmp) - 2 * c * Gaussian.pdf(c) 90 91 def __call__(self, a, mu=None, initscale=None, axis=0): 92 """ 93 Compute Huber's proposal 2 estimate of scale, using an optional 94 initial value of scale and an optional estimate of mu. If mu 95 is supplied, it is not reestimated. 96 97 Parameters 98 ---------- 99 a : ndarray 100 1d array 101 mu : float or None, optional 102 If the location mu is supplied then it is not reestimated. 103 Default is None, which means that it is estimated. 104 initscale : float or None, optional 105 A first guess on scale. If initscale is None then the standardized 106 median absolute deviation of a is used. 107 108 Notes 109 ----- 110 `Huber` minimizes the function 111 112 sum(psi((a[i]-mu)/scale)**2) 113 114 as a function of (mu, scale), where 115 116 psi(x) = np.clip(x, -self.c, self.c) 117 """ 118 a = np.asarray(a) 119 if mu is None: 120 n = a.shape[0] - 1 121 mu = np.median(a, axis=axis) 122 est_mu = True 123 else: 124 n = a.shape[0] 125 mu = mu 126 est_mu = False 127 128 if initscale is None: 129 scale = mad(a, axis=axis) 130 else: 131 scale = initscale 132 scale = tools.unsqueeze(scale, axis, a.shape) 133 mu = tools.unsqueeze(mu, axis, a.shape) 134 return self._estimate_both(a, scale, mu, axis, est_mu, n) 135 136 def _estimate_both(self, a, scale, mu, axis, est_mu, n): 137 """ 138 Estimate scale and location simultaneously with the following 139 pseudo_loop: 140 141 while not_converged: 142 mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu) 143 144 where estimate_location is an M-estimator and estimate_scale implements 145 the check used in Section 5.5 of Venables & Ripley 146 """ # noqa:E501 147 for _ in range(self.maxiter): 148 # Estimate the mean along a given axis 149 if est_mu: 150 if self.norm is None: 151 # This is a one-step fixed-point estimator 152 # if self.norm == norms.HuberT 153 # It should be faster than using norms.HuberT 154 nmu = np.clip(a, mu-self.c*scale, 155 mu+self.c*scale).sum(axis) / a.shape[axis] 156 else: 157 nmu = norms.estimate_location(a, scale, self.norm, axis, 158 mu, self.maxiter, self.tol) 159 else: 160 # Effectively, do nothing 161 nmu = mu.squeeze() 162 nmu = tools.unsqueeze(nmu, axis, a.shape) 163 164 subset = np.less_equal(np.abs((a - mu)/scale), self.c) 165 card = subset.sum(axis) 166 167 scale_num = np.sum(subset * (a - nmu)**2, axis) 168 scale_denom = (n * self.gamma - (a.shape[axis] - card) * self.c**2) 169 nscale = np.sqrt(scale_num / scale_denom) 170 nscale = tools.unsqueeze(nscale, axis, a.shape) 171 172 test1 = np.alltrue(np.less_equal(np.abs(scale - nscale), 173 nscale * self.tol)) 174 test2 = np.alltrue( 175 np.less_equal(np.abs(mu - nmu), nscale * self.tol)) 176 if not (test1 and test2): 177 mu = nmu 178 scale = nscale 179 else: 180 return nmu.squeeze(), nscale.squeeze() 181 raise ValueError('joint estimation of location and scale failed ' 182 'to converge in %d iterations' % self.maxiter) 183 184 185 huber = Huber() 186 187 188 class HuberScale(object): 189 r""" 190 Huber's scaling for fitting robust linear models. 191 192 Huber's scale is intended to be used as the scale estimate in the 193 IRLS algorithm and is slightly different than the `Huber` class. 
194 195 Parameters 196 ---------- 197 d : float, optional 198 d is the tuning constant for Huber's scale. Default is 2.5 199 tol : float, optional 200 The convergence tolerance 201 maxiter : int, optiona 202 The maximum number of iterations. The default is 30. 203 204 Methods 205 ------- 206 call 207 Return's Huber's scale computed as below 208 209 Notes 210 -------- 211 Huber's scale is the iterative solution to 212 213 scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i)*sigma_i**2 214 215 where the Huber function is 216 217 chi(x) = (x**2)/2 for \|x\| < d 218 chi(x) = (d**2)/2 for \|x\| >= d 219 220 and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\ 221 scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2) 222 """ 223 def __init__(self, d=2.5, tol=1e-08, maxiter=30): 224 self.d = d 225 self.tol = tol 226 self.maxiter = maxiter 227 228 def __call__(self, df_resid, nobs, resid): 229 h = df_resid / nobs * ( 230 self.d ** 2 231 + (1 - self.d ** 2) * Gaussian.cdf(self.d) 232 - .5 - self.d / (np.sqrt(2 * np.pi)) * np.exp(-.5 * self.d ** 2) 233 ) 234 s = mad(resid) 235 236 def subset(x): 237 return np.less(np.abs(resid / x), self.d) 238 239 def chi(s): 240 return subset(s) * (resid / s) ** 2 / 2 + (1 - subset(s)) * \ 241 (self.d ** 2 / 2) 242 243 scalehist = [np.inf, s] 244 niter = 1 245 while (np.abs(scalehist[niter - 1] - scalehist[niter]) > self.tol 246 and niter < self.maxiter): 247 nscale = np.sqrt(1 / (nobs * h) * np.sum(chi(scalehist[-1])) * 248 scalehist[-1] ** 2) 249 scalehist.append(nscale) 250 niter += 1 251 # TODO: raise on convergence failure? 252 return scalehist[-1] 253 254 255 hubers_scale = HuberScale() 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/statsmodels/robust/scale.py b/statsmodels/robust/scale.py --- a/statsmodels/robust/scale.py +++ b/statsmodels/robust/scale.py @@ -49,6 +49,44 @@ return np.median((np.abs(a-center)) / c, axis=axis) +def iqr(a, c=Gaussian.ppf(3/4) - Gaussian.ppf(1/4), axis=0, center=np.median): + """ + The normalized interquartile range along given axis of an array + + Parameters + ---------- + a : array_like + Input array. + c : float, optional + The normalization constant, used to get consistent estimates of the + standard deviation at the normal distribution. Defined as + scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is + approximately 1.349. + axis : int, optional + The default is 0. Can also be None. + center : callable or float + If a callable is provided, such as the default `np.median` then it + is expected to be called center(a). The axis argument will be applied + via np.apply_over_axes. Otherwise, provide a float. + + Returns + ------- + The normalized interquartile range + """ + a = array_like(a, 'a', ndim=None) + c = float_like(c, 'c') + + if a.size == 0: + return np.nan + else: + if callable(center) and a.size: + center = np.apply_over_axes(center, a, axis) + else: + center = 0.0 + quantiles = np.quantile(a - center, [0.25, 0.75], axis=axis) + return np.squeeze(np.diff(quantiles, axis=0) / c) + + class Huber(object): """ Huber's proposal 2 for estimating location and scale jointly.
{"golden_diff": "diff --git a/statsmodels/robust/scale.py b/statsmodels/robust/scale.py\n--- a/statsmodels/robust/scale.py\n+++ b/statsmodels/robust/scale.py\n@@ -49,6 +49,44 @@\n return np.median((np.abs(a-center)) / c, axis=axis)\n \n \n+def iqr(a, c=Gaussian.ppf(3/4) - Gaussian.ppf(1/4), axis=0, center=np.median):\n+ \"\"\"\n+ The normalized interquartile range along given axis of an array\n+\n+ Parameters\n+ ----------\n+ a : array_like\n+ Input array.\n+ c : float, optional\n+ The normalization constant, used to get consistent estimates of the\n+ standard deviation at the normal distribution. Defined as\n+ scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is\n+ approximately 1.349.\n+ axis : int, optional\n+ The default is 0. Can also be None.\n+ center : callable or float\n+ If a callable is provided, such as the default `np.median` then it\n+ is expected to be called center(a). The axis argument will be applied\n+ via np.apply_over_axes. Otherwise, provide a float.\n+\n+ Returns\n+ -------\n+ The normalized interquartile range\n+ \"\"\"\n+ a = array_like(a, 'a', ndim=None)\n+ c = float_like(c, 'c')\n+\n+ if a.size == 0:\n+ return np.nan\n+ else:\n+ if callable(center) and a.size:\n+ center = np.apply_over_axes(center, a, axis)\n+ else:\n+ center = 0.0\n+ quantiles = np.quantile(a - center, [0.25, 0.75], axis=axis)\n+ return np.squeeze(np.diff(quantiles, axis=0) / c)\n+\n+\n class Huber(object):\n \"\"\"\n Huber's proposal 2 for estimating location and scale jointly.\n", "issue": "ENH/BUG iqr is not scaled for normal distribution\nhttps://www.statsmodels.org/stable/_modules/statsmodels/tools/eval_measures.html#iqr\r\n\r\ncomputes raw IQR, I thought we have scaling to normal distribution as in `robust.scale.mad`\r\n(iqr is now also available in scipy)\r\n\r\ncode search finds adjustment or usage like `iqr = (q75 - q25) / 1.349`\r\n\r\nI never remember: are mad and iqr scale for variance or standard deviation (sqrt or not)\r\n\r\nand there is a bug in axis handling !\r\n\n", "before_files": [{"content": "\"\"\"\nSupport and standalone functions for Robust Linear Models\n\nReferences\n----------\nPJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.\n\nR Venables, B Ripley. 'Modern Applied Statistics in S'\n Springer, New York, 2002.\n\"\"\"\nimport numpy as np\nfrom scipy.stats import norm as Gaussian\nfrom . import norms\nfrom statsmodels.tools import tools\nfrom statsmodels.tools.validation import array_like, float_like\n\n\ndef mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median):\n # c \\approx .6745\n \"\"\"\n The Median Absolute Deviation along given axis of an array\n\n Parameters\n ----------\n a : array_like\n Input array.\n c : float, optional\n The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),\n which is approximately .6745.\n axis : int, optional\n The default is 0. Can also be None.\n center : callable or float\n If a callable is provided, such as the default `np.median` then it\n is expected to be called center(a). The axis argument will be applied\n via np.apply_over_axes. 
Otherwise, provide a float.\n\n Returns\n -------\n mad : float\n `mad` = median(abs(`a` - center))/`c`\n \"\"\"\n a = array_like(a, 'a', ndim=None)\n c = float_like(c, 'c')\n if callable(center) and a.size:\n center = np.apply_over_axes(center, a, axis)\n else:\n center = 0.0\n\n return np.median((np.abs(a-center)) / c, axis=axis)\n\n\nclass Huber(object):\n \"\"\"\n Huber's proposal 2 for estimating location and scale jointly.\n\n Parameters\n ----------\n c : float, optional\n Threshold used in threshold for chi=psi**2. Default value is 1.5.\n tol : float, optional\n Tolerance for convergence. Default value is 1e-08.\n maxiter : int, optional0\n Maximum number of iterations. Default value is 30.\n norm : statsmodels.robust.norms.RobustNorm, optional\n A robust norm used in M estimator of location. If None,\n the location estimator defaults to a one-step\n fixed point version of the M-estimator using Huber's T.\n\n call\n Return joint estimates of Huber's scale and location.\n\n Examples\n --------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,\n ... 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7,\n ... 3.77, 5.28, 28.95])\n >>> sm.robust.scale.huber(chem_data)\n (array(3.2054980819923693), array(0.67365260010478967))\n \"\"\"\n\n def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None):\n self.c = c\n self.maxiter = maxiter\n self.tol = tol\n self.norm = norm\n tmp = 2 * Gaussian.cdf(c) - 1\n self.gamma = tmp + c**2 * (1 - tmp) - 2 * c * Gaussian.pdf(c)\n\n def __call__(self, a, mu=None, initscale=None, axis=0):\n \"\"\"\n Compute Huber's proposal 2 estimate of scale, using an optional\n initial value of scale and an optional estimate of mu. If mu\n is supplied, it is not reestimated.\n\n Parameters\n ----------\n a : ndarray\n 1d array\n mu : float or None, optional\n If the location mu is supplied then it is not reestimated.\n Default is None, which means that it is estimated.\n initscale : float or None, optional\n A first guess on scale. 
If initscale is None then the standardized\n median absolute deviation of a is used.\n\n Notes\n -----\n `Huber` minimizes the function\n\n sum(psi((a[i]-mu)/scale)**2)\n\n as a function of (mu, scale), where\n\n psi(x) = np.clip(x, -self.c, self.c)\n \"\"\"\n a = np.asarray(a)\n if mu is None:\n n = a.shape[0] - 1\n mu = np.median(a, axis=axis)\n est_mu = True\n else:\n n = a.shape[0]\n mu = mu\n est_mu = False\n\n if initscale is None:\n scale = mad(a, axis=axis)\n else:\n scale = initscale\n scale = tools.unsqueeze(scale, axis, a.shape)\n mu = tools.unsqueeze(mu, axis, a.shape)\n return self._estimate_both(a, scale, mu, axis, est_mu, n)\n\n def _estimate_both(self, a, scale, mu, axis, est_mu, n):\n \"\"\"\n Estimate scale and location simultaneously with the following\n pseudo_loop:\n\n while not_converged:\n mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu)\n\n where estimate_location is an M-estimator and estimate_scale implements\n the check used in Section 5.5 of Venables & Ripley\n \"\"\" # noqa:E501\n for _ in range(self.maxiter):\n # Estimate the mean along a given axis\n if est_mu:\n if self.norm is None:\n # This is a one-step fixed-point estimator\n # if self.norm == norms.HuberT\n # It should be faster than using norms.HuberT\n nmu = np.clip(a, mu-self.c*scale,\n mu+self.c*scale).sum(axis) / a.shape[axis]\n else:\n nmu = norms.estimate_location(a, scale, self.norm, axis,\n mu, self.maxiter, self.tol)\n else:\n # Effectively, do nothing\n nmu = mu.squeeze()\n nmu = tools.unsqueeze(nmu, axis, a.shape)\n\n subset = np.less_equal(np.abs((a - mu)/scale), self.c)\n card = subset.sum(axis)\n\n scale_num = np.sum(subset * (a - nmu)**2, axis)\n scale_denom = (n * self.gamma - (a.shape[axis] - card) * self.c**2)\n nscale = np.sqrt(scale_num / scale_denom)\n nscale = tools.unsqueeze(nscale, axis, a.shape)\n\n test1 = np.alltrue(np.less_equal(np.abs(scale - nscale),\n nscale * self.tol))\n test2 = np.alltrue(\n np.less_equal(np.abs(mu - nmu), nscale * self.tol))\n if not (test1 and test2):\n mu = nmu\n scale = nscale\n else:\n return nmu.squeeze(), nscale.squeeze()\n raise ValueError('joint estimation of location and scale failed '\n 'to converge in %d iterations' % self.maxiter)\n\n\nhuber = Huber()\n\n\nclass HuberScale(object):\n r\"\"\"\n Huber's scaling for fitting robust linear models.\n\n Huber's scale is intended to be used as the scale estimate in the\n IRLS algorithm and is slightly different than the `Huber` class.\n\n Parameters\n ----------\n d : float, optional\n d is the tuning constant for Huber's scale. Default is 2.5\n tol : float, optional\n The convergence tolerance\n maxiter : int, optiona\n The maximum number of iterations. 
The default is 30.\n\n Methods\n -------\n call\n Return's Huber's scale computed as below\n\n Notes\n --------\n Huber's scale is the iterative solution to\n\n scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i)*sigma_i**2\n\n where the Huber function is\n\n chi(x) = (x**2)/2 for \\|x\\| < d\n chi(x) = (d**2)/2 for \\|x\\| >= d\n\n and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\\\n scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2)\n \"\"\"\n def __init__(self, d=2.5, tol=1e-08, maxiter=30):\n self.d = d\n self.tol = tol\n self.maxiter = maxiter\n\n def __call__(self, df_resid, nobs, resid):\n h = df_resid / nobs * (\n self.d ** 2\n + (1 - self.d ** 2) * Gaussian.cdf(self.d)\n - .5 - self.d / (np.sqrt(2 * np.pi)) * np.exp(-.5 * self.d ** 2)\n )\n s = mad(resid)\n\n def subset(x):\n return np.less(np.abs(resid / x), self.d)\n\n def chi(s):\n return subset(s) * (resid / s) ** 2 / 2 + (1 - subset(s)) * \\\n (self.d ** 2 / 2)\n\n scalehist = [np.inf, s]\n niter = 1\n while (np.abs(scalehist[niter - 1] - scalehist[niter]) > self.tol\n and niter < self.maxiter):\n nscale = np.sqrt(1 / (nobs * h) * np.sum(chi(scalehist[-1])) *\n scalehist[-1] ** 2)\n scalehist.append(nscale)\n niter += 1\n # TODO: raise on convergence failure?\n return scalehist[-1]\n\n\nhubers_scale = HuberScale()\n", "path": "statsmodels/robust/scale.py"}], "after_files": [{"content": "\"\"\"\nSupport and standalone functions for Robust Linear Models\n\nReferences\n----------\nPJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.\n\nR Venables, B Ripley. 'Modern Applied Statistics in S'\n Springer, New York, 2002.\n\"\"\"\nimport numpy as np\nfrom scipy.stats import norm as Gaussian\nfrom . import norms\nfrom statsmodels.tools import tools\nfrom statsmodels.tools.validation import array_like, float_like\n\n\ndef mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median):\n # c \\approx .6745\n \"\"\"\n The Median Absolute Deviation along given axis of an array\n\n Parameters\n ----------\n a : array_like\n Input array.\n c : float, optional\n The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),\n which is approximately .6745.\n axis : int, optional\n The default is 0. Can also be None.\n center : callable or float\n If a callable is provided, such as the default `np.median` then it\n is expected to be called center(a). The axis argument will be applied\n via np.apply_over_axes. Otherwise, provide a float.\n\n Returns\n -------\n mad : float\n `mad` = median(abs(`a` - center))/`c`\n \"\"\"\n a = array_like(a, 'a', ndim=None)\n c = float_like(c, 'c')\n if callable(center) and a.size:\n center = np.apply_over_axes(center, a, axis)\n else:\n center = 0.0\n\n return np.median((np.abs(a-center)) / c, axis=axis)\n\n\ndef iqr(a, c=Gaussian.ppf(3/4) - Gaussian.ppf(1/4), axis=0, center=np.median):\n \"\"\"\n The normalized interquartile range along given axis of an array\n\n Parameters\n ----------\n a : array_like\n Input array.\n c : float, optional\n The normalization constant, used to get consistent estimates of the\n standard deviation at the normal distribution. Defined as\n scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is\n approximately 1.349.\n axis : int, optional\n The default is 0. Can also be None.\n center : callable or float\n If a callable is provided, such as the default `np.median` then it\n is expected to be called center(a). The axis argument will be applied\n via np.apply_over_axes. 
Otherwise, provide a float.\n\n Returns\n -------\n The normalized interquartile range\n \"\"\"\n a = array_like(a, 'a', ndim=None)\n c = float_like(c, 'c')\n\n if a.size == 0:\n return np.nan\n else:\n if callable(center) and a.size:\n center = np.apply_over_axes(center, a, axis)\n else:\n center = 0.0\n quantiles = np.quantile(a - center, [0.25, 0.75], axis=axis)\n return np.squeeze(np.diff(quantiles, axis=0) / c)\n\n\nclass Huber(object):\n \"\"\"\n Huber's proposal 2 for estimating location and scale jointly.\n\n Parameters\n ----------\n c : float, optional\n Threshold used in threshold for chi=psi**2. Default value is 1.5.\n tol : float, optional\n Tolerance for convergence. Default value is 1e-08.\n maxiter : int, optional0\n Maximum number of iterations. Default value is 30.\n norm : statsmodels.robust.norms.RobustNorm, optional\n A robust norm used in M estimator of location. If None,\n the location estimator defaults to a one-step\n fixed point version of the M-estimator using Huber's T.\n\n call\n Return joint estimates of Huber's scale and location.\n\n Examples\n --------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,\n ... 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7,\n ... 3.77, 5.28, 28.95])\n >>> sm.robust.scale.huber(chem_data)\n (array(3.2054980819923693), array(0.67365260010478967))\n \"\"\"\n\n def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None):\n self.c = c\n self.maxiter = maxiter\n self.tol = tol\n self.norm = norm\n tmp = 2 * Gaussian.cdf(c) - 1\n self.gamma = tmp + c**2 * (1 - tmp) - 2 * c * Gaussian.pdf(c)\n\n def __call__(self, a, mu=None, initscale=None, axis=0):\n \"\"\"\n Compute Huber's proposal 2 estimate of scale, using an optional\n initial value of scale and an optional estimate of mu. If mu\n is supplied, it is not reestimated.\n\n Parameters\n ----------\n a : ndarray\n 1d array\n mu : float or None, optional\n If the location mu is supplied then it is not reestimated.\n Default is None, which means that it is estimated.\n initscale : float or None, optional\n A first guess on scale. 
If initscale is None then the standardized\n median absolute deviation of a is used.\n\n Notes\n -----\n `Huber` minimizes the function\n\n sum(psi((a[i]-mu)/scale)**2)\n\n as a function of (mu, scale), where\n\n psi(x) = np.clip(x, -self.c, self.c)\n \"\"\"\n a = np.asarray(a)\n if mu is None:\n n = a.shape[0] - 1\n mu = np.median(a, axis=axis)\n est_mu = True\n else:\n n = a.shape[0]\n mu = mu\n est_mu = False\n\n if initscale is None:\n scale = mad(a, axis=axis)\n else:\n scale = initscale\n scale = tools.unsqueeze(scale, axis, a.shape)\n mu = tools.unsqueeze(mu, axis, a.shape)\n return self._estimate_both(a, scale, mu, axis, est_mu, n)\n\n def _estimate_both(self, a, scale, mu, axis, est_mu, n):\n \"\"\"\n Estimate scale and location simultaneously with the following\n pseudo_loop:\n\n while not_converged:\n mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu)\n\n where estimate_location is an M-estimator and estimate_scale implements\n the check used in Section 5.5 of Venables & Ripley\n \"\"\" # noqa:E501\n for _ in range(self.maxiter):\n # Estimate the mean along a given axis\n if est_mu:\n if self.norm is None:\n # This is a one-step fixed-point estimator\n # if self.norm == norms.HuberT\n # It should be faster than using norms.HuberT\n nmu = np.clip(a, mu-self.c*scale,\n mu+self.c*scale).sum(axis) / a.shape[axis]\n else:\n nmu = norms.estimate_location(a, scale, self.norm, axis,\n mu, self.maxiter, self.tol)\n else:\n # Effectively, do nothing\n nmu = mu.squeeze()\n nmu = tools.unsqueeze(nmu, axis, a.shape)\n\n subset = np.less_equal(np.abs((a - mu)/scale), self.c)\n card = subset.sum(axis)\n\n scale_num = np.sum(subset * (a - nmu)**2, axis)\n scale_denom = (n * self.gamma - (a.shape[axis] - card) * self.c**2)\n nscale = np.sqrt(scale_num / scale_denom)\n nscale = tools.unsqueeze(nscale, axis, a.shape)\n\n test1 = np.alltrue(np.less_equal(np.abs(scale - nscale),\n nscale * self.tol))\n test2 = np.alltrue(\n np.less_equal(np.abs(mu - nmu), nscale * self.tol))\n if not (test1 and test2):\n mu = nmu\n scale = nscale\n else:\n return nmu.squeeze(), nscale.squeeze()\n raise ValueError('joint estimation of location and scale failed '\n 'to converge in %d iterations' % self.maxiter)\n\n\nhuber = Huber()\n\n\nclass HuberScale(object):\n r\"\"\"\n Huber's scaling for fitting robust linear models.\n\n Huber's scale is intended to be used as the scale estimate in the\n IRLS algorithm and is slightly different than the `Huber` class.\n\n Parameters\n ----------\n d : float, optional\n d is the tuning constant for Huber's scale. Default is 2.5\n tol : float, optional\n The convergence tolerance\n maxiter : int, optiona\n The maximum number of iterations. 
The default is 30.\n\n Methods\n -------\n call\n Return's Huber's scale computed as below\n\n Notes\n --------\n Huber's scale is the iterative solution to\n\n scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i)*sigma_i**2\n\n where the Huber function is\n\n chi(x) = (x**2)/2 for \\|x\\| < d\n chi(x) = (d**2)/2 for \\|x\\| >= d\n\n and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\\\n scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2)\n \"\"\"\n def __init__(self, d=2.5, tol=1e-08, maxiter=30):\n self.d = d\n self.tol = tol\n self.maxiter = maxiter\n\n def __call__(self, df_resid, nobs, resid):\n h = df_resid / nobs * (\n self.d ** 2\n + (1 - self.d ** 2) * Gaussian.cdf(self.d)\n - .5 - self.d / (np.sqrt(2 * np.pi)) * np.exp(-.5 * self.d ** 2)\n )\n s = mad(resid)\n\n def subset(x):\n return np.less(np.abs(resid / x), self.d)\n\n def chi(s):\n return subset(s) * (resid / s) ** 2 / 2 + (1 - subset(s)) * \\\n (self.d ** 2 / 2)\n\n scalehist = [np.inf, s]\n niter = 1\n while (np.abs(scalehist[niter - 1] - scalehist[niter]) > self.tol\n and niter < self.maxiter):\n nscale = np.sqrt(1 / (nobs * h) * np.sum(chi(scalehist[-1])) *\n scalehist[-1] ** 2)\n scalehist.append(nscale)\n niter += 1\n # TODO: raise on convergence failure?\n return scalehist[-1]\n\n\nhubers_scale = HuberScale()\n", "path": "statsmodels/robust/scale.py"}]}
3,404
470
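The new `iqr` helper in the diff above divides the raw interquartile range by `Gaussian.ppf(3/4) - Gaussian.ppf(1/4)` (about 1.349). Like the `c` constant in `mad`, this makes the statistic a consistent estimator of the standard deviation (not the variance) at the normal distribution, which answers the scaling question raised in the issue. A quick numerical check, with an arbitrary seed and sample size:

```python
# Dividing the raw IQR by Phi^{-1}(3/4) - Phi^{-1}(1/4) (~1.349) recovers the
# standard deviation for normal data; the scaled MAD does the same.
import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
x = rng.normal(loc=5.0, scale=2.0, size=100_000)

c = norm.ppf(0.75) - norm.ppf(0.25)                 # ~1.349
q25, q75 = np.quantile(x, [0.25, 0.75])
print((q75 - q25) / c)                              # close to 2.0, the true std dev
print(np.median(np.abs(x - np.median(x))) / norm.ppf(0.75))  # scaled MAD, also ~2.0
```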
gh_patches_debug_17015
rasdani/github-patches
git_diff
Azure__azure-cli-extensions-3621
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- storage blob directory download throwing exception "Could not deserialize key data. The data may be in an incorrect format, or it may be encrypted with an unsupported algorithm" ### Extension name (the extension in question) Storage Preview, v0.7.3 ### Description of the issue (in as much detail as possible) storage blob directory download throwing exception "Could not deserialize key data. The data may be in an incorrect format, or it may be encrypted with an unsupported algorithm" From the exception seems like the token is not encrypted in RS256 and decoding is failing? ----- Detailed Log: CMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:24,677 | INFO | az_command_data_logger | command args: storage blob directory download -d {} -s {} -c {} --account-name {} --auth-mode {} --recursive -o {} --debug CMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,234 | INFO | az_command_data_logger | extension name: storage-preview CMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,234 | INFO | az_command_data_logger | extension version: 0.7.3 CMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,662 | ERROR | az_command_data_logger | The command failed with an unexpected error. Here is the traceback: CMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,667 | ERROR | az_command_data_logger | Could not deserialize key data. The data may be in an incorrect format or it may be encrypted with an unsupported algorithm. Traceback (most recent call last): File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\jwt/algorithms.py", line 256, in prepare_key File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\cryptography/hazmat/primitives/serialization/base.py", line 18, in load_pem_private_key File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\cryptography/hazmat/backends/openssl/backend.py", line 1244, in load_pem_private_key File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\cryptography/hazmat/backends/openssl/backend.py", line 1475, in _load_key File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\cryptography/hazmat/backends/openssl/backend.py", line 1517, in _handle_key_loading_error ValueError: Could not deserialize key data. The data may be in an incorrect format or it may be encrypted with an unsupported algorithm. 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\knack/cli.py", line 231, in invoke File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\azure/cli/core/commands/__init__.py", line 657, in execute File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\azure/cli/core/commands/__init__.py", line 720, in _run_jobs_serially File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\azure/cli/core/commands/__init__.py", line 712, in _run_job File "C:\Users\someuserwhomyoudontknow\.azure\cliextensions\storage-preview\azext_storage_preview\__init__.py", line 252, in new_handler handler(ex) File "C:\Users\someuserwhomyoudontknow\.azure\cliextensions\storage-preview\azext_storage_preview\__init__.py", line 194, in handler raise ex File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\azure/cli/core/commands/__init__.py", line 691, in _run_job File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\azure/cli/core/commands/__init__.py", line 328, in __call__ File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\azure/cli/core/commands/command_operation.py", line 121, in handler File "C:\Users\someuserwhomyoudontknow\.azure\cliextensions\storage-preview\azext_storage_preview\operations\azcopy.py", line 24, in storage_blob_download azcopy = _azcopy_blob_client(cmd, client) File "C:\Users\someuserwhomyoudontknow\.azure\cliextensions\storage-preview\azext_storage_preview\operations\azcopy.py", line 65, in _azcopy_blob_client return AzCopy(creds=blob_client_auth_for_azcopy(cmd, client)) File "C:\Users\someuserwhomyoudontknow\.azure\cliextensions\storage-preview\azext_storage_preview\azcopy\util.py", line 81, in blob_client_auth_for_azcopy token_info = _unserialize_non_msi_token_payload(token_info) File "C:\Users\someuserwhomyoudontknow\.azure\cliextensions\storage-preview\azext_storage_preview\azcopy\util.py", line 105, in _unserialize_non_msi_token_payload decode = jwt.decode(token_info['accessToken'], verify=False, algorithms=['RS256']) File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\jwt/api_jwt.py", line 119, in decode File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\jwt/api_jwt.py", line 90, in decode_complete File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\jwt/api_jws.py", line 149, in decode_complete File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\jwt/api_jws.py", line 233, in _verify_signature File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\jwt/algorithms.py", line 258, in prepare_key File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\cryptography/hazmat/primitives/serialization/base.py", line 23, in load_pem_public_key File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\cryptography/hazmat/backends/openssl/backend.py", line 1274, in load_pem_public_key File "D:\a\1\s\build_scripts\windows\artifacts\cli\Lib\site-packages\cryptography/hazmat/backends/openssl/backend.py", line 1517, in _handle_key_loading_error ValueError: Could not deserialize key data. The data may be in an incorrect format or it may be encrypted with an unsupported algorithm. CMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,669 | INFO | az_command_data_logger | exit code: 1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `src/storage-preview/setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # -------------------------------------------------------------------------------------------- 4 # Copyright (c) Microsoft Corporation. All rights reserved. 5 # Licensed under the MIT License. See License.txt in the project root for license information. 6 # -------------------------------------------------------------------------------------------- 7 8 from codecs import open 9 from setuptools import setup, find_packages 10 11 VERSION = "0.7.3" 12 13 CLASSIFIERS = [ 14 'Development Status :: 4 - Beta', 15 'Intended Audience :: Developers', 16 'Intended Audience :: System Administrators', 17 'Programming Language :: Python', 18 'Programming Language :: Python :: 2', 19 'Programming Language :: Python :: 2.7', 20 'Programming Language :: Python :: 3', 21 'Programming Language :: Python :: 3.4', 22 'Programming Language :: Python :: 3.5', 23 'Programming Language :: Python :: 3.6', 24 'License :: OSI Approved :: MIT License', 25 ] 26 27 DEPENDENCIES = ['azure-core'] 28 29 setup( 30 name='storage-preview', 31 version=VERSION, 32 description='Provides a preview for upcoming storage features.', 33 long_description='An Azure CLI Extension for storage preview features. This will replace the full storage module.', 34 license='MIT', 35 author='Microsoft Corporation', 36 author_email='[email protected]', 37 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/storage-preview', 38 classifiers=CLASSIFIERS, 39 package_data={ 40 'azext_storage_preview': ['azext_metadata.json', 'azcopy/*/*'] 41 }, 42 packages=find_packages(exclude=["tests"]), 43 install_requires=DEPENDENCIES 44 ) 45 ``` Path: `src/storage-preview/azext_storage_preview/azcopy/util.py` Content: ``` 1 # -------------------------------------------------------------------------------------------- 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License. See License.txt in the project root for license information. 
4 # -------------------------------------------------------------------------------------------- 5 6 7 import os 8 import json 9 import platform 10 import subprocess 11 import datetime 12 from six.moves.urllib.parse import urlparse 13 from azure.cli.core._profile import Profile 14 from knack.log import get_logger 15 16 logger = get_logger(__name__) 17 18 19 STORAGE_RESOURCE_ENDPOINT = "https://storage.azure.com" 20 SERVICES = {'blob', 'file'} 21 AZCOPY_VERSION = '10.5.0' 22 23 24 class AzCopy(object): 25 system_executable_path = { 26 'Darwin': ['azcopy_darwin_amd64_{}'.format(AZCOPY_VERSION), 'azcopy'], 27 'Linux': ['azcopy_linux_amd64_{}'.format(AZCOPY_VERSION), 'azcopy'], 28 'Windows': ['azcopy_windows_amd64_{}'.format(AZCOPY_VERSION), 'azcopy.exe'] 29 } 30 31 def __init__(self, creds=None): 32 self.system = platform.system() 33 curr_path = os.path.dirname(os.path.realpath(__file__)) 34 self.executable = os.path.join(curr_path, *AzCopy.system_executable_path[self.system]) 35 self.creds = creds 36 37 def run_command(self, args): 38 command = [self.executable] + args 39 logger.warning("Azcopy command: %s", command) 40 env_kwargs = {} 41 if self.creds and self.creds.token_info: 42 env_kwargs = {'AZCOPY_OAUTH_TOKEN_INFO': json.dumps(self.creds.token_info)} 43 subprocess.call(command, env=dict(os.environ, **env_kwargs)) 44 45 def copy(self, source, destination, flags=None): 46 flags = flags or [] 47 self.run_command(['copy', source, destination] + flags) 48 49 def remove(self, target, flags=None): 50 flags = flags or [] 51 self.run_command(['remove', target] + flags) 52 53 def sync(self, source, destination, flags=None): 54 flags = flags or [] 55 self.run_command(['sync', source, destination] + flags) 56 57 58 class AzCopyCredentials(object): # pylint: disable=too-few-public-methods 59 def __init__(self, sas_token=None, token_info=None): 60 self.sas_token = sas_token 61 self.token_info = token_info 62 63 64 def login_auth_for_azcopy(cmd): 65 token_info = Profile(cli_ctx=cmd.cli_ctx).get_raw_token(resource=STORAGE_RESOURCE_ENDPOINT)[0][2] 66 try: 67 token_info = _unserialize_non_msi_token_payload(token_info) 68 except KeyError: # unserialized MSI token payload 69 raise Exception('MSI auth not yet supported.') 70 return AzCopyCredentials(token_info=token_info) 71 72 73 def blob_client_auth_for_azcopy(cmd, blob_client): 74 azcopy_creds = storage_client_auth_for_azcopy(cmd, blob_client, 'blob') 75 if azcopy_creds is not None: 76 return azcopy_creds 77 78 # oauth mode 79 token_info = Profile(cli_ctx=cmd.cli_ctx).get_raw_token(resource=STORAGE_RESOURCE_ENDPOINT)[0][2] 80 try: 81 token_info = _unserialize_non_msi_token_payload(token_info) 82 except KeyError: # unserialized MSI token payload 83 raise Exception('MSI auth not yet supported.') 84 return AzCopyCredentials(token_info=token_info) 85 86 87 def storage_client_auth_for_azcopy(cmd, client, service): 88 if service not in SERVICES: 89 raise Exception('{} not one of: {}'.format(service, str(SERVICES))) 90 91 if client.sas_token: 92 return AzCopyCredentials(sas_token=client.sas_token) 93 94 # if account key provided, generate a sas token 95 if client.account_key: 96 sas_token = _generate_sas_token(cmd, client.account_name, client.account_key, service) 97 return AzCopyCredentials(sas_token=sas_token) 98 return None 99 100 101 def _unserialize_non_msi_token_payload(token_info): 102 import jwt # pylint: disable=import-error 103 104 parsed_authority = urlparse(token_info['_authority']) 105 decode = jwt.decode(token_info['accessToken'], verify=False, 
algorithms=['RS256']) 106 return { 107 'access_token': token_info['accessToken'], 108 'refresh_token': token_info['refreshToken'], 109 'expires_in': str(token_info['expiresIn']), 110 'not_before': str(decode['nbf']), 111 'expires_on': str(int((datetime.datetime.strptime( 112 token_info['expiresOn'], "%Y-%m-%d %H:%M:%S.%f")).timestamp())), 113 'resource': STORAGE_RESOURCE_ENDPOINT, 114 'token_type': token_info['tokenType'], 115 '_tenant': parsed_authority.path.strip('/'), 116 '_client_id': token_info['_clientId'], 117 '_ad_endpoint': '{uri.scheme}://{uri.netloc}'.format(uri=parsed_authority) 118 } 119 120 121 def _generate_sas_token(cmd, account_name, account_key, service): 122 from .._client_factory import cloud_storage_account_service_factory 123 from .._validators import resource_type_type, services_type 124 125 kwargs = { 126 'account_name': account_name, 127 'account_key': account_key 128 } 129 cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs) 130 t_account_permissions = cmd.loader.get_sdk('common.models#AccountPermissions') 131 132 return cloud_storage_client.generate_shared_access_signature( 133 services_type(cmd.loader)(service[0]), 134 resource_type_type(cmd.loader)('sco'), 135 t_account_permissions(_str='rwdlacup'), 136 datetime.datetime.utcnow() + datetime.timedelta(days=1) 137 ) 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/storage-preview/azext_storage_preview/azcopy/util.py b/src/storage-preview/azext_storage_preview/azcopy/util.py --- a/src/storage-preview/azext_storage_preview/azcopy/util.py +++ b/src/storage-preview/azext_storage_preview/azcopy/util.py @@ -102,7 +102,7 @@ import jwt # pylint: disable=import-error parsed_authority = urlparse(token_info['_authority']) - decode = jwt.decode(token_info['accessToken'], verify=False, algorithms=['RS256']) + decode = jwt.decode(token_info['accessToken'], algorithms=['RS256'], options={"verify_signature": False}) return { 'access_token': token_info['accessToken'], 'refresh_token': token_info['refreshToken'], diff --git a/src/storage-preview/setup.py b/src/storage-preview/setup.py --- a/src/storage-preview/setup.py +++ b/src/storage-preview/setup.py @@ -8,7 +8,7 @@ from codecs import open from setuptools import setup, find_packages -VERSION = "0.7.3" +VERSION = "0.7.4" CLASSIFIERS = [ 'Development Status :: 4 - Beta',
{"golden_diff": "diff --git a/src/storage-preview/azext_storage_preview/azcopy/util.py b/src/storage-preview/azext_storage_preview/azcopy/util.py\n--- a/src/storage-preview/azext_storage_preview/azcopy/util.py\n+++ b/src/storage-preview/azext_storage_preview/azcopy/util.py\n@@ -102,7 +102,7 @@\n import jwt # pylint: disable=import-error\n \n parsed_authority = urlparse(token_info['_authority'])\n- decode = jwt.decode(token_info['accessToken'], verify=False, algorithms=['RS256'])\n+ decode = jwt.decode(token_info['accessToken'], algorithms=['RS256'], options={\"verify_signature\": False})\n return {\n 'access_token': token_info['accessToken'],\n 'refresh_token': token_info['refreshToken'],\ndiff --git a/src/storage-preview/setup.py b/src/storage-preview/setup.py\n--- a/src/storage-preview/setup.py\n+++ b/src/storage-preview/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.7.3\"\n+VERSION = \"0.7.4\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "storage blob directory download throwing exception \"Could not deserialize key data. The data may be in an incorrect format, or it may be encrypted with an unsupported algorithm\"\n### Extension name (the extension in question)\r\nStorage Preview, v0.7.3\r\n\r\n### Description of the issue (in as much detail as possible)\r\nstorage blob directory download throwing exception \"Could not deserialize key data. The data may be in an incorrect format, or it may be encrypted with an unsupported algorithm\"\r\n\r\nFrom the exception seems like the token is not encrypted in RS256 and decoding is failing?\r\n\r\n-----\r\nDetailed Log:\r\n\r\nCMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:24,677 | INFO | az_command_data_logger | command args: storage blob directory download -d {} -s {} -c {} --account-name {} --auth-mode {} --recursive -o {} --debug\r\nCMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,234 | INFO | az_command_data_logger | extension name: storage-preview\r\nCMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,234 | INFO | az_command_data_logger | extension version: 0.7.3\r\nCMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,662 | ERROR | az_command_data_logger | The command failed with an unexpected error. Here is the traceback:\r\nCMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,667 | ERROR | az_command_data_logger | Could not deserialize key data. The data may be in an incorrect format or it may be encrypted with an unsupported algorithm.\r\nTraceback (most recent call last):\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\jwt/algorithms.py\", line 256, in prepare_key\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\cryptography/hazmat/primitives/serialization/base.py\", line 18, in load_pem_private_key\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\cryptography/hazmat/backends/openssl/backend.py\", line 1244, in load_pem_private_key\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\cryptography/hazmat/backends/openssl/backend.py\", line 1475, in _load_key\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\cryptography/hazmat/backends/openssl/backend.py\", line 1517, in _handle_key_loading_error\r\nValueError: Could not deserialize key data. 
The data may be in an incorrect format or it may be encrypted with an unsupported algorithm.\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\knack/cli.py\", line 231, in invoke\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\azure/cli/core/commands/__init__.py\", line 657, in execute\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\azure/cli/core/commands/__init__.py\", line 720, in _run_jobs_serially\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\azure/cli/core/commands/__init__.py\", line 712, in _run_job\r\n File \"C:\\Users\\someuserwhomyoudontknow\\.azure\\cliextensions\\storage-preview\\azext_storage_preview\\__init__.py\", line 252, in new_handler\r\n handler(ex)\r\n File \"C:\\Users\\someuserwhomyoudontknow\\.azure\\cliextensions\\storage-preview\\azext_storage_preview\\__init__.py\", line 194, in handler\r\n raise ex\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\azure/cli/core/commands/__init__.py\", line 691, in _run_job\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\azure/cli/core/commands/__init__.py\", line 328, in __call__\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\azure/cli/core/commands/command_operation.py\", line 121, in handler\r\n File \"C:\\Users\\someuserwhomyoudontknow\\.azure\\cliextensions\\storage-preview\\azext_storage_preview\\operations\\azcopy.py\", line 24, in storage_blob_download\r\n azcopy = _azcopy_blob_client(cmd, client)\r\n File \"C:\\Users\\someuserwhomyoudontknow\\.azure\\cliextensions\\storage-preview\\azext_storage_preview\\operations\\azcopy.py\", line 65, in _azcopy_blob_client\r\n return AzCopy(creds=blob_client_auth_for_azcopy(cmd, client))\r\n File \"C:\\Users\\someuserwhomyoudontknow\\.azure\\cliextensions\\storage-preview\\azext_storage_preview\\azcopy\\util.py\", line 81, in blob_client_auth_for_azcopy\r\n token_info = _unserialize_non_msi_token_payload(token_info)\r\n File \"C:\\Users\\someuserwhomyoudontknow\\.azure\\cliextensions\\storage-preview\\azext_storage_preview\\azcopy\\util.py\", line 105, in _unserialize_non_msi_token_payload\r\n decode = jwt.decode(token_info['accessToken'], verify=False, algorithms=['RS256'])\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\jwt/api_jwt.py\", line 119, in decode\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\jwt/api_jwt.py\", line 90, in decode_complete\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\jwt/api_jws.py\", line 149, in decode_complete\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\jwt/api_jws.py\", line 233, in _verify_signature\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\jwt/algorithms.py\", line 258, in prepare_key\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\cryptography/hazmat/primitives/serialization/base.py\", line 23, in load_pem_public_key\r\n File \"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\cryptography/hazmat/backends/openssl/backend.py\", line 1274, in load_pem_public_key\r\n File 
\"D:\\a\\1\\s\\build_scripts\\windows\\artifacts\\cli\\Lib\\site-packages\\cryptography/hazmat/backends/openssl/backend.py\", line 1517, in _handle_key_loading_error\r\nValueError: Could not deserialize key data. The data may be in an incorrect format or it may be encrypted with an unsupported algorithm.\r\nCMD-LOG-LINE-BEGIN 5728 | 2021-07-09 16:36:25,669 | INFO | az_command_data_logger | exit code: 1\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.7.3\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = ['azure-core']\n\nsetup(\n name='storage-preview',\n version=VERSION,\n description='Provides a preview for upcoming storage features.',\n long_description='An Azure CLI Extension for storage preview features. This will replace the full storage module.',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/storage-preview',\n classifiers=CLASSIFIERS,\n package_data={\n 'azext_storage_preview': ['azext_metadata.json', 'azcopy/*/*']\n },\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/storage-preview/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nimport os\nimport json\nimport platform\nimport subprocess\nimport datetime\nfrom six.moves.urllib.parse import urlparse\nfrom azure.cli.core._profile import Profile\nfrom knack.log import get_logger\n\nlogger = get_logger(__name__)\n\n\nSTORAGE_RESOURCE_ENDPOINT = \"https://storage.azure.com\"\nSERVICES = {'blob', 'file'}\nAZCOPY_VERSION = '10.5.0'\n\n\nclass AzCopy(object):\n system_executable_path = {\n 'Darwin': ['azcopy_darwin_amd64_{}'.format(AZCOPY_VERSION), 'azcopy'],\n 'Linux': ['azcopy_linux_amd64_{}'.format(AZCOPY_VERSION), 'azcopy'],\n 'Windows': ['azcopy_windows_amd64_{}'.format(AZCOPY_VERSION), 'azcopy.exe']\n }\n\n def __init__(self, creds=None):\n self.system = platform.system()\n curr_path = os.path.dirname(os.path.realpath(__file__))\n self.executable = os.path.join(curr_path, *AzCopy.system_executable_path[self.system])\n self.creds = creds\n\n def run_command(self, args):\n command = [self.executable] + args\n logger.warning(\"Azcopy command: %s\", command)\n env_kwargs = {}\n if self.creds and self.creds.token_info:\n env_kwargs = {'AZCOPY_OAUTH_TOKEN_INFO': json.dumps(self.creds.token_info)}\n subprocess.call(command, env=dict(os.environ, **env_kwargs))\n\n def copy(self, source, destination, flags=None):\n flags = flags or []\n self.run_command(['copy', source, destination] + flags)\n\n def remove(self, target, flags=None):\n flags = flags or []\n self.run_command(['remove', target] + flags)\n\n def sync(self, source, destination, flags=None):\n flags = flags or []\n self.run_command(['sync', source, destination] + flags)\n\n\nclass AzCopyCredentials(object): # pylint: disable=too-few-public-methods\n def __init__(self, sas_token=None, token_info=None):\n self.sas_token = sas_token\n self.token_info = token_info\n\n\ndef login_auth_for_azcopy(cmd):\n token_info = Profile(cli_ctx=cmd.cli_ctx).get_raw_token(resource=STORAGE_RESOURCE_ENDPOINT)[0][2]\n try:\n token_info = _unserialize_non_msi_token_payload(token_info)\n except KeyError: # unserialized MSI token payload\n raise Exception('MSI auth not yet supported.')\n return AzCopyCredentials(token_info=token_info)\n\n\ndef blob_client_auth_for_azcopy(cmd, blob_client):\n azcopy_creds = storage_client_auth_for_azcopy(cmd, blob_client, 'blob')\n if azcopy_creds is not None:\n return azcopy_creds\n\n # oauth mode\n token_info = Profile(cli_ctx=cmd.cli_ctx).get_raw_token(resource=STORAGE_RESOURCE_ENDPOINT)[0][2]\n try:\n token_info = _unserialize_non_msi_token_payload(token_info)\n except KeyError: # unserialized MSI token payload\n raise Exception('MSI auth not yet supported.')\n return AzCopyCredentials(token_info=token_info)\n\n\ndef storage_client_auth_for_azcopy(cmd, client, service):\n if service not in SERVICES:\n raise Exception('{} not one of: {}'.format(service, str(SERVICES)))\n\n if client.sas_token:\n return AzCopyCredentials(sas_token=client.sas_token)\n\n # if account key provided, generate a sas token\n if client.account_key:\n sas_token = _generate_sas_token(cmd, client.account_name, client.account_key, service)\n return AzCopyCredentials(sas_token=sas_token)\n return None\n\n\ndef _unserialize_non_msi_token_payload(token_info):\n import jwt # pylint: disable=import-error\n\n parsed_authority = urlparse(token_info['_authority'])\n decode = jwt.decode(token_info['accessToken'], verify=False, algorithms=['RS256'])\n return {\n 'access_token': 
token_info['accessToken'],\n 'refresh_token': token_info['refreshToken'],\n 'expires_in': str(token_info['expiresIn']),\n 'not_before': str(decode['nbf']),\n 'expires_on': str(int((datetime.datetime.strptime(\n token_info['expiresOn'], \"%Y-%m-%d %H:%M:%S.%f\")).timestamp())),\n 'resource': STORAGE_RESOURCE_ENDPOINT,\n 'token_type': token_info['tokenType'],\n '_tenant': parsed_authority.path.strip('/'),\n '_client_id': token_info['_clientId'],\n '_ad_endpoint': '{uri.scheme}://{uri.netloc}'.format(uri=parsed_authority)\n }\n\n\ndef _generate_sas_token(cmd, account_name, account_key, service):\n from .._client_factory import cloud_storage_account_service_factory\n from .._validators import resource_type_type, services_type\n\n kwargs = {\n 'account_name': account_name,\n 'account_key': account_key\n }\n cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)\n t_account_permissions = cmd.loader.get_sdk('common.models#AccountPermissions')\n\n return cloud_storage_client.generate_shared_access_signature(\n services_type(cmd.loader)(service[0]),\n resource_type_type(cmd.loader)('sco'),\n t_account_permissions(_str='rwdlacup'),\n datetime.datetime.utcnow() + datetime.timedelta(days=1)\n )\n", "path": "src/storage-preview/azext_storage_preview/azcopy/util.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.7.4\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = ['azure-core']\n\nsetup(\n name='storage-preview',\n version=VERSION,\n description='Provides a preview for upcoming storage features.',\n long_description='An Azure CLI Extension for storage preview features. This will replace the full storage module.',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/storage-preview',\n classifiers=CLASSIFIERS,\n package_data={\n 'azext_storage_preview': ['azext_metadata.json', 'azcopy/*/*']\n },\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/storage-preview/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nimport os\nimport json\nimport platform\nimport subprocess\nimport datetime\nfrom six.moves.urllib.parse import urlparse\nfrom azure.cli.core._profile import Profile\nfrom knack.log import get_logger\n\nlogger = get_logger(__name__)\n\n\nSTORAGE_RESOURCE_ENDPOINT = \"https://storage.azure.com\"\nSERVICES = {'blob', 'file'}\nAZCOPY_VERSION = '10.5.0'\n\n\nclass AzCopy(object):\n system_executable_path = {\n 'Darwin': ['azcopy_darwin_amd64_{}'.format(AZCOPY_VERSION), 'azcopy'],\n 'Linux': ['azcopy_linux_amd64_{}'.format(AZCOPY_VERSION), 'azcopy'],\n 'Windows': ['azcopy_windows_amd64_{}'.format(AZCOPY_VERSION), 'azcopy.exe']\n }\n\n def __init__(self, creds=None):\n self.system = platform.system()\n curr_path = os.path.dirname(os.path.realpath(__file__))\n self.executable = os.path.join(curr_path, *AzCopy.system_executable_path[self.system])\n self.creds = creds\n\n def run_command(self, args):\n command = [self.executable] + args\n logger.warning(\"Azcopy command: %s\", command)\n env_kwargs = {}\n if self.creds and self.creds.token_info:\n env_kwargs = {'AZCOPY_OAUTH_TOKEN_INFO': json.dumps(self.creds.token_info)}\n subprocess.call(command, env=dict(os.environ, **env_kwargs))\n\n def copy(self, source, destination, flags=None):\n flags = flags or []\n self.run_command(['copy', source, destination] + flags)\n\n def remove(self, target, flags=None):\n flags = flags or []\n self.run_command(['remove', target] + flags)\n\n def sync(self, source, destination, flags=None):\n flags = flags or []\n self.run_command(['sync', source, destination] + flags)\n\n\nclass AzCopyCredentials(object): # pylint: disable=too-few-public-methods\n def __init__(self, sas_token=None, token_info=None):\n self.sas_token = sas_token\n self.token_info = token_info\n\n\ndef login_auth_for_azcopy(cmd):\n token_info = Profile(cli_ctx=cmd.cli_ctx).get_raw_token(resource=STORAGE_RESOURCE_ENDPOINT)[0][2]\n try:\n token_info = _unserialize_non_msi_token_payload(token_info)\n except KeyError: # unserialized MSI token payload\n raise Exception('MSI auth not yet supported.')\n return AzCopyCredentials(token_info=token_info)\n\n\ndef blob_client_auth_for_azcopy(cmd, blob_client):\n azcopy_creds = storage_client_auth_for_azcopy(cmd, blob_client, 'blob')\n if azcopy_creds is not None:\n return azcopy_creds\n\n # oauth mode\n token_info = Profile(cli_ctx=cmd.cli_ctx).get_raw_token(resource=STORAGE_RESOURCE_ENDPOINT)[0][2]\n try:\n token_info = _unserialize_non_msi_token_payload(token_info)\n except KeyError: # unserialized MSI token payload\n raise Exception('MSI auth not yet supported.')\n return AzCopyCredentials(token_info=token_info)\n\n\ndef storage_client_auth_for_azcopy(cmd, client, service):\n if service not in SERVICES:\n raise Exception('{} not one of: {}'.format(service, str(SERVICES)))\n\n if client.sas_token:\n return AzCopyCredentials(sas_token=client.sas_token)\n\n # if account key provided, generate a sas token\n if client.account_key:\n sas_token = _generate_sas_token(cmd, client.account_name, client.account_key, service)\n return AzCopyCredentials(sas_token=sas_token)\n return None\n\n\ndef _unserialize_non_msi_token_payload(token_info):\n import jwt # pylint: disable=import-error\n\n parsed_authority = urlparse(token_info['_authority'])\n decode = jwt.decode(token_info['accessToken'], algorithms=['RS256'], options={\"verify_signature\": False})\n return 
{\n 'access_token': token_info['accessToken'],\n 'refresh_token': token_info['refreshToken'],\n 'expires_in': str(token_info['expiresIn']),\n 'not_before': str(decode['nbf']),\n 'expires_on': str(int((datetime.datetime.strptime(\n token_info['expiresOn'], \"%Y-%m-%d %H:%M:%S.%f\")).timestamp())),\n 'resource': STORAGE_RESOURCE_ENDPOINT,\n 'token_type': token_info['tokenType'],\n '_tenant': parsed_authority.path.strip('/'),\n '_client_id': token_info['_clientId'],\n '_ad_endpoint': '{uri.scheme}://{uri.netloc}'.format(uri=parsed_authority)\n }\n\n\ndef _generate_sas_token(cmd, account_name, account_key, service):\n from .._client_factory import cloud_storage_account_service_factory\n from .._validators import resource_type_type, services_type\n\n kwargs = {\n 'account_name': account_name,\n 'account_key': account_key\n }\n cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)\n t_account_permissions = cmd.loader.get_sdk('common.models#AccountPermissions')\n\n return cloud_storage_client.generate_shared_access_signature(\n services_type(cmd.loader)(service[0]),\n resource_type_type(cmd.loader)('sco'),\n t_account_permissions(_str='rwdlacup'),\n datetime.datetime.utcnow() + datetime.timedelta(days=1)\n )\n", "path": "src/storage-preview/azext_storage_preview/azcopy/util.py"}]}
4004
262
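The fix in the record above boils down to the PyJWT 2.x calling convention for decoding a token without verifying its signature: the old `verify=False` keyword no longer disables verification, so the library tries to load the plain access token as a key and fails, as the traceback shows. A minimal standalone sketch of the corrected call follows; the helper name is made up and this is not the extension's actual code.

```python
import jwt  # PyJWT >= 2.0


def peek_claims(access_token: str) -> dict:
    # Signature verification must now be switched off explicitly via
    # `options`; the token is only being parsed for its claims here.
    return jwt.decode(
        access_token,
        algorithms=["RS256"],
        options={"verify_signature": False},
    )
```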
gh_patches_debug_61831
rasdani/github-patches
git_diff
pulp__pulpcore-3411
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc **Version** 3.18.10 **Describe the bug** Migration 0077 fails when you have a remote that has an @ somewhere in the path ``` Applying core.0077_move_remote_url_credentials...Traceback (most recent call last): File "/usr/bin/pulpcore-manager", line 33, in <module> sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')()) File "/usr/lib/python3.9/site-packages/pulpcore/app/manage.py", line 11, in manage execute_from_command_line(sys.argv) File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line utility.execute() File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 413, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 354, in run_from_argv self.execute(*args, **cmd_options) File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 398, in execute output = self.handle(*args, **options) File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 89, in wrapped res = handle_func(*args, **kwargs) File "/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py", line 244, in handle post_migrate_state = executor.migrate( File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 117, in migrate state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial) File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 147, in _migrate_all_forwards state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 227, in apply_migration state = migration.apply(state, schema_editor) File "/usr/lib/python3.9/site-packages/django/db/migrations/migration.py", line 126, in apply operation.database_forwards(self.app_label, schema_editor, old_state, project_state) File "/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py", line 190, in database_forwards self.code(from_state.apps, schema_editor) File "/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py", line 19, in move_remote_url_credentials _, url_split = url.netloc.rsplit("@", maxsplit=1) ValueError: not enough values to unpack (expected 2, got 1) ``` **To Reproduce** Steps to reproduce the behavior: * Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/` * Try to migrate 0077 **Expected behavior** migration aplies **Additional context** https://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pulpcore/app/migrations/0077_move_remote_url_credentials.py` Content: ``` 1 # Generated by Django 3.2.6 on 2021-09-29 14:00 2 3 from urllib.parse import urlparse, urlunparse 4 5 from django.db import migrations 6 7 8 def move_remote_url_credentials(apps, schema_editor): 9 Remote = apps.get_model("core", "Remote") 10 11 for remote in Remote.objects.filter(url__contains="@").iterator(): 12 url = urlparse(remote.url) 13 14 if not remote.username: 15 remote.username = url.username 16 if not remote.password: 17 remote.password = url.password 18 19 _, url_split = url.netloc.rsplit("@", maxsplit=1) 20 remote.url = urlunparse(url._replace(netloc=url_split)) 21 remote.save() 22 23 24 class Migration(migrations.Migration): 25 26 dependencies = [ 27 ('core', '0076_remove_reserved_resource'), 28 ] 29 30 operations = [ 31 migrations.RunPython( 32 code=move_remote_url_credentials, 33 reverse_code=migrations.RunPython.noop, 34 elidable=True, 35 ) 36 ] 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py --- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py +++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py @@ -11,6 +11,11 @@ for remote in Remote.objects.filter(url__contains="@").iterator(): url = urlparse(remote.url) + if '@' not in url.netloc: + # URLs can have an @ in other places than the netloc, + # but those do not indicate credentials + continue + if not remote.username: remote.username = url.username if not remote.password:
{"golden_diff": "diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n--- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n+++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n@@ -11,6 +11,11 @@\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n \n+ if '@' not in url.netloc:\n+ # URLs can have an @ in other places than the netloc,\n+ # but those do not indicate credentials\n+ continue\n+\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n", "issue": "0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc\n**Version**\r\n3.18.10\r\n\r\n**Describe the bug**\r\nMigration 0077 fails when you have a remote that has an @ somewhere in the path\r\n\r\n```\r\n Applying core.0077_move_remote_url_credentials...Traceback (most recent call last):\r\n File \"/usr/bin/pulpcore-manager\", line 33, in <module>\r\n sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')())\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/manage.py\", line 11, in manage\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 354, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 398, in execute\r\n output = self.handle(*args, **options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 89, in wrapped\r\n res = handle_func(*args, **kwargs)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py\", line 244, in handle\r\n post_migrate_state = executor.migrate(\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 117, in migrate\r\n state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 147, in _migrate_all_forwards\r\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 227, in apply_migration\r\n state = migration.apply(state, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/migration.py\", line 126, in apply\r\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py\", line 190, in database_forwards\r\n self.code(from_state.apps, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py\", line 19, in move_remote_url_credentials\r\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\r\nValueError: not enough values to unpack (expected 2, got 1)\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n* Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/`\r\n* Try to migrate 0077\r\n\r\n**Expected 
behavior**\r\nmigration aplies\r\n\r\n**Additional context**\r\nhttps://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.6 on 2021-09-29 14:00\n\nfrom urllib.parse import urlparse, urlunparse\n\nfrom django.db import migrations\n\n\ndef move_remote_url_credentials(apps, schema_editor):\n Remote = apps.get_model(\"core\", \"Remote\")\n\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n remote.password = url.password\n\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\n remote.url = urlunparse(url._replace(netloc=url_split))\n remote.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0076_remove_reserved_resource'),\n ]\n\n operations = [\n migrations.RunPython(\n code=move_remote_url_credentials,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n )\n ]\n", "path": "pulpcore/app/migrations/0077_move_remote_url_credentials.py"}], "after_files": [{"content": "# Generated by Django 3.2.6 on 2021-09-29 14:00\n\nfrom urllib.parse import urlparse, urlunparse\n\nfrom django.db import migrations\n\n\ndef move_remote_url_credentials(apps, schema_editor):\n Remote = apps.get_model(\"core\", \"Remote\")\n\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n\n if '@' not in url.netloc:\n # URLs can have an @ in other places than the netloc,\n # but those do not indicate credentials\n continue\n\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n remote.password = url.password\n\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\n remote.url = urlunparse(url._replace(netloc=url_split))\n remote.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0076_remove_reserved_resource'),\n ]\n\n operations = [\n migrations.RunPython(\n code=move_remote_url_credentials,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n )\n ]\n", "path": "pulpcore/app/migrations/0077_move_remote_url_credentials.py"}]}
1388
172
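The guard added in the record above relies on how `urllib.parse.urlparse` splits a URL: only an `@` before the host ends up in `netloc` (as credentials), while an `@` later in the URL stays in `path` and must not be split on. A small illustration using the Copr URL from the issue; the credentialled URL is invented for contrast.

```python
from urllib.parse import urlparse

creds = urlparse("https://user:secret@dl.example.org/results/")
plain = urlparse(
    "https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/"
)

print(creds.netloc)   # 'user:secret@dl.example.org' -> '@' present, safe to split
print(plain.netloc)   # 'download.copr.fedorainfracloud.org' -> no '@'
print(plain.path)     # '/results/@caddy/caddy/epel-8-x86_64/'
```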
gh_patches_debug_3983
rasdani/github-patches
git_diff
pwndbg__pwndbg-642
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- nextcall with symbol bug ### Description after nextcall with unknown symbol/address (like nextcall lol) gdb won't run again ### Steps to reproduce ``` gdb whatever > start > nextcall lol > start > continue Warning: Cannot insert breakpoint -46. Cannot access memory at address 0x7ffff7a6f916 Command aborted. ``` ### My setup GNU gdb (Debian 7.12-6) 7.12.0.20161007 nextcall with symbol bug ### Description after nextcall with unknown symbol/address (like nextcall lol) gdb won't run again ### Steps to reproduce ``` gdb whatever > start > nextcall lol > start > continue Warning: Cannot insert breakpoint -46. Cannot access memory at address 0x7ffff7a6f916 Command aborted. ``` ### My setup GNU gdb (Debian 7.12-6) 7.12.0.20161007 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/next.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 Commands for setting temporary breakpoints on the next 5 instruction of some type (call, branch, etc.) 6 """ 7 from __future__ import absolute_import 8 from __future__ import division 9 from __future__ import print_function 10 from __future__ import unicode_literals 11 12 import re 13 14 import capstone 15 import gdb 16 17 import pwndbg.disasm 18 import pwndbg.regs 19 from pwndbg.color import message 20 21 jumps = set(( 22 capstone.CS_GRP_CALL, 23 capstone.CS_GRP_JUMP, 24 capstone.CS_GRP_RET, 25 capstone.CS_GRP_IRET 26 )) 27 28 interrupts = set((capstone.CS_GRP_INT,)) 29 30 31 def next_int(address=None): 32 """ 33 If there is a syscall in the current basic black, 34 return the instruction of the one closest to $PC. 35 36 Otherwise, return None. 
37 """ 38 if address is None: 39 ins = pwndbg.disasm.one(pwndbg.regs.pc) 40 if not ins: 41 return None 42 address = ins.next 43 44 ins = pwndbg.disasm.one(address) 45 while ins: 46 if set(ins.groups) & jumps: 47 return None 48 if set(ins.groups) & interrupts: 49 return ins 50 ins = pwndbg.disasm.one(ins.next) 51 52 return None 53 54 55 def next_branch(address=None): 56 if address is None: 57 ins = pwndbg.disasm.one(pwndbg.regs.pc) 58 if not ins: 59 return None 60 address = ins.next 61 62 ins = pwndbg.disasm.one(address) 63 while ins: 64 if set(ins.groups) & jumps: 65 return ins 66 ins = pwndbg.disasm.one(ins.next) 67 68 return None 69 70 71 def break_next_branch(address=None): 72 ins = next_branch(address) 73 74 if ins: 75 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True) 76 gdb.execute('continue', from_tty=False, to_string=True) 77 return ins 78 79 80 def break_next_interrupt(address=None): 81 ins = next_int(address) 82 83 if ins: 84 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True) 85 gdb.execute('continue', from_tty=False, to_string=True) 86 return ins 87 88 89 def break_next_call(symbol_regex=None): 90 while pwndbg.proc.alive: 91 ins = break_next_branch() 92 93 if not ins: 94 break 95 96 # continue if not a call 97 if capstone.CS_GRP_CALL not in ins.groups: 98 continue 99 100 # return call if we don't search for a symbol 101 if not symbol_regex: 102 return ins 103 104 # return call if we match target address 105 if ins.target_const and re.match('%s$' % symbol_regex, hex(ins.target)): 106 return ins 107 108 # return call if we match symbol name 109 if ins.symbol and re.match('%s$' % symbol_regex, ins.symbol): 110 return ins 111 112 113 def break_next_ret(address=None): 114 while pwndbg.proc.alive: 115 ins = break_next_branch(address) 116 117 if not ins: 118 break 119 120 if capstone.CS_GRP_RET in ins.groups: 121 return ins 122 123 124 def break_on_program_code(): 125 """ 126 Breaks on next instruction that belongs to process' objfile code. 127 :return: True for success, False when process ended or when pc is at the code. 128 """ 129 mp = pwndbg.proc.mem_page 130 start = mp.start 131 end = mp.end 132 133 if start <= pwndbg.regs.pc < end: 134 print(message.error('The pc is already at the binary objfile code. Not stepping.')) 135 return False 136 137 while pwndbg.proc.alive: 138 gdb.execute('si', from_tty=False, to_string=False) 139 140 addr = pwndbg.regs.pc 141 if start <= addr < end: 142 return True 143 144 return False 145 146 147 def break_on_next(address=None): 148 address = address or pwndbg.regs.pc 149 ins = pwndbg.disasm.one(address) 150 151 gdb.Breakpoint("*%#x" % (ins.address + ins.size), temporary=True) 152 gdb.execute('continue', from_tty=False, to_string=True) 153 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/next.py b/pwndbg/next.py --- a/pwndbg/next.py +++ b/pwndbg/next.py @@ -27,6 +27,14 @@ interrupts = set((capstone.CS_GRP_INT,)) [email protected] +def clear_temp_breaks(): + if not pwndbg.proc.alive: + breakpoints = gdb.breakpoints() + if breakpoints: + for bp in breakpoints: + if bp.temporary and not bp.visible: #visible is used instead of internal because older gdb's don't support internal + bp.delete() def next_int(address=None): """
{"golden_diff": "diff --git a/pwndbg/next.py b/pwndbg/next.py\n--- a/pwndbg/next.py\n+++ b/pwndbg/next.py\n@@ -27,6 +27,14 @@\n \n interrupts = set((capstone.CS_GRP_INT,))\n \[email protected]\n+def clear_temp_breaks():\n+ if not pwndbg.proc.alive:\n+ breakpoints = gdb.breakpoints()\n+ if breakpoints:\n+ for bp in breakpoints:\n+ if bp.temporary and not bp.visible: #visible is used instead of internal because older gdb's don't support internal \n+ bp.delete()\n \n def next_int(address=None):\n \"\"\"\n", "issue": "nextcall with symbol bug\n### Description\r\n\r\nafter nextcall with unknown symbol/address (like nextcall lol) gdb won't run again\r\n\r\n### Steps to reproduce\r\n```\r\ngdb whatever\r\n> start\r\n> nextcall lol\r\n> start\r\n> continue\r\nWarning:\r\nCannot insert breakpoint -46.\r\nCannot access memory at address 0x7ffff7a6f916\r\n\r\nCommand aborted.\r\n```\r\n\r\n### My setup\r\n\r\nGNU gdb (Debian 7.12-6) 7.12.0.20161007\nnextcall with symbol bug\n### Description\r\n\r\nafter nextcall with unknown symbol/address (like nextcall lol) gdb won't run again\r\n\r\n### Steps to reproduce\r\n```\r\ngdb whatever\r\n> start\r\n> nextcall lol\r\n> start\r\n> continue\r\nWarning:\r\nCannot insert breakpoint -46.\r\nCannot access memory at address 0x7ffff7a6f916\r\n\r\nCommand aborted.\r\n```\r\n\r\n### My setup\r\n\r\nGNU gdb (Debian 7.12-6) 7.12.0.20161007\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommands for setting temporary breakpoints on the next\ninstruction of some type (call, branch, etc.)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport capstone\nimport gdb\n\nimport pwndbg.disasm\nimport pwndbg.regs\nfrom pwndbg.color import message\n\njumps = set((\n capstone.CS_GRP_CALL,\n capstone.CS_GRP_JUMP,\n capstone.CS_GRP_RET,\n capstone.CS_GRP_IRET\n))\n\ninterrupts = set((capstone.CS_GRP_INT,))\n\n\ndef next_int(address=None):\n \"\"\"\n If there is a syscall in the current basic black,\n return the instruction of the one closest to $PC.\n\n Otherwise, return None.\n \"\"\"\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return None\n if set(ins.groups) & interrupts:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef next_branch(address=None):\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef break_next_branch(address=None):\n ins = next_branch(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute('continue', from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_interrupt(address=None):\n ins = next_int(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute('continue', from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_call(symbol_regex=None):\n while pwndbg.proc.alive:\n ins = break_next_branch()\n\n if not ins:\n break\n\n # continue if not a call\n if capstone.CS_GRP_CALL not in ins.groups:\n continue\n\n # return call if we don't search for a symbol\n if not symbol_regex:\n return ins\n\n 
# return call if we match target address\n if ins.target_const and re.match('%s$' % symbol_regex, hex(ins.target)):\n return ins\n\n # return call if we match symbol name\n if ins.symbol and re.match('%s$' % symbol_regex, ins.symbol):\n return ins\n\n\ndef break_next_ret(address=None):\n while pwndbg.proc.alive:\n ins = break_next_branch(address)\n\n if not ins:\n break\n\n if capstone.CS_GRP_RET in ins.groups:\n return ins\n\n\ndef break_on_program_code():\n \"\"\"\n Breaks on next instruction that belongs to process' objfile code.\n :return: True for success, False when process ended or when pc is at the code.\n \"\"\"\n mp = pwndbg.proc.mem_page\n start = mp.start\n end = mp.end\n\n if start <= pwndbg.regs.pc < end:\n print(message.error('The pc is already at the binary objfile code. Not stepping.'))\n return False\n\n while pwndbg.proc.alive:\n gdb.execute('si', from_tty=False, to_string=False)\n\n addr = pwndbg.regs.pc\n if start <= addr < end:\n return True\n\n return False\n\n\ndef break_on_next(address=None):\n address = address or pwndbg.regs.pc\n ins = pwndbg.disasm.one(address)\n\n gdb.Breakpoint(\"*%#x\" % (ins.address + ins.size), temporary=True)\n gdb.execute('continue', from_tty=False, to_string=True)\n", "path": "pwndbg/next.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommands for setting temporary breakpoints on the next\ninstruction of some type (call, branch, etc.)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport capstone\nimport gdb\n\nimport pwndbg.disasm\nimport pwndbg.regs\nfrom pwndbg.color import message\n\njumps = set((\n capstone.CS_GRP_CALL,\n capstone.CS_GRP_JUMP,\n capstone.CS_GRP_RET,\n capstone.CS_GRP_IRET\n))\n\ninterrupts = set((capstone.CS_GRP_INT,))\n\[email protected]\ndef clear_temp_breaks():\n if not pwndbg.proc.alive:\n breakpoints = gdb.breakpoints()\n if breakpoints:\n for bp in breakpoints:\n if bp.temporary and not bp.visible: #visible is used instead of internal because older gdb's don't support internal \n bp.delete()\n\ndef next_int(address=None):\n \"\"\"\n If there is a syscall in the current basic black,\n return the instruction of the one closest to $PC.\n\n Otherwise, return None.\n \"\"\"\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return None\n if set(ins.groups) & interrupts:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef next_branch(address=None):\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef break_next_branch(address=None):\n ins = next_branch(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute('continue', from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_interrupt(address=None):\n ins = next_int(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute('continue', from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_call(symbol_regex=None):\n while pwndbg.proc.alive:\n ins = break_next_branch()\n\n if not ins:\n break\n\n # continue 
if not a call\n if capstone.CS_GRP_CALL not in ins.groups:\n continue\n\n # return call if we don't search for a symbol\n if not symbol_regex:\n return ins\n\n # return call if we match target address\n if ins.target_const and re.match('%s$' % symbol_regex, hex(ins.target)):\n return ins\n\n # return call if we match symbol name\n if ins.symbol and re.match('%s$' % symbol_regex, ins.symbol):\n return ins\n\n\ndef break_next_ret(address=None):\n while pwndbg.proc.alive:\n ins = break_next_branch(address)\n\n if not ins:\n break\n\n if capstone.CS_GRP_RET in ins.groups:\n return ins\n\n\ndef break_on_program_code():\n \"\"\"\n Breaks on next instruction that belongs to process' objfile code.\n :return: True for success, False when process ended or when pc is at the code.\n \"\"\"\n mp = pwndbg.proc.mem_page\n start = mp.start\n end = mp.end\n\n if start <= pwndbg.regs.pc < end:\n print(message.error('The pc is already at the binary objfile code. Not stepping.'))\n return False\n\n while pwndbg.proc.alive:\n gdb.execute('si', from_tty=False, to_string=False)\n\n addr = pwndbg.regs.pc\n if start <= addr < end:\n return True\n\n return False\n\n\ndef break_on_next(address=None):\n address = address or pwndbg.regs.pc\n ins = pwndbg.disasm.one(address)\n\n gdb.Breakpoint(\"*%#x\" % (ins.address + ins.size), temporary=True)\n gdb.execute('continue', from_tty=False, to_string=True)\n", "path": "pwndbg/next.py"}]}
1,787
148
gh_patches_debug_28439
rasdani/github-patches
git_diff
iterative__dvc-10423
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature proposal: `dvc artifacts get --show-url` DVC currently supports `dvc get --show-url` as a way to retrieve just the URL of a DVC-versioned object as opposed to the object itself. However, there is no equivalent for `dvc artifacts get`. This came as a customer request (to allow easier sharing of results even to people who are not DVC/DVC Studio users). It also has advantages e.g. in model deployment to Sagemaker (which requires the artifact URL on S3). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/commands/artifacts.py` Content: ``` 1 from dvc.cli import completion, formatter 2 from dvc.cli.command import CmdBaseNoRepo 3 from dvc.cli.utils import DictAction, append_doc_link 4 from dvc.exceptions import DvcException 5 from dvc.log import logger 6 7 logger = logger.getChild(__name__) 8 9 10 class CmdArtifactsGet(CmdBaseNoRepo): 11 def run(self): 12 from dvc.repo.artifacts import Artifacts 13 from dvc.scm import CloneError 14 from dvc.ui import ui 15 16 try: 17 count, out = Artifacts.get( 18 self.args.url, 19 name=self.args.name, 20 version=self.args.rev, 21 stage=self.args.stage, 22 force=self.args.force, 23 config=self.args.config, 24 remote=self.args.remote, 25 remote_config=self.args.remote_config, 26 out=self.args.out, 27 ) 28 ui.write(f"Downloaded {count} file(s) to '{out}'") 29 return 0 30 except CloneError: 31 logger.exception("failed to get '%s'", self.args.name) 32 return 1 33 except DvcException: 34 logger.exception( 35 "failed to get '%s' from '%s'", self.args.name, self.args.url 36 ) 37 return 1 38 39 40 def add_parser(subparsers, parent_parser): 41 ARTIFACTS_HELP = "DVC model registry artifact commands." 42 43 artifacts_parser = subparsers.add_parser( 44 "artifacts", 45 parents=[parent_parser], 46 description=append_doc_link(ARTIFACTS_HELP, "artifacts"), 47 help=ARTIFACTS_HELP, 48 formatter_class=formatter.RawDescriptionHelpFormatter, 49 ) 50 artifacts_subparsers = artifacts_parser.add_subparsers( 51 dest="cmd", 52 help="Use `dvc artifacts CMD --help` to display command-specific help.", 53 required=True, 54 ) 55 56 ARTIFACTS_GET_HELP = "Download an artifact from a DVC project." 57 get_parser = artifacts_subparsers.add_parser( 58 "get", 59 parents=[parent_parser], 60 description=append_doc_link(ARTIFACTS_GET_HELP, "artifacts/get"), 61 help=ARTIFACTS_HELP, 62 formatter_class=formatter.RawDescriptionHelpFormatter, 63 ) 64 get_parser.add_argument("url", help="Location of DVC repository to download from") 65 get_parser.add_argument( 66 "name", help="Name of artifact in the repository" 67 ).complete = completion.FILE 68 get_parser.add_argument( 69 "--rev", 70 nargs="?", 71 help="Artifact version", 72 metavar="<version>", 73 ) 74 get_parser.add_argument( 75 "--stage", 76 nargs="?", 77 help="Artifact stage", 78 metavar="<stage>", 79 ) 80 get_parser.add_argument( 81 "-o", 82 "--out", 83 nargs="?", 84 help="Destination path to download artifact to", 85 metavar="<path>", 86 ).complete = completion.DIR 87 get_parser.add_argument( 88 "-j", 89 "--jobs", 90 type=int, 91 help=( 92 "Number of jobs to run simultaneously. " 93 "The default value is 4 * cpu_count(). 
" 94 ), 95 metavar="<number>", 96 ) 97 get_parser.add_argument( 98 "-f", 99 "--force", 100 action="store_true", 101 default=False, 102 help="Override local file or folder if exists.", 103 ) 104 get_parser.add_argument( 105 "--config", 106 type=str, 107 help=( 108 "Path to a config file that will be merged with the config " 109 "in the target repository." 110 ), 111 ) 112 get_parser.add_argument( 113 "--remote", 114 type=str, 115 help=( 116 "Remote name to set as a default in the target repository " 117 "(only applicable when downloading from DVC remote)." 118 ), 119 ) 120 get_parser.add_argument( 121 "--remote-config", 122 type=str, 123 nargs="*", 124 action=DictAction, 125 help=( 126 "Remote config options to merge with a remote's config (default or one " 127 "specified by '--remote') in the target repository (only applicable " 128 "when downloading from DVC remote)." 129 ), 130 ) 131 get_parser.set_defaults(func=CmdArtifactsGet) 132 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/commands/artifacts.py b/dvc/commands/artifacts.py --- a/dvc/commands/artifacts.py +++ b/dvc/commands/artifacts.py @@ -13,6 +13,9 @@ from dvc.scm import CloneError from dvc.ui import ui + if self.args.show_url: + return self._show_url() + try: count, out = Artifacts.get( self.args.url, @@ -36,6 +39,28 @@ ) return 1 + def _show_url(self): + from dvc.api import artifacts_show, get_url + from dvc.ui import ui + + artifact = artifacts_show( + self.args.name, + version=self.args.rev, + stage=self.args.stage, + repo=self.args.url, + ) + + url = get_url( + artifact["path"], + repo=self.args.url, + rev=artifact["rev"], + remote=self.args.remote, + remote_config=self.args.remote_config, + ) + ui.write(url, force=True) + + return 0 + def add_parser(subparsers, parent_parser): ARTIFACTS_HELP = "DVC model registry artifact commands." @@ -84,6 +109,14 @@ help="Destination path to download artifact to", metavar="<path>", ).complete = completion.DIR + get_parser.add_argument( + "--show-url", + action="store_true", + help=( + "Print the storage location (URL) the target data would be " + "downloaded from, and exit." + ), + ) get_parser.add_argument( "-j", "--jobs",
{"golden_diff": "diff --git a/dvc/commands/artifacts.py b/dvc/commands/artifacts.py\n--- a/dvc/commands/artifacts.py\n+++ b/dvc/commands/artifacts.py\n@@ -13,6 +13,9 @@\n from dvc.scm import CloneError\n from dvc.ui import ui\n \n+ if self.args.show_url:\n+ return self._show_url()\n+\n try:\n count, out = Artifacts.get(\n self.args.url,\n@@ -36,6 +39,28 @@\n )\n return 1\n \n+ def _show_url(self):\n+ from dvc.api import artifacts_show, get_url\n+ from dvc.ui import ui\n+\n+ artifact = artifacts_show(\n+ self.args.name,\n+ version=self.args.rev,\n+ stage=self.args.stage,\n+ repo=self.args.url,\n+ )\n+\n+ url = get_url(\n+ artifact[\"path\"],\n+ repo=self.args.url,\n+ rev=artifact[\"rev\"],\n+ remote=self.args.remote,\n+ remote_config=self.args.remote_config,\n+ )\n+ ui.write(url, force=True)\n+\n+ return 0\n+\n \n def add_parser(subparsers, parent_parser):\n ARTIFACTS_HELP = \"DVC model registry artifact commands.\"\n@@ -84,6 +109,14 @@\n help=\"Destination path to download artifact to\",\n metavar=\"<path>\",\n ).complete = completion.DIR\n+ get_parser.add_argument(\n+ \"--show-url\",\n+ action=\"store_true\",\n+ help=(\n+ \"Print the storage location (URL) the target data would be \"\n+ \"downloaded from, and exit.\"\n+ ),\n+ )\n get_parser.add_argument(\n \"-j\",\n \"--jobs\",\n", "issue": "Feature proposal: `dvc artifacts get --show-url`\nDVC currently supports `dvc get --show-url` as a way to retrieve just the URL of a DVC-versioned object as opposed to the object itself.\r\n\r\nHowever, there is no equivalent for `dvc artifacts get`. This came as a customer request (to allow easier sharing of results even to people who are not DVC/DVC Studio users). It also has advantages e.g. in model deployment to Sagemaker (which requires the artifact URL on S3).\n", "before_files": [{"content": "from dvc.cli import completion, formatter\nfrom dvc.cli.command import CmdBaseNoRepo\nfrom dvc.cli.utils import DictAction, append_doc_link\nfrom dvc.exceptions import DvcException\nfrom dvc.log import logger\n\nlogger = logger.getChild(__name__)\n\n\nclass CmdArtifactsGet(CmdBaseNoRepo):\n def run(self):\n from dvc.repo.artifacts import Artifacts\n from dvc.scm import CloneError\n from dvc.ui import ui\n\n try:\n count, out = Artifacts.get(\n self.args.url,\n name=self.args.name,\n version=self.args.rev,\n stage=self.args.stage,\n force=self.args.force,\n config=self.args.config,\n remote=self.args.remote,\n remote_config=self.args.remote_config,\n out=self.args.out,\n )\n ui.write(f\"Downloaded {count} file(s) to '{out}'\")\n return 0\n except CloneError:\n logger.exception(\"failed to get '%s'\", self.args.name)\n return 1\n except DvcException:\n logger.exception(\n \"failed to get '%s' from '%s'\", self.args.name, self.args.url\n )\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n ARTIFACTS_HELP = \"DVC model registry artifact commands.\"\n\n artifacts_parser = subparsers.add_parser(\n \"artifacts\",\n parents=[parent_parser],\n description=append_doc_link(ARTIFACTS_HELP, \"artifacts\"),\n help=ARTIFACTS_HELP,\n formatter_class=formatter.RawDescriptionHelpFormatter,\n )\n artifacts_subparsers = artifacts_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc artifacts CMD --help` to display command-specific help.\",\n required=True,\n )\n\n ARTIFACTS_GET_HELP = \"Download an artifact from a DVC project.\"\n get_parser = artifacts_subparsers.add_parser(\n \"get\",\n parents=[parent_parser],\n description=append_doc_link(ARTIFACTS_GET_HELP, \"artifacts/get\"),\n help=ARTIFACTS_HELP,\n 
formatter_class=formatter.RawDescriptionHelpFormatter,\n )\n get_parser.add_argument(\"url\", help=\"Location of DVC repository to download from\")\n get_parser.add_argument(\n \"name\", help=\"Name of artifact in the repository\"\n ).complete = completion.FILE\n get_parser.add_argument(\n \"--rev\",\n nargs=\"?\",\n help=\"Artifact version\",\n metavar=\"<version>\",\n )\n get_parser.add_argument(\n \"--stage\",\n nargs=\"?\",\n help=\"Artifact stage\",\n metavar=\"<stage>\",\n )\n get_parser.add_argument(\n \"-o\",\n \"--out\",\n nargs=\"?\",\n help=\"Destination path to download artifact to\",\n metavar=\"<path>\",\n ).complete = completion.DIR\n get_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n help=(\n \"Number of jobs to run simultaneously. \"\n \"The default value is 4 * cpu_count(). \"\n ),\n metavar=\"<number>\",\n )\n get_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Override local file or folder if exists.\",\n )\n get_parser.add_argument(\n \"--config\",\n type=str,\n help=(\n \"Path to a config file that will be merged with the config \"\n \"in the target repository.\"\n ),\n )\n get_parser.add_argument(\n \"--remote\",\n type=str,\n help=(\n \"Remote name to set as a default in the target repository \"\n \"(only applicable when downloading from DVC remote).\"\n ),\n )\n get_parser.add_argument(\n \"--remote-config\",\n type=str,\n nargs=\"*\",\n action=DictAction,\n help=(\n \"Remote config options to merge with a remote's config (default or one \"\n \"specified by '--remote') in the target repository (only applicable \"\n \"when downloading from DVC remote).\"\n ),\n )\n get_parser.set_defaults(func=CmdArtifactsGet)\n", "path": "dvc/commands/artifacts.py"}], "after_files": [{"content": "from dvc.cli import completion, formatter\nfrom dvc.cli.command import CmdBaseNoRepo\nfrom dvc.cli.utils import DictAction, append_doc_link\nfrom dvc.exceptions import DvcException\nfrom dvc.log import logger\n\nlogger = logger.getChild(__name__)\n\n\nclass CmdArtifactsGet(CmdBaseNoRepo):\n def run(self):\n from dvc.repo.artifacts import Artifacts\n from dvc.scm import CloneError\n from dvc.ui import ui\n\n if self.args.show_url:\n return self._show_url()\n\n try:\n count, out = Artifacts.get(\n self.args.url,\n name=self.args.name,\n version=self.args.rev,\n stage=self.args.stage,\n force=self.args.force,\n config=self.args.config,\n remote=self.args.remote,\n remote_config=self.args.remote_config,\n out=self.args.out,\n )\n ui.write(f\"Downloaded {count} file(s) to '{out}'\")\n return 0\n except CloneError:\n logger.exception(\"failed to get '%s'\", self.args.name)\n return 1\n except DvcException:\n logger.exception(\n \"failed to get '%s' from '%s'\", self.args.name, self.args.url\n )\n return 1\n\n def _show_url(self):\n from dvc.api import artifacts_show, get_url\n from dvc.ui import ui\n\n artifact = artifacts_show(\n self.args.name,\n version=self.args.rev,\n stage=self.args.stage,\n repo=self.args.url,\n )\n\n url = get_url(\n artifact[\"path\"],\n repo=self.args.url,\n rev=artifact[\"rev\"],\n remote=self.args.remote,\n remote_config=self.args.remote_config,\n )\n ui.write(url, force=True)\n\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n ARTIFACTS_HELP = \"DVC model registry artifact commands.\"\n\n artifacts_parser = subparsers.add_parser(\n \"artifacts\",\n parents=[parent_parser],\n description=append_doc_link(ARTIFACTS_HELP, \"artifacts\"),\n help=ARTIFACTS_HELP,\n 
formatter_class=formatter.RawDescriptionHelpFormatter,\n )\n artifacts_subparsers = artifacts_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc artifacts CMD --help` to display command-specific help.\",\n required=True,\n )\n\n ARTIFACTS_GET_HELP = \"Download an artifact from a DVC project.\"\n get_parser = artifacts_subparsers.add_parser(\n \"get\",\n parents=[parent_parser],\n description=append_doc_link(ARTIFACTS_GET_HELP, \"artifacts/get\"),\n help=ARTIFACTS_HELP,\n formatter_class=formatter.RawDescriptionHelpFormatter,\n )\n get_parser.add_argument(\"url\", help=\"Location of DVC repository to download from\")\n get_parser.add_argument(\n \"name\", help=\"Name of artifact in the repository\"\n ).complete = completion.FILE\n get_parser.add_argument(\n \"--rev\",\n nargs=\"?\",\n help=\"Artifact version\",\n metavar=\"<version>\",\n )\n get_parser.add_argument(\n \"--stage\",\n nargs=\"?\",\n help=\"Artifact stage\",\n metavar=\"<stage>\",\n )\n get_parser.add_argument(\n \"-o\",\n \"--out\",\n nargs=\"?\",\n help=\"Destination path to download artifact to\",\n metavar=\"<path>\",\n ).complete = completion.DIR\n get_parser.add_argument(\n \"--show-url\",\n action=\"store_true\",\n help=(\n \"Print the storage location (URL) the target data would be \"\n \"downloaded from, and exit.\"\n ),\n )\n get_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n help=(\n \"Number of jobs to run simultaneously. \"\n \"The default value is 4 * cpu_count(). \"\n ),\n metavar=\"<number>\",\n )\n get_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Override local file or folder if exists.\",\n )\n get_parser.add_argument(\n \"--config\",\n type=str,\n help=(\n \"Path to a config file that will be merged with the config \"\n \"in the target repository.\"\n ),\n )\n get_parser.add_argument(\n \"--remote\",\n type=str,\n help=(\n \"Remote name to set as a default in the target repository \"\n \"(only applicable when downloading from DVC remote).\"\n ),\n )\n get_parser.add_argument(\n \"--remote-config\",\n type=str,\n nargs=\"*\",\n action=DictAction,\n help=(\n \"Remote config options to merge with a remote's config (default or one \"\n \"specified by '--remote') in the target repository (only applicable \"\n \"when downloading from DVC remote).\"\n ),\n )\n get_parser.set_defaults(func=CmdArtifactsGet)\n", "path": "dvc/commands/artifacts.py"}]}
1,527
389
gh_patches_debug_23316
rasdani/github-patches
git_diff
holoviz__panel-4619
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add ability to open custom notebook in Panelite. The Jupyter lite extension https://github.com/jupyterlab-contrib/jupyterlab-open-url-parameter enables you to open a notebook from an url in Jupyterlite. This would be really powerful to include in the build of Panelite as we can the start to share links to notebooks that opens quickly for the user with a working environment. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doc/conf.py` Content: ``` 1 import json 2 import os 3 import pathlib 4 5 import param 6 7 param.parameterized.docstring_signature = False 8 param.parameterized.docstring_describe_params = False 9 10 from nbsite.shared_conf import * 11 12 project = 'Panel' 13 authors = 'Panel contributors' 14 copyright_years['start_year'] = '2019' 15 copyright = copyright_fmt.format(**copyright_years) 16 description = 'High-level dashboarding for python visualization libraries' 17 18 import panel 19 20 from panel.io.convert import BOKEH_VERSION, PY_VERSION 21 from panel.io.resources import CDN_DIST 22 23 PANEL_ROOT = pathlib.Path(panel.__file__).parent 24 25 version = release = base_version(panel.__version__) 26 js_version = json.loads((PANEL_ROOT / 'package.json').read_text())['version'] 27 28 # For the interactivity warning box created by nbsite to point to the right 29 # git tag instead of the default i.e. main. 30 os.environ['BRANCH'] = f"v{release}" 31 32 html_static_path += ['_static'] 33 34 html_css_files = [ 35 'nbsite.css', 36 'css/custom.css', 37 'css/dataframe.css', 38 ] 39 40 html_theme = "pydata_sphinx_theme" 41 html_logo = "_static/logo_horizontal.png" 42 html_favicon = "_static/icons/favicon.ico" 43 44 html_theme_options = { 45 "github_url": "https://github.com/holoviz/panel", 46 "icon_links": [ 47 { 48 "name": "Twitter", 49 "url": "https://twitter.com/Panel_Org", 50 "icon": "fab fa-twitter-square", 51 }, 52 { 53 "name": "Discourse", 54 "url": "https://discourse.holoviz.org/c/panel/5", 55 "icon": "fab fa-discourse", 56 }, 57 ], 58 "footer_items": [ 59 "copyright", 60 "last-updated", 61 ], 62 "google_analytics_id": "UA-154795830-2", 63 "pygment_light_style": "material", 64 "pygment_dark_style": "material", 65 "header_links_before_dropdown": 6 66 } 67 68 extensions += [ 69 'sphinx.ext.napoleon', 70 'nbsite.gallery', 71 'sphinx_copybutton', 72 'nbsite.pyodide' 73 ] 74 napoleon_numpy_docstring = True 75 76 myst_enable_extensions = ["colon_fence", "deflist"] 77 78 nbsite_gallery_conf = { 79 'github_org': 'holoviz', 80 'github_project': 'panel', 81 'galleries': { 82 'gallery': { 83 'title': 'Gallery', 84 'sections': [ 85 {'path': 'demos', 86 'title': 'Demos', 87 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'}, 88 {'path': 'simple', 89 'title': 'Simple Apps', 90 'description': 'Simple example apps meant to provide a quick introduction to Panel.'}, 91 {'path': 'layout', 92 'title': 'Layouts', 93 'description': 'How to leverage Panel layout components to achieve complex layouts.'}, 94 {'path': 'dynamic', 95 'title': 'Dynamic UIs', 96 'description': ('Examples demonstrating how to build dynamic UIs with components that ' 97 'are added or removed interactively.')}, 98 {'path': 'streaming', 99 'title': 'Streaming', 100 'description': ('Streaming data to a visual component.')}, 101 {'path': 'components', 102 'title': 'Custom components', 
103 'description': "Components created using Panel's ReactiveHTML class."}, 104 {'path': 'styles', 105 'title': 'Styling & Theming', 106 'description': "Examples demonstrating how to style and theme different components."}, 107 {'path': 'external', 108 'title': 'External libraries', 109 'description': 'Wrapping external libraries with Panel.'} 110 ] 111 }, 112 'reference': { 113 'title': 'Reference Gallery', 114 'sections': [ 115 'panes', 116 'layouts', 117 'templates', 118 'global', 119 'indicators', 120 'widgets', 121 ], 122 'titles': { 123 'Vega': 'Altair & Vega', 124 'DeckGL': 'PyDeck & Deck.gl', 125 'ECharts': 'PyEcharts & ECharts', 126 'IPyWidget': 'ipywidgets' 127 }, 128 'normalize_titles': False 129 } 130 }, 131 'thumbnail_url': 'https://assets.holoviz.org/panel/thumbnails', 132 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/' 133 } 134 135 if panel.__version__ != version and (PANEL_ROOT / 'dist' / 'wheels').is_dir(): 136 py_version = panel.__version__.replace("-dirty", "") 137 panel_req = f'./wheels/panel-{py_version}-py3-none-any.whl' 138 bokeh_req = f'./wheels/bokeh-{BOKEH_VERSION}-py3-none-any.whl' 139 else: 140 panel_req = f'{CDN_DIST}wheels/panel-{PY_VERSION}-py3-none-any.whl' 141 bokeh_req = f'{CDN_DIST}wheels/bokeh-{BOKEH_VERSION}-py3-none-any.whl' 142 143 nbsite_pyodide_conf = { 144 'requirements': [bokeh_req, panel_req, 'pandas', 'pyodide-http', 'holoviews>=1.16.0a2'] 145 } 146 147 templates_path = [ 148 '_templates' 149 ] 150 151 html_context.update({ 152 "last_release": f"v{release}", 153 "github_user": "holoviz", 154 "github_repo": "panel", 155 "default_mode": "light" 156 }) 157 158 nbbuild_patterns_to_take_along = ["simple.html", "*.json", "json_*"] 159 160 # Override the Sphinx default title that appends `documentation` 161 html_title = f'{project} v{version}' 162 163 suppress_warnings = ["myst.header", "ref.myst", "mystnb.unknown_mime_type"] 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/conf.py b/doc/conf.py --- a/doc/conf.py +++ b/doc/conf.py @@ -55,10 +55,6 @@ "icon": "fab fa-discourse", }, ], - "footer_items": [ - "copyright", - "last-updated", - ], "google_analytics_id": "UA-154795830-2", "pygment_light_style": "material", "pygment_dark_style": "material", @@ -111,6 +107,7 @@ }, 'reference': { 'title': 'Reference Gallery', + 'as_pyodide': True, 'sections': [ 'panes', 'layouts', @@ -129,7 +126,8 @@ } }, 'thumbnail_url': 'https://assets.holoviz.org/panel/thumbnails', - 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/' + 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/', + 'jupyterlite_url': 'https://panelite.holoviz.org/lab/index.html' } if panel.__version__ != version and (PANEL_ROOT / 'dist' / 'wheels').is_dir():
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -55,10 +55,6 @@\n \"icon\": \"fab fa-discourse\",\n },\n ],\n- \"footer_items\": [\n- \"copyright\",\n- \"last-updated\",\n- ],\n \"google_analytics_id\": \"UA-154795830-2\",\n \"pygment_light_style\": \"material\",\n \"pygment_dark_style\": \"material\",\n@@ -111,6 +107,7 @@\n },\n 'reference': {\n 'title': 'Reference Gallery',\n+ 'as_pyodide': True,\n 'sections': [\n 'panes',\n 'layouts',\n@@ -129,7 +126,8 @@\n }\n },\n 'thumbnail_url': 'https://assets.holoviz.org/panel/thumbnails',\n- 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'\n+ 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/',\n+ 'jupyterlite_url': 'https://panelite.holoviz.org/lab/index.html'\n }\n \n if panel.__version__ != version and (PANEL_ROOT / 'dist' / 'wheels').is_dir():\n", "issue": "Add ability to open custom notebook in Panelite.\nThe Jupyter lite extension https://github.com/jupyterlab-contrib/jupyterlab-open-url-parameter enables you to open a notebook from an url in Jupyterlite.\n\nThis would be really powerful to include in the build of Panelite as we can the start to share links to notebooks that opens quickly for the user with a working environment.\n", "before_files": [{"content": "import json\nimport os\nimport pathlib\n\nimport param\n\nparam.parameterized.docstring_signature = False\nparam.parameterized.docstring_describe_params = False\n\nfrom nbsite.shared_conf import *\n\nproject = 'Panel'\nauthors = 'Panel contributors'\ncopyright_years['start_year'] = '2019'\ncopyright = copyright_fmt.format(**copyright_years)\ndescription = 'High-level dashboarding for python visualization libraries'\n\nimport panel\n\nfrom panel.io.convert import BOKEH_VERSION, PY_VERSION\nfrom panel.io.resources import CDN_DIST\n\nPANEL_ROOT = pathlib.Path(panel.__file__).parent\n\nversion = release = base_version(panel.__version__)\njs_version = json.loads((PANEL_ROOT / 'package.json').read_text())['version']\n\n# For the interactivity warning box created by nbsite to point to the right\n# git tag instead of the default i.e. 
main.\nos.environ['BRANCH'] = f\"v{release}\"\n\nhtml_static_path += ['_static']\n\nhtml_css_files = [\n 'nbsite.css',\n 'css/custom.css',\n 'css/dataframe.css',\n]\n\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_logo = \"_static/logo_horizontal.png\"\nhtml_favicon = \"_static/icons/favicon.ico\"\n\nhtml_theme_options = {\n \"github_url\": \"https://github.com/holoviz/panel\",\n \"icon_links\": [\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/Panel_Org\",\n \"icon\": \"fab fa-twitter-square\",\n },\n {\n \"name\": \"Discourse\",\n \"url\": \"https://discourse.holoviz.org/c/panel/5\",\n \"icon\": \"fab fa-discourse\",\n },\n ],\n \"footer_items\": [\n \"copyright\",\n \"last-updated\",\n ],\n \"google_analytics_id\": \"UA-154795830-2\",\n \"pygment_light_style\": \"material\",\n \"pygment_dark_style\": \"material\",\n \"header_links_before_dropdown\": 6\n}\n\nextensions += [\n 'sphinx.ext.napoleon',\n 'nbsite.gallery',\n 'sphinx_copybutton',\n 'nbsite.pyodide'\n]\nnapoleon_numpy_docstring = True\n\nmyst_enable_extensions = [\"colon_fence\", \"deflist\"]\n\nnbsite_gallery_conf = {\n 'github_org': 'holoviz',\n 'github_project': 'panel',\n 'galleries': {\n 'gallery': {\n 'title': 'Gallery',\n 'sections': [\n {'path': 'demos',\n 'title': 'Demos',\n 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'},\n {'path': 'simple',\n 'title': 'Simple Apps',\n 'description': 'Simple example apps meant to provide a quick introduction to Panel.'},\n {'path': 'layout',\n 'title': 'Layouts',\n 'description': 'How to leverage Panel layout components to achieve complex layouts.'},\n {'path': 'dynamic',\n 'title': 'Dynamic UIs',\n 'description': ('Examples demonstrating how to build dynamic UIs with components that '\n 'are added or removed interactively.')},\n {'path': 'streaming',\n 'title': 'Streaming',\n 'description': ('Streaming data to a visual component.')},\n {'path': 'components',\n 'title': 'Custom components',\n 'description': \"Components created using Panel's ReactiveHTML class.\"},\n {'path': 'styles',\n 'title': 'Styling & Theming',\n 'description': \"Examples demonstrating how to style and theme different components.\"},\n {'path': 'external',\n 'title': 'External libraries',\n 'description': 'Wrapping external libraries with Panel.'}\n ]\n },\n 'reference': {\n 'title': 'Reference Gallery',\n 'sections': [\n 'panes',\n 'layouts',\n 'templates',\n 'global',\n 'indicators',\n 'widgets',\n ],\n 'titles': {\n 'Vega': 'Altair & Vega',\n 'DeckGL': 'PyDeck & Deck.gl',\n 'ECharts': 'PyEcharts & ECharts',\n 'IPyWidget': 'ipywidgets'\n },\n 'normalize_titles': False\n }\n },\n 'thumbnail_url': 'https://assets.holoviz.org/panel/thumbnails',\n 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'\n}\n\nif panel.__version__ != version and (PANEL_ROOT / 'dist' / 'wheels').is_dir():\n py_version = panel.__version__.replace(\"-dirty\", \"\")\n panel_req = f'./wheels/panel-{py_version}-py3-none-any.whl'\n bokeh_req = f'./wheels/bokeh-{BOKEH_VERSION}-py3-none-any.whl'\nelse:\n panel_req = f'{CDN_DIST}wheels/panel-{PY_VERSION}-py3-none-any.whl'\n bokeh_req = f'{CDN_DIST}wheels/bokeh-{BOKEH_VERSION}-py3-none-any.whl'\n\nnbsite_pyodide_conf = {\n 'requirements': [bokeh_req, panel_req, 'pandas', 'pyodide-http', 'holoviews>=1.16.0a2']\n}\n\ntemplates_path = [\n '_templates'\n]\n\nhtml_context.update({\n \"last_release\": f\"v{release}\",\n \"github_user\": \"holoviz\",\n \"github_repo\": \"panel\",\n \"default_mode\": 
\"light\"\n})\n\nnbbuild_patterns_to_take_along = [\"simple.html\", \"*.json\", \"json_*\"]\n\n# Override the Sphinx default title that appends `documentation`\nhtml_title = f'{project} v{version}'\n\nsuppress_warnings = [\"myst.header\", \"ref.myst\", \"mystnb.unknown_mime_type\"]\n", "path": "doc/conf.py"}], "after_files": [{"content": "import json\nimport os\nimport pathlib\n\nimport param\n\nparam.parameterized.docstring_signature = False\nparam.parameterized.docstring_describe_params = False\n\nfrom nbsite.shared_conf import *\n\nproject = 'Panel'\nauthors = 'Panel contributors'\ncopyright_years['start_year'] = '2019'\ncopyright = copyright_fmt.format(**copyright_years)\ndescription = 'High-level dashboarding for python visualization libraries'\n\nimport panel\n\nfrom panel.io.convert import BOKEH_VERSION, PY_VERSION\nfrom panel.io.resources import CDN_DIST\n\nPANEL_ROOT = pathlib.Path(panel.__file__).parent\n\nversion = release = base_version(panel.__version__)\njs_version = json.loads((PANEL_ROOT / 'package.json').read_text())['version']\n\n# For the interactivity warning box created by nbsite to point to the right\n# git tag instead of the default i.e. main.\nos.environ['BRANCH'] = f\"v{release}\"\n\nhtml_static_path += ['_static']\n\nhtml_css_files = [\n 'nbsite.css',\n 'css/custom.css',\n 'css/dataframe.css',\n]\n\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_logo = \"_static/logo_horizontal.png\"\nhtml_favicon = \"_static/icons/favicon.ico\"\n\nhtml_theme_options = {\n \"github_url\": \"https://github.com/holoviz/panel\",\n \"icon_links\": [\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/Panel_Org\",\n \"icon\": \"fab fa-twitter-square\",\n },\n {\n \"name\": \"Discourse\",\n \"url\": \"https://discourse.holoviz.org/c/panel/5\",\n \"icon\": \"fab fa-discourse\",\n },\n ],\n \"google_analytics_id\": \"UA-154795830-2\",\n \"pygment_light_style\": \"material\",\n \"pygment_dark_style\": \"material\",\n \"header_links_before_dropdown\": 6\n}\n\nextensions += [\n 'sphinx.ext.napoleon',\n 'nbsite.gallery',\n 'sphinx_copybutton',\n 'nbsite.pyodide'\n]\nnapoleon_numpy_docstring = True\n\nmyst_enable_extensions = [\"colon_fence\", \"deflist\"]\n\nnbsite_gallery_conf = {\n 'github_org': 'holoviz',\n 'github_project': 'panel',\n 'galleries': {\n 'gallery': {\n 'title': 'Gallery',\n 'sections': [\n {'path': 'demos',\n 'title': 'Demos',\n 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'},\n {'path': 'simple',\n 'title': 'Simple Apps',\n 'description': 'Simple example apps meant to provide a quick introduction to Panel.'},\n {'path': 'layout',\n 'title': 'Layouts',\n 'description': 'How to leverage Panel layout components to achieve complex layouts.'},\n {'path': 'dynamic',\n 'title': 'Dynamic UIs',\n 'description': ('Examples demonstrating how to build dynamic UIs with components that '\n 'are added or removed interactively.')},\n {'path': 'streaming',\n 'title': 'Streaming',\n 'description': ('Streaming data to a visual component.')},\n {'path': 'components',\n 'title': 'Custom components',\n 'description': \"Components created using Panel's ReactiveHTML class.\"},\n {'path': 'styles',\n 'title': 'Styling & Theming',\n 'description': \"Examples demonstrating how to style and theme different components.\"},\n {'path': 'external',\n 'title': 'External libraries',\n 'description': 'Wrapping external libraries with Panel.'}\n ]\n },\n 'reference': {\n 'title': 'Reference Gallery',\n 'as_pyodide': True,\n 'sections': [\n 'panes',\n 
'layouts',\n 'templates',\n 'global',\n 'indicators',\n 'widgets',\n ],\n 'titles': {\n 'Vega': 'Altair & Vega',\n 'DeckGL': 'PyDeck & Deck.gl',\n 'ECharts': 'PyEcharts & ECharts',\n 'IPyWidget': 'ipywidgets'\n },\n 'normalize_titles': False\n }\n },\n 'thumbnail_url': 'https://assets.holoviz.org/panel/thumbnails',\n 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/',\n 'jupyterlite_url': 'https://panelite.holoviz.org/lab/index.html'\n}\n\nif panel.__version__ != version and (PANEL_ROOT / 'dist' / 'wheels').is_dir():\n py_version = panel.__version__.replace(\"-dirty\", \"\")\n panel_req = f'./wheels/panel-{py_version}-py3-none-any.whl'\n bokeh_req = f'./wheels/bokeh-{BOKEH_VERSION}-py3-none-any.whl'\nelse:\n panel_req = f'{CDN_DIST}wheels/panel-{PY_VERSION}-py3-none-any.whl'\n bokeh_req = f'{CDN_DIST}wheels/bokeh-{BOKEH_VERSION}-py3-none-any.whl'\n\nnbsite_pyodide_conf = {\n 'requirements': [bokeh_req, panel_req, 'pandas', 'pyodide-http', 'holoviews>=1.16.0a2']\n}\n\ntemplates_path = [\n '_templates'\n]\n\nhtml_context.update({\n \"last_release\": f\"v{release}\",\n \"github_user\": \"holoviz\",\n \"github_repo\": \"panel\",\n \"default_mode\": \"light\"\n})\n\nnbbuild_patterns_to_take_along = [\"simple.html\", \"*.json\", \"json_*\"]\n\n# Override the Sphinx default title that appends `documentation`\nhtml_title = f'{project} v{version}'\n\nsuppress_warnings = [\"myst.header\", \"ref.myst\", \"mystnb.unknown_mime_type\"]\n", "path": "doc/conf.py"}]}
1,987
291
gh_patches_debug_10995
rasdani/github-patches
git_diff
getredash__redash-1856
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make dashboard level filters a feature available to non-admins ### Issue Summary Currently to enable dashboard level filters you have to be an administrator and you have to change a flag manually in the dashboards table. It would be good if this was just on by default or an option that users could change through the front end. ### Technical details: * Redash Version: 1.0.3 * Browser/OS: Chrome * How did you install Redash: Amazon via the AMI --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redash/handlers/dashboards.py` Content: ``` 1 from itertools import chain 2 3 from flask import request, url_for 4 from funcy import distinct, project, take 5 6 from flask_restful import abort 7 from redash import models, serializers, settings 8 from redash.handlers.base import BaseResource, get_object_or_404 9 from redash.permissions import (can_modify, require_admin_or_owner, 10 require_object_modify_permission, 11 require_permission) 12 from sqlalchemy.orm.exc import StaleDataError 13 14 15 class RecentDashboardsResource(BaseResource): 16 @require_permission('list_dashboards') 17 def get(self): 18 """ 19 Lists dashboards modified in the last 7 days. 20 """ 21 if settings.FEATURE_DUMB_RECENTS: 22 dashboards = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id).order_by(models.Dashboard.updated_at.desc()).limit(10) 23 dashboards = [d.to_dict() for d in dashboards] 24 else: 25 recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id, for_user=True)] 26 27 global_recent = [] 28 if len(recent) < 10: 29 global_recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id)] 30 31 dashboards = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id'])) 32 33 return dashboards 34 35 36 class DashboardListResource(BaseResource): 37 @require_permission('list_dashboards') 38 def get(self): 39 """ 40 Lists all accessible dashboards. 41 """ 42 results = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id) 43 return [q.to_dict() for q in results] 44 45 @require_permission('create_dashboard') 46 def post(self): 47 """ 48 Creates a new dashboard. 49 50 :<json string name: Dashboard name 51 52 Responds with a :ref:`dashboard <dashboard-response-label>`. 53 """ 54 dashboard_properties = request.get_json(force=True) 55 dashboard = models.Dashboard(name=dashboard_properties['name'], 56 org=self.current_org, 57 user=self.current_user, 58 is_draft=True, 59 layout='[]') 60 models.db.session.add(dashboard) 61 models.db.session.commit() 62 return dashboard.to_dict() 63 64 65 class DashboardResource(BaseResource): 66 @require_permission('list_dashboards') 67 def get(self, dashboard_slug=None): 68 """ 69 Retrieves a dashboard. 70 71 :qparam string slug: Slug of dashboard to retrieve. 72 73 .. 
_dashboard-response-label: 74 75 :>json number id: Dashboard ID 76 :>json string name: 77 :>json string slug: 78 :>json number user_id: ID of the dashboard creator 79 :>json string created_at: ISO format timestamp for dashboard creation 80 :>json string updated_at: ISO format timestamp for last dashboard modification 81 :>json number version: Revision number of dashboard 82 :>json boolean dashboard_filters_enabled: Whether filters are enabled or not 83 :>json boolean is_archived: Whether this dashboard has been removed from the index or not 84 :>json boolean is_draft: Whether this dashboard is a draft or not. 85 :>json array layout: Array of arrays containing widget IDs, corresponding to the rows and columns the widgets are displayed in 86 :>json array widgets: Array of arrays containing :ref:`widget <widget-response-label>` data 87 88 .. _widget-response-label: 89 90 Widget structure: 91 92 :>json number widget.id: Widget ID 93 :>json number widget.width: Widget size 94 :>json object widget.options: Widget options 95 :>json number widget.dashboard_id: ID of dashboard containing this widget 96 :>json string widget.text: Widget contents, if this is a text-box widget 97 :>json object widget.visualization: Widget contents, if this is a visualization widget 98 :>json string widget.created_at: ISO format timestamp for widget creation 99 :>json string widget.updated_at: ISO format timestamp for last widget modification 100 """ 101 dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, dashboard_slug, self.current_org) 102 response = dashboard.to_dict(with_widgets=True, user=self.current_user) 103 104 api_key = models.ApiKey.get_by_object(dashboard) 105 if api_key: 106 response['public_url'] = url_for('redash.public_dashboard', token=api_key.api_key, org_slug=self.current_org.slug, _external=True) 107 response['api_key'] = api_key.api_key 108 109 response['can_edit'] = can_modify(dashboard, self.current_user) 110 111 return response 112 113 @require_permission('edit_dashboard') 114 def post(self, dashboard_slug): 115 """ 116 Modifies a dashboard. 117 118 :qparam string slug: Slug of dashboard to retrieve. 119 120 Responds with the updated :ref:`dashboard <dashboard-response-label>`. 121 122 :status 200: success 123 :status 409: Version conflict -- dashboard modified since last read 124 """ 125 dashboard_properties = request.get_json(force=True) 126 # TODO: either convert all requests to use slugs or ids 127 dashboard = models.Dashboard.get_by_id_and_org(dashboard_slug, self.current_org) 128 129 require_object_modify_permission(dashboard, self.current_user) 130 131 updates = project(dashboard_properties, ('name', 'layout', 'version', 132 'is_draft')) 133 134 # SQLAlchemy handles the case where a concurrent transaction beats us 135 # to the update. But we still have to make sure that we're not starting 136 # out behind. 137 if 'version' in updates and updates['version'] != dashboard.version: 138 abort(409) 139 140 updates['changed_by'] = self.current_user 141 142 self.update_model(dashboard, updates) 143 models.db.session.add(dashboard) 144 try: 145 models.db.session.commit() 146 except StaleDataError: 147 abort(409) 148 149 result = dashboard.to_dict(with_widgets=True, user=self.current_user) 150 return result 151 152 @require_permission('edit_dashboard') 153 def delete(self, dashboard_slug): 154 """ 155 Archives a dashboard. 156 157 :qparam string slug: Slug of dashboard to retrieve. 158 159 Responds with the archived :ref:`dashboard <dashboard-response-label>`. 
160 """ 161 dashboard = models.Dashboard.get_by_slug_and_org(dashboard_slug, self.current_org) 162 dashboard.is_archived = True 163 dashboard.record_changes(changed_by=self.current_user) 164 models.db.session.add(dashboard) 165 d = dashboard.to_dict(with_widgets=True, user=self.current_user) 166 models.db.session.commit() 167 return d 168 169 170 class PublicDashboardResource(BaseResource): 171 def get(self, token): 172 """ 173 Retrieve a public dashboard. 174 175 :param token: An API key for a public dashboard. 176 :>json array widgets: An array of arrays of :ref:`public widgets <public-widget-label>`, corresponding to the rows and columns the widgets are displayed in 177 """ 178 if not isinstance(self.current_user, models.ApiUser): 179 api_key = get_object_or_404(models.ApiKey.get_by_api_key, token) 180 dashboard = api_key.object 181 else: 182 dashboard = self.current_user.object 183 184 return serializers.public_dashboard(dashboard) 185 186 187 class DashboardShareResource(BaseResource): 188 def post(self, dashboard_id): 189 """ 190 Allow anonymous access to a dashboard. 191 192 :param dashboard_id: The numeric ID of the dashboard to share. 193 :>json string public_url: The URL for anonymous access to the dashboard. 194 :>json api_key: The API key to use when accessing it. 195 """ 196 dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org) 197 require_admin_or_owner(dashboard.user_id) 198 api_key = models.ApiKey.create_for_object(dashboard, self.current_user) 199 models.db.session.flush() 200 models.db.session.commit() 201 202 public_url = url_for('redash.public_dashboard', token=api_key.api_key, org_slug=self.current_org.slug, _external=True) 203 204 self.record_event({ 205 'action': 'activate_api_key', 206 'object_id': dashboard.id, 207 'object_type': 'dashboard', 208 }) 209 210 return {'public_url': public_url, 'api_key': api_key.api_key} 211 212 def delete(self, dashboard_id): 213 """ 214 Disable anonymous access to a dashboard. 215 216 :param dashboard_id: The numeric ID of the dashboard to unshare. 217 """ 218 dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org) 219 require_admin_or_owner(dashboard.user_id) 220 api_key = models.ApiKey.get_by_object(dashboard) 221 222 if api_key: 223 api_key.active = False 224 models.db.session.add(api_key) 225 models.db.session.commit() 226 227 self.record_event({ 228 'action': 'deactivate_api_key', 229 'object_id': dashboard.id, 230 'object_type': 'dashboard', 231 }) 232 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/handlers/dashboards.py b/redash/handlers/dashboards.py --- a/redash/handlers/dashboards.py +++ b/redash/handlers/dashboards.py @@ -129,7 +129,7 @@ require_object_modify_permission(dashboard, self.current_user) updates = project(dashboard_properties, ('name', 'layout', 'version', - 'is_draft')) + 'is_draft', 'dashboard_filters_enabled')) # SQLAlchemy handles the case where a concurrent transaction beats us # to the update. But we still have to make sure that we're not starting
{"golden_diff": "diff --git a/redash/handlers/dashboards.py b/redash/handlers/dashboards.py\n--- a/redash/handlers/dashboards.py\n+++ b/redash/handlers/dashboards.py\n@@ -129,7 +129,7 @@\n require_object_modify_permission(dashboard, self.current_user)\n \n updates = project(dashboard_properties, ('name', 'layout', 'version',\n- 'is_draft'))\n+ 'is_draft', 'dashboard_filters_enabled'))\n \n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. But we still have to make sure that we're not starting\n", "issue": "Make dashboard level filters a feature available to non-admins\n### Issue Summary\r\n\r\nCurrently to enable dashboard level filters you have to be an administrator and you have to change a flag manually in the dashboards table. It would be good if this was just on by default or an option that users could change through the front end.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.3\r\n* Browser/OS: Chrome\r\n* How did you install Redash: Amazon via the AMI\r\n\n", "before_files": [{"content": "from itertools import chain\n\nfrom flask import request, url_for\nfrom funcy import distinct, project, take\n\nfrom flask_restful import abort\nfrom redash import models, serializers, settings\nfrom redash.handlers.base import BaseResource, get_object_or_404\nfrom redash.permissions import (can_modify, require_admin_or_owner,\n require_object_modify_permission,\n require_permission)\nfrom sqlalchemy.orm.exc import StaleDataError\n\n\nclass RecentDashboardsResource(BaseResource):\n @require_permission('list_dashboards')\n def get(self):\n \"\"\"\n Lists dashboards modified in the last 7 days.\n \"\"\"\n if settings.FEATURE_DUMB_RECENTS:\n dashboards = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id).order_by(models.Dashboard.updated_at.desc()).limit(10)\n dashboards = [d.to_dict() for d in dashboards]\n else:\n recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id, for_user=True)]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id)]\n\n dashboards = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n return dashboards\n\n\nclass DashboardListResource(BaseResource):\n @require_permission('list_dashboards')\n def get(self):\n \"\"\"\n Lists all accessible dashboards.\n \"\"\"\n results = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id)\n return [q.to_dict() for q in results]\n\n @require_permission('create_dashboard')\n def post(self):\n \"\"\"\n Creates a new dashboard.\n\n :<json string name: Dashboard name\n\n Responds with a :ref:`dashboard <dashboard-response-label>`.\n \"\"\"\n dashboard_properties = request.get_json(force=True)\n dashboard = models.Dashboard(name=dashboard_properties['name'],\n org=self.current_org,\n user=self.current_user,\n is_draft=True,\n layout='[]')\n models.db.session.add(dashboard)\n models.db.session.commit()\n return dashboard.to_dict()\n\n\nclass DashboardResource(BaseResource):\n @require_permission('list_dashboards')\n def get(self, dashboard_slug=None):\n \"\"\"\n Retrieves a dashboard.\n\n :qparam string slug: Slug of dashboard to retrieve.\n\n .. 
_dashboard-response-label:\n\n :>json number id: Dashboard ID\n :>json string name:\n :>json string slug:\n :>json number user_id: ID of the dashboard creator\n :>json string created_at: ISO format timestamp for dashboard creation\n :>json string updated_at: ISO format timestamp for last dashboard modification\n :>json number version: Revision number of dashboard\n :>json boolean dashboard_filters_enabled: Whether filters are enabled or not\n :>json boolean is_archived: Whether this dashboard has been removed from the index or not\n :>json boolean is_draft: Whether this dashboard is a draft or not.\n :>json array layout: Array of arrays containing widget IDs, corresponding to the rows and columns the widgets are displayed in\n :>json array widgets: Array of arrays containing :ref:`widget <widget-response-label>` data\n\n .. _widget-response-label:\n\n Widget structure:\n\n :>json number widget.id: Widget ID\n :>json number widget.width: Widget size\n :>json object widget.options: Widget options\n :>json number widget.dashboard_id: ID of dashboard containing this widget\n :>json string widget.text: Widget contents, if this is a text-box widget\n :>json object widget.visualization: Widget contents, if this is a visualization widget\n :>json string widget.created_at: ISO format timestamp for widget creation\n :>json string widget.updated_at: ISO format timestamp for last widget modification\n \"\"\"\n dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, dashboard_slug, self.current_org)\n response = dashboard.to_dict(with_widgets=True, user=self.current_user)\n\n api_key = models.ApiKey.get_by_object(dashboard)\n if api_key:\n response['public_url'] = url_for('redash.public_dashboard', token=api_key.api_key, org_slug=self.current_org.slug, _external=True)\n response['api_key'] = api_key.api_key\n\n response['can_edit'] = can_modify(dashboard, self.current_user)\n\n return response\n\n @require_permission('edit_dashboard')\n def post(self, dashboard_slug):\n \"\"\"\n Modifies a dashboard.\n\n :qparam string slug: Slug of dashboard to retrieve.\n\n Responds with the updated :ref:`dashboard <dashboard-response-label>`.\n\n :status 200: success\n :status 409: Version conflict -- dashboard modified since last read\n \"\"\"\n dashboard_properties = request.get_json(force=True)\n # TODO: either convert all requests to use slugs or ids\n dashboard = models.Dashboard.get_by_id_and_org(dashboard_slug, self.current_org)\n\n require_object_modify_permission(dashboard, self.current_user)\n\n updates = project(dashboard_properties, ('name', 'layout', 'version',\n 'is_draft'))\n\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. 
But we still have to make sure that we're not starting\n # out behind.\n if 'version' in updates and updates['version'] != dashboard.version:\n abort(409)\n\n updates['changed_by'] = self.current_user\n\n self.update_model(dashboard, updates)\n models.db.session.add(dashboard)\n try:\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n result = dashboard.to_dict(with_widgets=True, user=self.current_user)\n return result\n\n @require_permission('edit_dashboard')\n def delete(self, dashboard_slug):\n \"\"\"\n Archives a dashboard.\n\n :qparam string slug: Slug of dashboard to retrieve.\n\n Responds with the archived :ref:`dashboard <dashboard-response-label>`.\n \"\"\"\n dashboard = models.Dashboard.get_by_slug_and_org(dashboard_slug, self.current_org)\n dashboard.is_archived = True\n dashboard.record_changes(changed_by=self.current_user)\n models.db.session.add(dashboard)\n d = dashboard.to_dict(with_widgets=True, user=self.current_user)\n models.db.session.commit()\n return d\n\n\nclass PublicDashboardResource(BaseResource):\n def get(self, token):\n \"\"\"\n Retrieve a public dashboard.\n\n :param token: An API key for a public dashboard.\n :>json array widgets: An array of arrays of :ref:`public widgets <public-widget-label>`, corresponding to the rows and columns the widgets are displayed in\n \"\"\"\n if not isinstance(self.current_user, models.ApiUser):\n api_key = get_object_or_404(models.ApiKey.get_by_api_key, token)\n dashboard = api_key.object\n else:\n dashboard = self.current_user.object\n\n return serializers.public_dashboard(dashboard)\n\n\nclass DashboardShareResource(BaseResource):\n def post(self, dashboard_id):\n \"\"\"\n Allow anonymous access to a dashboard.\n\n :param dashboard_id: The numeric ID of the dashboard to share.\n :>json string public_url: The URL for anonymous access to the dashboard.\n :>json api_key: The API key to use when accessing it.\n \"\"\"\n dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)\n require_admin_or_owner(dashboard.user_id)\n api_key = models.ApiKey.create_for_object(dashboard, self.current_user)\n models.db.session.flush()\n models.db.session.commit()\n\n public_url = url_for('redash.public_dashboard', token=api_key.api_key, org_slug=self.current_org.slug, _external=True)\n\n self.record_event({\n 'action': 'activate_api_key',\n 'object_id': dashboard.id,\n 'object_type': 'dashboard',\n })\n\n return {'public_url': public_url, 'api_key': api_key.api_key}\n\n def delete(self, dashboard_id):\n \"\"\"\n Disable anonymous access to a dashboard.\n\n :param dashboard_id: The numeric ID of the dashboard to unshare.\n \"\"\"\n dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)\n require_admin_or_owner(dashboard.user_id)\n api_key = models.ApiKey.get_by_object(dashboard)\n\n if api_key:\n api_key.active = False\n models.db.session.add(api_key)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'deactivate_api_key',\n 'object_id': dashboard.id,\n 'object_type': 'dashboard',\n })\n", "path": "redash/handlers/dashboards.py"}], "after_files": [{"content": "from itertools import chain\n\nfrom flask import request, url_for\nfrom funcy import distinct, project, take\n\nfrom flask_restful import abort\nfrom redash import models, serializers, settings\nfrom redash.handlers.base import BaseResource, get_object_or_404\nfrom redash.permissions import (can_modify, require_admin_or_owner,\n require_object_modify_permission,\n require_permission)\nfrom sqlalchemy.orm.exc 
import StaleDataError\n\n\nclass RecentDashboardsResource(BaseResource):\n @require_permission('list_dashboards')\n def get(self):\n \"\"\"\n Lists dashboards modified in the last 7 days.\n \"\"\"\n if settings.FEATURE_DUMB_RECENTS:\n dashboards = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id).order_by(models.Dashboard.updated_at.desc()).limit(10)\n dashboards = [d.to_dict() for d in dashboards]\n else:\n recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id, for_user=True)]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id)]\n\n dashboards = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n return dashboards\n\n\nclass DashboardListResource(BaseResource):\n @require_permission('list_dashboards')\n def get(self):\n \"\"\"\n Lists all accessible dashboards.\n \"\"\"\n results = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id)\n return [q.to_dict() for q in results]\n\n @require_permission('create_dashboard')\n def post(self):\n \"\"\"\n Creates a new dashboard.\n\n :<json string name: Dashboard name\n\n Responds with a :ref:`dashboard <dashboard-response-label>`.\n \"\"\"\n dashboard_properties = request.get_json(force=True)\n dashboard = models.Dashboard(name=dashboard_properties['name'],\n org=self.current_org,\n user=self.current_user,\n is_draft=True,\n layout='[]')\n models.db.session.add(dashboard)\n models.db.session.commit()\n return dashboard.to_dict()\n\n\nclass DashboardResource(BaseResource):\n @require_permission('list_dashboards')\n def get(self, dashboard_slug=None):\n \"\"\"\n Retrieves a dashboard.\n\n :qparam string slug: Slug of dashboard to retrieve.\n\n .. _dashboard-response-label:\n\n :>json number id: Dashboard ID\n :>json string name:\n :>json string slug:\n :>json number user_id: ID of the dashboard creator\n :>json string created_at: ISO format timestamp for dashboard creation\n :>json string updated_at: ISO format timestamp for last dashboard modification\n :>json number version: Revision number of dashboard\n :>json boolean dashboard_filters_enabled: Whether filters are enabled or not\n :>json boolean is_archived: Whether this dashboard has been removed from the index or not\n :>json boolean is_draft: Whether this dashboard is a draft or not.\n :>json array layout: Array of arrays containing widget IDs, corresponding to the rows and columns the widgets are displayed in\n :>json array widgets: Array of arrays containing :ref:`widget <widget-response-label>` data\n\n .. 
_widget-response-label:\n\n Widget structure:\n\n :>json number widget.id: Widget ID\n :>json number widget.width: Widget size\n :>json object widget.options: Widget options\n :>json number widget.dashboard_id: ID of dashboard containing this widget\n :>json string widget.text: Widget contents, if this is a text-box widget\n :>json object widget.visualization: Widget contents, if this is a visualization widget\n :>json string widget.created_at: ISO format timestamp for widget creation\n :>json string widget.updated_at: ISO format timestamp for last widget modification\n \"\"\"\n dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, dashboard_slug, self.current_org)\n response = dashboard.to_dict(with_widgets=True, user=self.current_user)\n\n api_key = models.ApiKey.get_by_object(dashboard)\n if api_key:\n response['public_url'] = url_for('redash.public_dashboard', token=api_key.api_key, org_slug=self.current_org.slug, _external=True)\n response['api_key'] = api_key.api_key\n\n response['can_edit'] = can_modify(dashboard, self.current_user)\n\n return response\n\n @require_permission('edit_dashboard')\n def post(self, dashboard_slug):\n \"\"\"\n Modifies a dashboard.\n\n :qparam string slug: Slug of dashboard to retrieve.\n\n Responds with the updated :ref:`dashboard <dashboard-response-label>`.\n\n :status 200: success\n :status 409: Version conflict -- dashboard modified since last read\n \"\"\"\n dashboard_properties = request.get_json(force=True)\n # TODO: either convert all requests to use slugs or ids\n dashboard = models.Dashboard.get_by_id_and_org(dashboard_slug, self.current_org)\n\n require_object_modify_permission(dashboard, self.current_user)\n\n updates = project(dashboard_properties, ('name', 'layout', 'version',\n 'is_draft', 'dashboard_filters_enabled'))\n\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. 
But we still have to make sure that we're not starting\n # out behind.\n if 'version' in updates and updates['version'] != dashboard.version:\n abort(409)\n\n updates['changed_by'] = self.current_user\n\n self.update_model(dashboard, updates)\n models.db.session.add(dashboard)\n try:\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n result = dashboard.to_dict(with_widgets=True, user=self.current_user)\n return result\n\n @require_permission('edit_dashboard')\n def delete(self, dashboard_slug):\n \"\"\"\n Archives a dashboard.\n\n :qparam string slug: Slug of dashboard to retrieve.\n\n Responds with the archived :ref:`dashboard <dashboard-response-label>`.\n \"\"\"\n dashboard = models.Dashboard.get_by_slug_and_org(dashboard_slug, self.current_org)\n dashboard.is_archived = True\n dashboard.record_changes(changed_by=self.current_user)\n models.db.session.add(dashboard)\n d = dashboard.to_dict(with_widgets=True, user=self.current_user)\n models.db.session.commit()\n return d\n\n\nclass PublicDashboardResource(BaseResource):\n def get(self, token):\n \"\"\"\n Retrieve a public dashboard.\n\n :param token: An API key for a public dashboard.\n :>json array widgets: An array of arrays of :ref:`public widgets <public-widget-label>`, corresponding to the rows and columns the widgets are displayed in\n \"\"\"\n if not isinstance(self.current_user, models.ApiUser):\n api_key = get_object_or_404(models.ApiKey.get_by_api_key, token)\n dashboard = api_key.object\n else:\n dashboard = self.current_user.object\n\n return serializers.public_dashboard(dashboard)\n\n\nclass DashboardShareResource(BaseResource):\n def post(self, dashboard_id):\n \"\"\"\n Allow anonymous access to a dashboard.\n\n :param dashboard_id: The numeric ID of the dashboard to share.\n :>json string public_url: The URL for anonymous access to the dashboard.\n :>json api_key: The API key to use when accessing it.\n \"\"\"\n dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)\n require_admin_or_owner(dashboard.user_id)\n api_key = models.ApiKey.create_for_object(dashboard, self.current_user)\n models.db.session.flush()\n models.db.session.commit()\n\n public_url = url_for('redash.public_dashboard', token=api_key.api_key, org_slug=self.current_org.slug, _external=True)\n\n self.record_event({\n 'action': 'activate_api_key',\n 'object_id': dashboard.id,\n 'object_type': 'dashboard',\n })\n\n return {'public_url': public_url, 'api_key': api_key.api_key}\n\n def delete(self, dashboard_id):\n \"\"\"\n Disable anonymous access to a dashboard.\n\n :param dashboard_id: The numeric ID of the dashboard to unshare.\n \"\"\"\n dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)\n require_admin_or_owner(dashboard.user_id)\n api_key = models.ApiKey.get_by_object(dashboard)\n\n if api_key:\n api_key.active = False\n models.db.session.add(api_key)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'deactivate_api_key',\n 'object_id': dashboard.id,\n 'object_type': 'dashboard',\n })\n", "path": "redash/handlers/dashboards.py"}]}
2,877
142
gh_patches_debug_36074
rasdani/github-patches
git_diff
conan-io__conan-center-index-5411
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] all: "Access is denied" in os.rename() on Windows ### Package and Environment Details (include every applicable attribute) * Package Name/Version: **almost all packages affected** * Operating System+version: **Windows 10** * Compiler+version: **MSVC 16** * Conan version: **conan 1.35.2** * Python version: **Python 3.8.7** ### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use) ``` [settings] os_build=Windows os=Windows arch=x86_64 arch_build=x86_64 compiler=Visual Studio compiler.version=16 compiler.runtime=MD build_type=Release ``` ### Steps to reproduce (Include if Applicable) This is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774 However most recipes still use `os.rename()` and not `tools.rename()`. ### Log ``` b2/4.2.0: Configuring sources in C:\Users\xxx\.conan\data\b2\4.2.0\_\_\source ERROR: b2/4.2.0: Error in source() method, line 58 os.rename(extracted_dir, "source") PermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/zlib/1.2.11/conanfile.py` Content: ``` 1 import os 2 from conans import ConanFile, tools, CMake 3 from conans.errors import ConanException 4 5 6 class ZlibConan(ConanFile): 7 name = "zlib" 8 version = "1.2.11" 9 url = "https://github.com/conan-io/conan-center-index" 10 homepage = "https://zlib.net" 11 license = "Zlib" 12 description = ("A Massively Spiffy Yet Delicately Unobtrusive Compression Library " 13 "(Also Free, Not to Mention Unencumbered by Patents)") 14 settings = "os", "arch", "compiler", "build_type" 15 options = {"shared": [True, False], "fPIC": [True, False], "minizip": [True, False, "deprecated"]} 16 default_options = {"shared": False, "fPIC": True, "minizip": "deprecated"} 17 exports_sources = ["CMakeLists.txt", "CMakeLists_minizip.txt", "patches/**"] 18 generators = "cmake" 19 topics = ("conan", "zlib", "compression") 20 21 @property 22 def _source_subfolder(self): 23 return "source_subfolder" 24 25 @property 26 def _build_subfolder(self): 27 return "build_subfolder" 28 29 def config_options(self): 30 if self.settings.os == "Windows": 31 del self.options.fPIC 32 33 def configure(self): 34 del self.settings.compiler.libcxx 35 del self.settings.compiler.cppstd 36 37 if self.options.shared: 38 del self.options.fPIC 39 40 if self.options.minizip != "deprecated": 41 self.output.warn("minizip option is deprecated. 
Please use the new minizip/1.2.11 package") 42 43 def package_id(self): 44 del self.info.options.minizip 45 46 def source(self): 47 tools.get(**self.conan_data["sources"][self.version]) 48 os.rename("{}-{}".format(self.name, self.version), self._source_subfolder) 49 50 def _patch_sources(self): 51 for patch in self.conan_data["patches"][self.version]: 52 tools.patch(**patch) 53 54 with tools.chdir(self._source_subfolder): 55 # https://github.com/madler/zlib/issues/268 56 tools.replace_in_file('gzguts.h', 57 '#if defined(_WIN32) || defined(__CYGWIN__)', 58 '#if defined(_WIN32) || defined(__MINGW32__)') 59 60 is_apple_clang12 = self.settings.compiler == "apple-clang" and tools.Version(self.settings.compiler.version) >= "12.0" 61 if not is_apple_clang12: 62 for filename in ['zconf.h', 'zconf.h.cmakein', 'zconf.h.in']: 63 tools.replace_in_file(filename, 64 '#ifdef HAVE_UNISTD_H ' 65 '/* may be set to #if 1 by ./configure */', 66 '#if defined(HAVE_UNISTD_H) && (1-HAVE_UNISTD_H-1 != 0)') 67 tools.replace_in_file(filename, 68 '#ifdef HAVE_STDARG_H ' 69 '/* may be set to #if 1 by ./configure */', 70 '#if defined(HAVE_STDARG_H) && (1-HAVE_STDARG_H-1 != 0)') 71 72 def build(self): 73 self._patch_sources() 74 make_target = "zlib" if self.options.shared else "zlibstatic" 75 cmake = CMake(self) 76 cmake.configure(build_folder=self._build_subfolder) 77 cmake.build(target=make_target) 78 79 def _rename_libraries(self): 80 if self.settings.os == "Windows": 81 lib_path = os.path.join(self.package_folder, "lib") 82 suffix = "d" if self.settings.build_type == "Debug" else "" 83 84 if self.options.shared: 85 if self.settings.compiler == "Visual Studio": 86 current_lib = os.path.join(lib_path, "zlib%s.lib" % suffix) 87 os.rename(current_lib, os.path.join(lib_path, "zlib.lib")) 88 else: 89 if self.settings.compiler == "Visual Studio": 90 current_lib = os.path.join(lib_path, "zlibstatic%s.lib" % suffix) 91 os.rename(current_lib, os.path.join(lib_path, "zlib.lib")) 92 elif self.settings.compiler == "gcc": 93 if self.settings.os != "Windows" or not self.settings.os.subsystem: 94 current_lib = os.path.join(lib_path, "libzlibstatic.a") 95 os.rename(current_lib, os.path.join(lib_path, "libzlib.a")) 96 elif self.settings.compiler == "clang": 97 current_lib = os.path.join(lib_path, "zlibstatic.lib") 98 os.rename(current_lib, os.path.join(lib_path, "zlib.lib")) 99 100 def _extract_license(self): 101 with tools.chdir(os.path.join(self.source_folder, self._source_subfolder)): 102 tmp = tools.load("zlib.h") 103 license_contents = tmp[2:tmp.find("*/", 1)] 104 tools.save("LICENSE", license_contents) 105 106 def package(self): 107 self._extract_license() 108 self.copy("LICENSE", src=self._source_subfolder, dst="licenses") 109 110 # Copy headers 111 for header in ["*zlib.h", "*zconf.h"]: 112 self.copy(pattern=header, dst="include", src=self._source_subfolder, keep_path=False) 113 self.copy(pattern=header, dst="include", src=self._build_subfolder, keep_path=False) 114 115 # Copying static and dynamic libs 116 if self.options.shared: 117 self.copy(pattern="*.dylib*", dst="lib", src=self._build_subfolder, keep_path=False, symlinks=True) 118 self.copy(pattern="*.so*", dst="lib", src=self._build_subfolder, keep_path=False, symlinks=True) 119 self.copy(pattern="*.dll", dst="bin", src=self._build_subfolder, keep_path=False) 120 self.copy(pattern="*.dll.a", dst="lib", src=self._build_subfolder, keep_path=False) 121 else: 122 self.copy(pattern="*.a", dst="lib", src=self._build_subfolder, keep_path=False) 123 self.copy(pattern="*.lib", 
dst="lib", src=self._build_subfolder, keep_path=False) 124 125 self._rename_libraries() 126 127 def package_info(self): 128 self.cpp_info.libs.append("zlib" if self.settings.os == "Windows" and not self.settings.os.subsystem else "z") 129 self.cpp_info.names["cmake_find_package"] = "ZLIB" 130 self.cpp_info.names["cmake_find_package_multi"] = "ZLIB" 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/zlib/1.2.11/conanfile.py b/recipes/zlib/1.2.11/conanfile.py --- a/recipes/zlib/1.2.11/conanfile.py +++ b/recipes/zlib/1.2.11/conanfile.py @@ -44,8 +44,7 @@ del self.info.options.minizip def source(self): - tools.get(**self.conan_data["sources"][self.version]) - os.rename("{}-{}".format(self.name, self.version), self._source_subfolder) + tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True) def _patch_sources(self): for patch in self.conan_data["patches"][self.version]: @@ -82,20 +81,20 @@ suffix = "d" if self.settings.build_type == "Debug" else "" if self.options.shared: - if self.settings.compiler == "Visual Studio": + if self.settings.compiler == "Visual Studio" and suffix: current_lib = os.path.join(lib_path, "zlib%s.lib" % suffix) - os.rename(current_lib, os.path.join(lib_path, "zlib.lib")) + tools.rename(current_lib, os.path.join(lib_path, "zlib.lib")) else: if self.settings.compiler == "Visual Studio": current_lib = os.path.join(lib_path, "zlibstatic%s.lib" % suffix) - os.rename(current_lib, os.path.join(lib_path, "zlib.lib")) + tools.rename(current_lib, os.path.join(lib_path, "zlib.lib")) elif self.settings.compiler == "gcc": if self.settings.os != "Windows" or not self.settings.os.subsystem: current_lib = os.path.join(lib_path, "libzlibstatic.a") - os.rename(current_lib, os.path.join(lib_path, "libzlib.a")) + tools.rename(current_lib, os.path.join(lib_path, "libzlib.a")) elif self.settings.compiler == "clang": current_lib = os.path.join(lib_path, "zlibstatic.lib") - os.rename(current_lib, os.path.join(lib_path, "zlib.lib")) + tools.rename(current_lib, os.path.join(lib_path, "zlib.lib")) def _extract_license(self): with tools.chdir(os.path.join(self.source_folder, self._source_subfolder)):
{"golden_diff": "diff --git a/recipes/zlib/1.2.11/conanfile.py b/recipes/zlib/1.2.11/conanfile.py\n--- a/recipes/zlib/1.2.11/conanfile.py\n+++ b/recipes/zlib/1.2.11/conanfile.py\n@@ -44,8 +44,7 @@\n del self.info.options.minizip\n \n def source(self):\n- tools.get(**self.conan_data[\"sources\"][self.version])\n- os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)\n+ tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n \n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n@@ -82,20 +81,20 @@\n suffix = \"d\" if self.settings.build_type == \"Debug\" else \"\"\n \n if self.options.shared:\n- if self.settings.compiler == \"Visual Studio\":\n+ if self.settings.compiler == \"Visual Studio\" and suffix:\n current_lib = os.path.join(lib_path, \"zlib%s.lib\" % suffix)\n- os.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n+ tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n else:\n if self.settings.compiler == \"Visual Studio\":\n current_lib = os.path.join(lib_path, \"zlibstatic%s.lib\" % suffix)\n- os.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n+ tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n elif self.settings.compiler == \"gcc\":\n if self.settings.os != \"Windows\" or not self.settings.os.subsystem:\n current_lib = os.path.join(lib_path, \"libzlibstatic.a\")\n- os.rename(current_lib, os.path.join(lib_path, \"libzlib.a\"))\n+ tools.rename(current_lib, os.path.join(lib_path, \"libzlib.a\"))\n elif self.settings.compiler == \"clang\":\n current_lib = os.path.join(lib_path, \"zlibstatic.lib\")\n- os.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n+ tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n \n def _extract_license(self):\n with tools.chdir(os.path.join(self.source_folder, self._source_subfolder)):\n", "issue": "[package] all: \"Access is denied\" in os.rename() on Windows\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **almost all packages affected**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **MSVC 16**\r\n * Conan version: **conan 1.35.2**\r\n * Python version: **Python 3.8.7**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos_build=Windows\r\nos=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\ncompiler.runtime=MD\r\nbuild_type=Release\r\n```\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nThis is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774\r\nHowever most recipes still use `os.rename()` and not `tools.rename()`. 
\r\n\r\n### Log\r\n```\r\nb2/4.2.0: Configuring sources in C:\\Users\\xxx\\.conan\\data\\b2\\4.2.0\\_\\_\\source\r\nERROR: b2/4.2.0: Error in source() method, line 58\r\nos.rename(extracted_dir, \"source\")\r\nPermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'\r\n```\r\n\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, tools, CMake\nfrom conans.errors import ConanException\n\n\nclass ZlibConan(ConanFile):\n name = \"zlib\"\n version = \"1.2.11\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://zlib.net\"\n license = \"Zlib\"\n description = (\"A Massively Spiffy Yet Delicately Unobtrusive Compression Library \"\n \"(Also Free, Not to Mention Unencumbered by Patents)\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"minizip\": [True, False, \"deprecated\"]}\n default_options = {\"shared\": False, \"fPIC\": True, \"minizip\": \"deprecated\"}\n exports_sources = [\"CMakeLists.txt\", \"CMakeLists_minizip.txt\", \"patches/**\"]\n generators = \"cmake\"\n topics = (\"conan\", \"zlib\", \"compression\")\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n if self.options.shared:\n del self.options.fPIC\n\n if self.options.minizip != \"deprecated\":\n self.output.warn(\"minizip option is deprecated. Please use the new minizip/1.2.11 package\")\n\n def package_id(self):\n del self.info.options.minizip\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n with tools.chdir(self._source_subfolder):\n # https://github.com/madler/zlib/issues/268\n tools.replace_in_file('gzguts.h',\n '#if defined(_WIN32) || defined(__CYGWIN__)',\n '#if defined(_WIN32) || defined(__MINGW32__)')\n\n is_apple_clang12 = self.settings.compiler == \"apple-clang\" and tools.Version(self.settings.compiler.version) >= \"12.0\"\n if not is_apple_clang12:\n for filename in ['zconf.h', 'zconf.h.cmakein', 'zconf.h.in']:\n tools.replace_in_file(filename,\n '#ifdef HAVE_UNISTD_H '\n '/* may be set to #if 1 by ./configure */',\n '#if defined(HAVE_UNISTD_H) && (1-HAVE_UNISTD_H-1 != 0)')\n tools.replace_in_file(filename,\n '#ifdef HAVE_STDARG_H '\n '/* may be set to #if 1 by ./configure */',\n '#if defined(HAVE_STDARG_H) && (1-HAVE_STDARG_H-1 != 0)')\n\n def build(self):\n self._patch_sources()\n make_target = \"zlib\" if self.options.shared else \"zlibstatic\"\n cmake = CMake(self)\n cmake.configure(build_folder=self._build_subfolder)\n cmake.build(target=make_target)\n\n def _rename_libraries(self):\n if self.settings.os == \"Windows\":\n lib_path = os.path.join(self.package_folder, \"lib\")\n suffix = \"d\" if self.settings.build_type == \"Debug\" else \"\"\n\n if self.options.shared:\n if self.settings.compiler == \"Visual Studio\":\n current_lib = os.path.join(lib_path, \"zlib%s.lib\" % suffix)\n os.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n else:\n if self.settings.compiler == \"Visual Studio\":\n current_lib = os.path.join(lib_path, \"zlibstatic%s.lib\" % 
suffix)\n os.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n elif self.settings.compiler == \"gcc\":\n if self.settings.os != \"Windows\" or not self.settings.os.subsystem:\n current_lib = os.path.join(lib_path, \"libzlibstatic.a\")\n os.rename(current_lib, os.path.join(lib_path, \"libzlib.a\"))\n elif self.settings.compiler == \"clang\":\n current_lib = os.path.join(lib_path, \"zlibstatic.lib\")\n os.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n\n def _extract_license(self):\n with tools.chdir(os.path.join(self.source_folder, self._source_subfolder)):\n tmp = tools.load(\"zlib.h\")\n license_contents = tmp[2:tmp.find(\"*/\", 1)]\n tools.save(\"LICENSE\", license_contents)\n\n def package(self):\n self._extract_license()\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n\n # Copy headers\n for header in [\"*zlib.h\", \"*zconf.h\"]:\n self.copy(pattern=header, dst=\"include\", src=self._source_subfolder, keep_path=False)\n self.copy(pattern=header, dst=\"include\", src=self._build_subfolder, keep_path=False)\n\n # Copying static and dynamic libs\n if self.options.shared:\n self.copy(pattern=\"*.dylib*\", dst=\"lib\", src=self._build_subfolder, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=self._build_subfolder, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.dll\", dst=\"bin\", src=self._build_subfolder, keep_path=False)\n self.copy(pattern=\"*.dll.a\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n else:\n self.copy(pattern=\"*.a\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n self.copy(pattern=\"*.lib\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n\n self._rename_libraries()\n\n def package_info(self):\n self.cpp_info.libs.append(\"zlib\" if self.settings.os == \"Windows\" and not self.settings.os.subsystem else \"z\")\n self.cpp_info.names[\"cmake_find_package\"] = \"ZLIB\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"ZLIB\"\n", "path": "recipes/zlib/1.2.11/conanfile.py"}], "after_files": [{"content": "import os\nfrom conans import ConanFile, tools, CMake\nfrom conans.errors import ConanException\n\n\nclass ZlibConan(ConanFile):\n name = \"zlib\"\n version = \"1.2.11\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://zlib.net\"\n license = \"Zlib\"\n description = (\"A Massively Spiffy Yet Delicately Unobtrusive Compression Library \"\n \"(Also Free, Not to Mention Unencumbered by Patents)\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"minizip\": [True, False, \"deprecated\"]}\n default_options = {\"shared\": False, \"fPIC\": True, \"minizip\": \"deprecated\"}\n exports_sources = [\"CMakeLists.txt\", \"CMakeLists_minizip.txt\", \"patches/**\"]\n generators = \"cmake\"\n topics = (\"conan\", \"zlib\", \"compression\")\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n if self.options.shared:\n del self.options.fPIC\n\n if self.options.minizip != \"deprecated\":\n self.output.warn(\"minizip option is deprecated. 
Please use the new minizip/1.2.11 package\")\n\n def package_id(self):\n del self.info.options.minizip\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n with tools.chdir(self._source_subfolder):\n # https://github.com/madler/zlib/issues/268\n tools.replace_in_file('gzguts.h',\n '#if defined(_WIN32) || defined(__CYGWIN__)',\n '#if defined(_WIN32) || defined(__MINGW32__)')\n\n is_apple_clang12 = self.settings.compiler == \"apple-clang\" and tools.Version(self.settings.compiler.version) >= \"12.0\"\n if not is_apple_clang12:\n for filename in ['zconf.h', 'zconf.h.cmakein', 'zconf.h.in']:\n tools.replace_in_file(filename,\n '#ifdef HAVE_UNISTD_H '\n '/* may be set to #if 1 by ./configure */',\n '#if defined(HAVE_UNISTD_H) && (1-HAVE_UNISTD_H-1 != 0)')\n tools.replace_in_file(filename,\n '#ifdef HAVE_STDARG_H '\n '/* may be set to #if 1 by ./configure */',\n '#if defined(HAVE_STDARG_H) && (1-HAVE_STDARG_H-1 != 0)')\n\n def build(self):\n self._patch_sources()\n make_target = \"zlib\" if self.options.shared else \"zlibstatic\"\n cmake = CMake(self)\n cmake.configure(build_folder=self._build_subfolder)\n cmake.build(target=make_target)\n\n def _rename_libraries(self):\n if self.settings.os == \"Windows\":\n lib_path = os.path.join(self.package_folder, \"lib\")\n suffix = \"d\" if self.settings.build_type == \"Debug\" else \"\"\n\n if self.options.shared:\n if self.settings.compiler == \"Visual Studio\" and suffix:\n current_lib = os.path.join(lib_path, \"zlib%s.lib\" % suffix)\n tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n else:\n if self.settings.compiler == \"Visual Studio\":\n current_lib = os.path.join(lib_path, \"zlibstatic%s.lib\" % suffix)\n tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n elif self.settings.compiler == \"gcc\":\n if self.settings.os != \"Windows\" or not self.settings.os.subsystem:\n current_lib = os.path.join(lib_path, \"libzlibstatic.a\")\n tools.rename(current_lib, os.path.join(lib_path, \"libzlib.a\"))\n elif self.settings.compiler == \"clang\":\n current_lib = os.path.join(lib_path, \"zlibstatic.lib\")\n tools.rename(current_lib, os.path.join(lib_path, \"zlib.lib\"))\n\n def _extract_license(self):\n with tools.chdir(os.path.join(self.source_folder, self._source_subfolder)):\n tmp = tools.load(\"zlib.h\")\n license_contents = tmp[2:tmp.find(\"*/\", 1)]\n tools.save(\"LICENSE\", license_contents)\n\n def package(self):\n self._extract_license()\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n\n # Copy headers\n for header in [\"*zlib.h\", \"*zconf.h\"]:\n self.copy(pattern=header, dst=\"include\", src=self._source_subfolder, keep_path=False)\n self.copy(pattern=header, dst=\"include\", src=self._build_subfolder, keep_path=False)\n\n # Copying static and dynamic libs\n if self.options.shared:\n self.copy(pattern=\"*.dylib*\", dst=\"lib\", src=self._build_subfolder, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=self._build_subfolder, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.dll\", dst=\"bin\", src=self._build_subfolder, keep_path=False)\n self.copy(pattern=\"*.dll.a\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n else:\n self.copy(pattern=\"*.a\", dst=\"lib\", src=self._build_subfolder, keep_path=False)\n self.copy(pattern=\"*.lib\", dst=\"lib\", 
src=self._build_subfolder, keep_path=False)\n\n self._rename_libraries()\n\n def package_info(self):\n self.cpp_info.libs.append(\"zlib\" if self.settings.os == \"Windows\" and not self.settings.os.subsystem else \"z\")\n self.cpp_info.names[\"cmake_find_package\"] = \"ZLIB\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"ZLIB\"\n", "path": "recipes/zlib/1.2.11/conanfile.py"}]}
2,316
535
gh_patches_debug_40081
rasdani/github-patches
git_diff
airctic__icevision-518
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Stop using dataclass for masks Using `@dataclass` has proven to have more disadvantages than advantages, switching to normal classes should be very straight forward. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `icevision/core/mask.py` Content: ``` 1 __all__ = [ 2 "Mask", 3 "MaskArray", 4 "MaskFile", 5 "VocMaskFile", 6 "RLE", 7 "Polygon", 8 "EncodedRLEs", 9 ] 10 11 from icevision.imports import * 12 from icevision.utils import * 13 from PIL import Image 14 15 16 class Mask(ABC): 17 @abstractmethod 18 def to_mask(self, h, w) -> "MaskArray": 19 pass 20 21 @abstractmethod 22 def to_erles(self, h, w) -> "EncodedRLEs": 23 pass 24 25 26 class EncodedRLEs(Mask): 27 def __init__(self, erles: List[dict] = None): 28 self.erles = erles or [] 29 30 def __repr__(self): 31 return f"<{self.__class__.__name__} with {len(self)} objects>" 32 33 def __len__(self): 34 return len(self.erles) 35 36 def __eq__(self, other): 37 if isinstance(other, self.__class__): 38 return self.erles == other.erles 39 return False 40 41 def append(self, v: "EncodedRLEs"): 42 self.erles.extend(v.erles) 43 44 def extend(self, v: List["EncodedRLEs"]): 45 for o in v: 46 self.append(o) 47 48 def pop(self, i: int): 49 self.erles.pop(i) 50 51 def to_mask(self, h, w) -> "MaskArray": 52 mask = mask_utils.decode(self.erles) 53 mask = mask.transpose(2, 0, 1) # channels first 54 return MaskArray(mask) 55 56 def to_erles(self, h, w) -> "EncodedRLEs": 57 return self 58 59 60 # TODO: Assert shape? (bs, height, width) 61 @dataclass 62 class MaskArray(Mask): 63 """Binary numpy array representation of a mask. 
64 65 (num_instances, height, width) 66 """ 67 68 data: np.ndarray 69 70 def __post_init__(self): 71 self.data = self.data.astype(np.uint8) 72 73 def __len__(self): 74 return len(self.data) 75 76 def __getitem__(self, i): 77 return type(self)(self.data[i]) 78 79 def to_tensor(self): 80 return tensor(self.data, dtype=torch.uint8) 81 82 def to_mask(self, h, w): 83 return self 84 85 def to_erles(self, h, w) -> EncodedRLEs: 86 return EncodedRLEs( 87 mask_utils.encode(np.asfortranarray(self.data.transpose(1, 2, 0))) 88 ) 89 90 def to_coco_rle(self, h, w) -> List[dict]: 91 """From https://stackoverflow.com/a/49547872/6772672""" 92 assert self.data.shape[1:] == (h, w) 93 rles = [] 94 for mask in self.data: 95 counts = [] 96 flat = itertools.groupby(mask.ravel(order="F")) 97 for i, (value, elements) in enumerate(flat): 98 if i == 0 and value == 1: 99 counts.append(0) 100 counts.append(len(list(elements))) 101 rles.append({"counts": counts, "size": (h, w)}) 102 return rles 103 104 @property 105 def shape(self): 106 return self.data.shape 107 108 @classmethod 109 def from_masks(cls, masks: Union[EncodedRLEs, Sequence[Mask]], h: int, w: int): 110 # HACK: check for backwards compatibility 111 if isinstance(masks, EncodedRLEs): 112 return masks.to_mask(h, w) 113 else: 114 masks_arrays = [o.to_mask(h=h, w=w).data for o in masks] 115 return cls(np.concatenate(masks_arrays)) 116 117 118 @dataclass 119 class MaskFile(Mask): 120 filepath: Union[str, Path] 121 122 def __post_init__(self): 123 self.filepath = Path(self.filepath) 124 125 def to_mask(self, h, w): 126 mask = open_img(self.filepath, gray=True) 127 obj_ids = np.unique(mask)[1:] 128 masks = mask == obj_ids[:, None, None] 129 return MaskArray(masks) 130 131 def to_coco_rle(self, h, w) -> List[dict]: 132 return self.to_mask(h=h, w=w).to_coco_rle(h=h, w=w) 133 134 def to_erles(self, h, w) -> EncodedRLEs: 135 return self.to_mask(h, w).to_erles(h, w) 136 137 138 @dataclass 139 class VocMaskFile(MaskFile): 140 """Extension of `MaskFile` for VOC masks. 141 Removes the color pallete and optionally drops void pixels. 142 143 Args: 144 drop_void (bool): drops the void pixels, which should have the value 255. 145 """ 146 147 drop_void: bool = True 148 149 def to_mask(self, h, w) -> MaskArray: 150 mask_arr = np.array(Image.open(self.filepath)) 151 obj_ids = np.unique(mask_arr)[1:] 152 masks = mask_arr == obj_ids[:, None, None] 153 154 if self.drop_void: 155 masks = masks[:-1, ...] 
156 157 return MaskArray(masks) 158 159 160 @dataclass(frozen=True) 161 class RLE(Mask): 162 counts: List[int] 163 164 def to_mask(self, h, w) -> "MaskArray": 165 return self.to_erles(h=h, w=w).to_mask(h=h, w=w) 166 # Convert kaggle counts to mask 167 # "From https://www.kaggle.com/julienbeaulieu/imaterialist-detectron2" 168 # mask = np.full(h * w, 0, dtype=np.uint8) 169 # for start, ones in zip(self.counts[::2], self.counts[1::2]): 170 # # counting starts on one 171 # start -= 1 172 # if ones: 173 # mask[start : start + ones] = 1 174 # mask = mask.reshape((h, w), order="F") 175 # return MaskArray(mask) 176 177 def to_coco(self) -> List[int]: 178 return self.counts 179 180 def to_erles(self, h, w) -> EncodedRLEs: 181 return EncodedRLEs( 182 mask_utils.frPyObjects([{"counts": self.to_coco(), "size": [h, w]}], h, w) 183 ) 184 185 @classmethod 186 def from_string(cls, s, sep=" "): 187 return cls(lmap(int, s.split(sep))) 188 189 @classmethod 190 def from_kaggle(cls, counts: Sequence[int]): 191 """Described [here](https://www.kaggle.com/c/imaterialist-fashion-2020-fgvc7/overview/evaluation)""" 192 if len(counts) % 2 != 0: 193 raise ValueError("Counts must be divisible by 2") 194 195 current = 1 196 coco_counts = [] 197 for start, count in zip(counts[::2], counts[1::2]): 198 coco_counts.append(start - current) # zeros 199 coco_counts.append(count) # ones 200 current = start + count 201 202 # remove trailing zero 203 if coco_counts[-1] == 0: 204 coco_counts.pop(-1) 205 206 return cls.from_coco(coco_counts) 207 208 @classmethod 209 def from_coco(cls, counts: Sequence[int]): 210 """Described [here](https://stackoverflow.com/a/49547872/6772672)""" 211 return cls(counts) 212 # Convert from kaggle to coco 213 # when counts is odd, round it with 0 ones at the end 214 # if len(counts) % 2 != 0: 215 # counts = counts + [0] 216 # 217 # kaggle_counts, total = [], 0 218 # for zeros, ones in zip(counts[::2], counts[1::2]): 219 # start = zeros + total + 1 220 # kaggle_counts.extend([start, ones]) 221 # total += zeros + ones 222 # return cls.from_kaggle(kaggle_counts) 223 224 225 @dataclass(frozen=True) 226 class Polygon(Mask): 227 points: List[List[int]] 228 229 def to_mask(self, h, w): 230 return self.to_erles(h=h, w=w).to_mask(h=h, w=w) 231 232 def to_erles(self, h, w) -> EncodedRLEs: 233 erles = mask_utils.frPyObjects(self.points, h, w) 234 erle = mask_utils.merge(erles) # make unconnected polygons a single mask 235 return EncodedRLEs([erle]) 236 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/icevision/core/mask.py b/icevision/core/mask.py --- a/icevision/core/mask.py +++ b/icevision/core/mask.py @@ -58,17 +58,15 @@ # TODO: Assert shape? (bs, height, width) -@dataclass class MaskArray(Mask): """Binary numpy array representation of a mask. - (num_instances, height, width) + # Arguments + data: Mask array, with the dimensions: (num_instances, height, width) """ - data: np.ndarray - - def __post_init__(self): - self.data = self.data.astype(np.uint8) + def __init__(self, data: np.uint8): + self.data = data.astype(np.uint8) def __len__(self): return len(self.data) @@ -115,12 +113,15 @@ return cls(np.concatenate(masks_arrays)) -@dataclass class MaskFile(Mask): - filepath: Union[str, Path] + """Holds the path to mask image file. + + # Arguments + filepath: Path to the mask image file. + """ - def __post_init__(self): - self.filepath = Path(self.filepath) + def __init__(self, filepath: Union[str, Path]): + self.filepath = Path(filepath) def to_mask(self, h, w): mask = open_img(self.filepath, gray=True) @@ -135,16 +136,18 @@ return self.to_mask(h, w).to_erles(h, w) -@dataclass class VocMaskFile(MaskFile): """Extension of `MaskFile` for VOC masks. Removes the color pallete and optionally drops void pixels. - Args: - drop_void (bool): drops the void pixels, which should have the value 255. + # Arguments + drop_void (bool): drops the void pixels, which should have the value 255. + filepath: Path to the mask image file. """ - drop_void: bool = True + def __init__(self, filepath: Union[str, Path], drop_void: bool = True): + super().__init__(filepath=filepath) + self.drop_void = drop_void def to_mask(self, h, w) -> MaskArray: mask_arr = np.array(Image.open(self.filepath)) @@ -157,9 +160,15 @@ return MaskArray(masks) -@dataclass(frozen=True) class RLE(Mask): - counts: List[int] + """Run length encoding of a mask. + + Don't instantiate this class directly, instead use the classmethods + `from_coco` and `from_kaggle`. + """ + + def __init__(self, counts: List[int]): + self.counts = counts def to_mask(self, h, w) -> "MaskArray": return self.to_erles(h=h, w=w).to_mask(h=h, w=w) @@ -222,9 +231,15 @@ # return cls.from_kaggle(kaggle_counts) -@dataclass(frozen=True) class Polygon(Mask): - points: List[List[int]] + """Polygon representation of a mask + + # Arguments + points: The vertices of the polygon in the COCO standard format. + """ + + def __init__(self, points: List[List[int]]): + self.points = points def to_mask(self, h, w): return self.to_erles(h=h, w=w).to_mask(h=h, w=w)
{"golden_diff": "diff --git a/icevision/core/mask.py b/icevision/core/mask.py\n--- a/icevision/core/mask.py\n+++ b/icevision/core/mask.py\n@@ -58,17 +58,15 @@\n \n \n # TODO: Assert shape? (bs, height, width)\n-@dataclass\n class MaskArray(Mask):\n \"\"\"Binary numpy array representation of a mask.\n \n- (num_instances, height, width)\n+ # Arguments\n+ data: Mask array, with the dimensions: (num_instances, height, width)\n \"\"\"\n \n- data: np.ndarray\n-\n- def __post_init__(self):\n- self.data = self.data.astype(np.uint8)\n+ def __init__(self, data: np.uint8):\n+ self.data = data.astype(np.uint8)\n \n def __len__(self):\n return len(self.data)\n@@ -115,12 +113,15 @@\n return cls(np.concatenate(masks_arrays))\n \n \n-@dataclass\n class MaskFile(Mask):\n- filepath: Union[str, Path]\n+ \"\"\"Holds the path to mask image file.\n+\n+ # Arguments\n+ filepath: Path to the mask image file.\n+ \"\"\"\n \n- def __post_init__(self):\n- self.filepath = Path(self.filepath)\n+ def __init__(self, filepath: Union[str, Path]):\n+ self.filepath = Path(filepath)\n \n def to_mask(self, h, w):\n mask = open_img(self.filepath, gray=True)\n@@ -135,16 +136,18 @@\n return self.to_mask(h, w).to_erles(h, w)\n \n \n-@dataclass\n class VocMaskFile(MaskFile):\n \"\"\"Extension of `MaskFile` for VOC masks.\n Removes the color pallete and optionally drops void pixels.\n \n- Args:\n- drop_void (bool): drops the void pixels, which should have the value 255.\n+ # Arguments\n+ drop_void (bool): drops the void pixels, which should have the value 255.\n+ filepath: Path to the mask image file.\n \"\"\"\n \n- drop_void: bool = True\n+ def __init__(self, filepath: Union[str, Path], drop_void: bool = True):\n+ super().__init__(filepath=filepath)\n+ self.drop_void = drop_void\n \n def to_mask(self, h, w) -> MaskArray:\n mask_arr = np.array(Image.open(self.filepath))\n@@ -157,9 +160,15 @@\n return MaskArray(masks)\n \n \n-@dataclass(frozen=True)\n class RLE(Mask):\n- counts: List[int]\n+ \"\"\"Run length encoding of a mask.\n+\n+ Don't instantiate this class directly, instead use the classmethods\n+ `from_coco` and `from_kaggle`.\n+ \"\"\"\n+\n+ def __init__(self, counts: List[int]):\n+ self.counts = counts\n \n def to_mask(self, h, w) -> \"MaskArray\":\n return self.to_erles(h=h, w=w).to_mask(h=h, w=w)\n@@ -222,9 +231,15 @@\n # return cls.from_kaggle(kaggle_counts)\n \n \n-@dataclass(frozen=True)\n class Polygon(Mask):\n- points: List[List[int]]\n+ \"\"\"Polygon representation of a mask\n+\n+ # Arguments\n+ points: The vertices of the polygon in the COCO standard format.\n+ \"\"\"\n+\n+ def __init__(self, points: List[List[int]]):\n+ self.points = points\n \n def to_mask(self, h, w):\n return self.to_erles(h=h, w=w).to_mask(h=h, w=w)\n", "issue": "Stop using dataclass for masks\nUsing `@dataclass` has proven to have more disadvantages than advantages, switching to normal classes should be very straight forward. 
\n", "before_files": [{"content": "__all__ = [\n \"Mask\",\n \"MaskArray\",\n \"MaskFile\",\n \"VocMaskFile\",\n \"RLE\",\n \"Polygon\",\n \"EncodedRLEs\",\n]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom PIL import Image\n\n\nclass Mask(ABC):\n @abstractmethod\n def to_mask(self, h, w) -> \"MaskArray\":\n pass\n\n @abstractmethod\n def to_erles(self, h, w) -> \"EncodedRLEs\":\n pass\n\n\nclass EncodedRLEs(Mask):\n def __init__(self, erles: List[dict] = None):\n self.erles = erles or []\n\n def __repr__(self):\n return f\"<{self.__class__.__name__} with {len(self)} objects>\"\n\n def __len__(self):\n return len(self.erles)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.erles == other.erles\n return False\n\n def append(self, v: \"EncodedRLEs\"):\n self.erles.extend(v.erles)\n\n def extend(self, v: List[\"EncodedRLEs\"]):\n for o in v:\n self.append(o)\n\n def pop(self, i: int):\n self.erles.pop(i)\n\n def to_mask(self, h, w) -> \"MaskArray\":\n mask = mask_utils.decode(self.erles)\n mask = mask.transpose(2, 0, 1) # channels first\n return MaskArray(mask)\n\n def to_erles(self, h, w) -> \"EncodedRLEs\":\n return self\n\n\n# TODO: Assert shape? (bs, height, width)\n@dataclass\nclass MaskArray(Mask):\n \"\"\"Binary numpy array representation of a mask.\n\n (num_instances, height, width)\n \"\"\"\n\n data: np.ndarray\n\n def __post_init__(self):\n self.data = self.data.astype(np.uint8)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return type(self)(self.data[i])\n\n def to_tensor(self):\n return tensor(self.data, dtype=torch.uint8)\n\n def to_mask(self, h, w):\n return self\n\n def to_erles(self, h, w) -> EncodedRLEs:\n return EncodedRLEs(\n mask_utils.encode(np.asfortranarray(self.data.transpose(1, 2, 0)))\n )\n\n def to_coco_rle(self, h, w) -> List[dict]:\n \"\"\"From https://stackoverflow.com/a/49547872/6772672\"\"\"\n assert self.data.shape[1:] == (h, w)\n rles = []\n for mask in self.data:\n counts = []\n flat = itertools.groupby(mask.ravel(order=\"F\"))\n for i, (value, elements) in enumerate(flat):\n if i == 0 and value == 1:\n counts.append(0)\n counts.append(len(list(elements)))\n rles.append({\"counts\": counts, \"size\": (h, w)})\n return rles\n\n @property\n def shape(self):\n return self.data.shape\n\n @classmethod\n def from_masks(cls, masks: Union[EncodedRLEs, Sequence[Mask]], h: int, w: int):\n # HACK: check for backwards compatibility\n if isinstance(masks, EncodedRLEs):\n return masks.to_mask(h, w)\n else:\n masks_arrays = [o.to_mask(h=h, w=w).data for o in masks]\n return cls(np.concatenate(masks_arrays))\n\n\n@dataclass\nclass MaskFile(Mask):\n filepath: Union[str, Path]\n\n def __post_init__(self):\n self.filepath = Path(self.filepath)\n\n def to_mask(self, h, w):\n mask = open_img(self.filepath, gray=True)\n obj_ids = np.unique(mask)[1:]\n masks = mask == obj_ids[:, None, None]\n return MaskArray(masks)\n\n def to_coco_rle(self, h, w) -> List[dict]:\n return self.to_mask(h=h, w=w).to_coco_rle(h=h, w=w)\n\n def to_erles(self, h, w) -> EncodedRLEs:\n return self.to_mask(h, w).to_erles(h, w)\n\n\n@dataclass\nclass VocMaskFile(MaskFile):\n \"\"\"Extension of `MaskFile` for VOC masks.\n Removes the color pallete and optionally drops void pixels.\n\n Args:\n drop_void (bool): drops the void pixels, which should have the value 255.\n \"\"\"\n\n drop_void: bool = True\n\n def to_mask(self, h, w) -> MaskArray:\n mask_arr = np.array(Image.open(self.filepath))\n obj_ids = 
np.unique(mask_arr)[1:]\n masks = mask_arr == obj_ids[:, None, None]\n\n if self.drop_void:\n masks = masks[:-1, ...]\n\n return MaskArray(masks)\n\n\n@dataclass(frozen=True)\nclass RLE(Mask):\n counts: List[int]\n\n def to_mask(self, h, w) -> \"MaskArray\":\n return self.to_erles(h=h, w=w).to_mask(h=h, w=w)\n # Convert kaggle counts to mask\n # \"From https://www.kaggle.com/julienbeaulieu/imaterialist-detectron2\"\n # mask = np.full(h * w, 0, dtype=np.uint8)\n # for start, ones in zip(self.counts[::2], self.counts[1::2]):\n # # counting starts on one\n # start -= 1\n # if ones:\n # mask[start : start + ones] = 1\n # mask = mask.reshape((h, w), order=\"F\")\n # return MaskArray(mask)\n\n def to_coco(self) -> List[int]:\n return self.counts\n\n def to_erles(self, h, w) -> EncodedRLEs:\n return EncodedRLEs(\n mask_utils.frPyObjects([{\"counts\": self.to_coco(), \"size\": [h, w]}], h, w)\n )\n\n @classmethod\n def from_string(cls, s, sep=\" \"):\n return cls(lmap(int, s.split(sep)))\n\n @classmethod\n def from_kaggle(cls, counts: Sequence[int]):\n \"\"\"Described [here](https://www.kaggle.com/c/imaterialist-fashion-2020-fgvc7/overview/evaluation)\"\"\"\n if len(counts) % 2 != 0:\n raise ValueError(\"Counts must be divisible by 2\")\n\n current = 1\n coco_counts = []\n for start, count in zip(counts[::2], counts[1::2]):\n coco_counts.append(start - current) # zeros\n coco_counts.append(count) # ones\n current = start + count\n\n # remove trailing zero\n if coco_counts[-1] == 0:\n coco_counts.pop(-1)\n\n return cls.from_coco(coco_counts)\n\n @classmethod\n def from_coco(cls, counts: Sequence[int]):\n \"\"\"Described [here](https://stackoverflow.com/a/49547872/6772672)\"\"\"\n return cls(counts)\n # Convert from kaggle to coco\n # when counts is odd, round it with 0 ones at the end\n # if len(counts) % 2 != 0:\n # counts = counts + [0]\n #\n # kaggle_counts, total = [], 0\n # for zeros, ones in zip(counts[::2], counts[1::2]):\n # start = zeros + total + 1\n # kaggle_counts.extend([start, ones])\n # total += zeros + ones\n # return cls.from_kaggle(kaggle_counts)\n\n\n@dataclass(frozen=True)\nclass Polygon(Mask):\n points: List[List[int]]\n\n def to_mask(self, h, w):\n return self.to_erles(h=h, w=w).to_mask(h=h, w=w)\n\n def to_erles(self, h, w) -> EncodedRLEs:\n erles = mask_utils.frPyObjects(self.points, h, w)\n erle = mask_utils.merge(erles) # make unconnected polygons a single mask\n return EncodedRLEs([erle])\n", "path": "icevision/core/mask.py"}], "after_files": [{"content": "__all__ = [\n \"Mask\",\n \"MaskArray\",\n \"MaskFile\",\n \"VocMaskFile\",\n \"RLE\",\n \"Polygon\",\n \"EncodedRLEs\",\n]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom PIL import Image\n\n\nclass Mask(ABC):\n @abstractmethod\n def to_mask(self, h, w) -> \"MaskArray\":\n pass\n\n @abstractmethod\n def to_erles(self, h, w) -> \"EncodedRLEs\":\n pass\n\n\nclass EncodedRLEs(Mask):\n def __init__(self, erles: List[dict] = None):\n self.erles = erles or []\n\n def __repr__(self):\n return f\"<{self.__class__.__name__} with {len(self)} objects>\"\n\n def __len__(self):\n return len(self.erles)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.erles == other.erles\n return False\n\n def append(self, v: \"EncodedRLEs\"):\n self.erles.extend(v.erles)\n\n def extend(self, v: List[\"EncodedRLEs\"]):\n for o in v:\n self.append(o)\n\n def pop(self, i: int):\n self.erles.pop(i)\n\n def to_mask(self, h, w) -> \"MaskArray\":\n mask = mask_utils.decode(self.erles)\n mask = 
mask.transpose(2, 0, 1) # channels first\n return MaskArray(mask)\n\n def to_erles(self, h, w) -> \"EncodedRLEs\":\n return self\n\n\n# TODO: Assert shape? (bs, height, width)\nclass MaskArray(Mask):\n \"\"\"Binary numpy array representation of a mask.\n\n # Arguments\n data: Mask array, with the dimensions: (num_instances, height, width)\n \"\"\"\n\n def __init__(self, data: np.uint8):\n self.data = data.astype(np.uint8)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return type(self)(self.data[i])\n\n def to_tensor(self):\n return tensor(self.data, dtype=torch.uint8)\n\n def to_mask(self, h, w):\n return self\n\n def to_erles(self, h, w) -> EncodedRLEs:\n return EncodedRLEs(\n mask_utils.encode(np.asfortranarray(self.data.transpose(1, 2, 0)))\n )\n\n def to_coco_rle(self, h, w) -> List[dict]:\n \"\"\"From https://stackoverflow.com/a/49547872/6772672\"\"\"\n assert self.data.shape[1:] == (h, w)\n rles = []\n for mask in self.data:\n counts = []\n flat = itertools.groupby(mask.ravel(order=\"F\"))\n for i, (value, elements) in enumerate(flat):\n if i == 0 and value == 1:\n counts.append(0)\n counts.append(len(list(elements)))\n rles.append({\"counts\": counts, \"size\": (h, w)})\n return rles\n\n @property\n def shape(self):\n return self.data.shape\n\n @classmethod\n def from_masks(cls, masks: Union[EncodedRLEs, Sequence[Mask]], h: int, w: int):\n # HACK: check for backwards compatibility\n if isinstance(masks, EncodedRLEs):\n return masks.to_mask(h, w)\n else:\n masks_arrays = [o.to_mask(h=h, w=w).data for o in masks]\n return cls(np.concatenate(masks_arrays))\n\n\nclass MaskFile(Mask):\n \"\"\"Holds the path to mask image file.\n\n # Arguments\n filepath: Path to the mask image file.\n \"\"\"\n\n def __init__(self, filepath: Union[str, Path]):\n self.filepath = Path(filepath)\n\n def to_mask(self, h, w):\n mask = open_img(self.filepath, gray=True)\n obj_ids = np.unique(mask)[1:]\n masks = mask == obj_ids[:, None, None]\n return MaskArray(masks)\n\n def to_coco_rle(self, h, w) -> List[dict]:\n return self.to_mask(h=h, w=w).to_coco_rle(h=h, w=w)\n\n def to_erles(self, h, w) -> EncodedRLEs:\n return self.to_mask(h, w).to_erles(h, w)\n\n\nclass VocMaskFile(MaskFile):\n \"\"\"Extension of `MaskFile` for VOC masks.\n Removes the color pallete and optionally drops void pixels.\n\n # Arguments\n drop_void (bool): drops the void pixels, which should have the value 255.\n filepath: Path to the mask image file.\n \"\"\"\n\n def __init__(self, filepath: Union[str, Path], drop_void: bool = True):\n super().__init__(filepath=filepath)\n self.drop_void = drop_void\n\n def to_mask(self, h, w) -> MaskArray:\n mask_arr = np.array(Image.open(self.filepath))\n obj_ids = np.unique(mask_arr)[1:]\n masks = mask_arr == obj_ids[:, None, None]\n\n if self.drop_void:\n masks = masks[:-1, ...]\n\n return MaskArray(masks)\n\n\nclass RLE(Mask):\n \"\"\"Run length encoding of a mask.\n\n Don't instantiate this class directly, instead use the classmethods\n `from_coco` and `from_kaggle`.\n \"\"\"\n\n def __init__(self, counts: List[int]):\n self.counts = counts\n\n def to_mask(self, h, w) -> \"MaskArray\":\n return self.to_erles(h=h, w=w).to_mask(h=h, w=w)\n # Convert kaggle counts to mask\n # \"From https://www.kaggle.com/julienbeaulieu/imaterialist-detectron2\"\n # mask = np.full(h * w, 0, dtype=np.uint8)\n # for start, ones in zip(self.counts[::2], self.counts[1::2]):\n # # counting starts on one\n # start -= 1\n # if ones:\n # mask[start : start + ones] = 1\n # mask = 
mask.reshape((h, w), order=\"F\")\n # return MaskArray(mask)\n\n def to_coco(self) -> List[int]:\n return self.counts\n\n def to_erles(self, h, w) -> EncodedRLEs:\n return EncodedRLEs(\n mask_utils.frPyObjects([{\"counts\": self.to_coco(), \"size\": [h, w]}], h, w)\n )\n\n @classmethod\n def from_string(cls, s, sep=\" \"):\n return cls(lmap(int, s.split(sep)))\n\n @classmethod\n def from_kaggle(cls, counts: Sequence[int]):\n \"\"\"Described [here](https://www.kaggle.com/c/imaterialist-fashion-2020-fgvc7/overview/evaluation)\"\"\"\n if len(counts) % 2 != 0:\n raise ValueError(\"Counts must be divisible by 2\")\n\n current = 1\n coco_counts = []\n for start, count in zip(counts[::2], counts[1::2]):\n coco_counts.append(start - current) # zeros\n coco_counts.append(count) # ones\n current = start + count\n\n # remove trailing zero\n if coco_counts[-1] == 0:\n coco_counts.pop(-1)\n\n return cls.from_coco(coco_counts)\n\n @classmethod\n def from_coco(cls, counts: Sequence[int]):\n \"\"\"Described [here](https://stackoverflow.com/a/49547872/6772672)\"\"\"\n return cls(counts)\n # Convert from kaggle to coco\n # when counts is odd, round it with 0 ones at the end\n # if len(counts) % 2 != 0:\n # counts = counts + [0]\n #\n # kaggle_counts, total = [], 0\n # for zeros, ones in zip(counts[::2], counts[1::2]):\n # start = zeros + total + 1\n # kaggle_counts.extend([start, ones])\n # total += zeros + ones\n # return cls.from_kaggle(kaggle_counts)\n\n\nclass Polygon(Mask):\n \"\"\"Polygon representation of a mask\n\n # Arguments\n points: The vertices of the polygon in the COCO standard format.\n \"\"\"\n\n def __init__(self, points: List[List[int]]):\n self.points = points\n\n def to_mask(self, h, w):\n return self.to_erles(h=h, w=w).to_mask(h=h, w=w)\n\n def to_erles(self, h, w) -> EncodedRLEs:\n erles = mask_utils.frPyObjects(self.points, h, w)\n erle = mask_utils.merge(erles) # make unconnected polygons a single mask\n return EncodedRLEs([erle])\n", "path": "icevision/core/mask.py"}]}
2,800
836
gh_patches_debug_36923
rasdani/github-patches
git_diff
rasterio__rasterio-1152
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fillnodata does not use nodata values ## Expected behavior and actual behavior. I expected `fillnodata` to automatically use the `source.nodata` value, but it didn't. A workaround is to read the corresponding mask. (See https://github.com/mapbox/rasterio/pull/253#issuecomment-325963662 and on) ## Steps to reproduce the problem. ``` $ rio info N00E006.hgt {"bounds": [5.999583333333334, -0.0004166666666667318, 7.000416666666667, 1.0004166666666667], "colorinterp": ["undefined"], "count": 1, "crs": "EPSG:4326", "descriptions": [null], "driver": "SRTMHGT", "dtype": "int16", "height": 1201, "indexes": [1], "lnglat": [6.5, 0.5], "mask_flags": [["nodata"]], "nodata": -32768.0, "res": [0.0008333333333333334, 0.0008333333333333334], "shape": [1201, 1201], "tiled": false, "transform": [0.0008333333333333334, 0.0, 5.999583333333334, 0.0, -0.0008333333333333334, 1.0004166666666667, 0.0, 0.0, 1.0], "units": ["m"], "width": 1201} $ gdalinfo N00E006.hgt Driver: SRTMHGT/SRTMHGT File Format Files: N00E006.hgt Size is 1201, 1201 Coordinate System is: GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], TOWGS84[0,0,0,0,0,0,0], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.0174532925199433, AUTHORITY["EPSG","9108"]], AUTHORITY["EPSG","4326"]] Origin = (5.999583333333334,1.000416666666667) Pixel Size = (0.000833333333333,-0.000833333333333) Metadata: AREA_OR_POINT=Point Corner Coordinates: Upper Left ( 5.9995833, 1.0004167) ( 5d59'58.50"E, 1d 0' 1.50"N) Lower Left ( 5.9995833, -0.0004167) ( 5d59'58.50"E, 0d 0' 1.50"S) Upper Right ( 7.0004167, 1.0004167) ( 7d 0' 1.50"E, 1d 0' 1.50"N) Lower Right ( 7.0004167, -0.0004167) ( 7d 0' 1.50"E, 0d 0' 1.50"S) Center ( 6.5000000, 0.5000000) ( 6d30' 0.00"E, 0d30' 0.00"N) Band 1 Block=1201x1 Type=Int16, ColorInterp=Undefined NoData Value=-32768 Unit Type: m $ python Python 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32) [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import rasterio >>> source = rasterio.open("N00E006.hgt", 'r') >>> source.nodata -32768.0 >>> from rasterio.fill import fillnodata >>> data = source.read(1) >>> source.nodata in data True >>> source.nodata in fillnodata(data) True >>> (data == fillnodata(data)).all() True >>> source.nodata in fillnodata(data, mask=source.read_masks(1)) False ``` ## Operating system Linux Mint, 64bit ## Rasterio version and provenance 1.0a9 manylinux1 wheel installed from PyPI using pip. ``` $ conda list rasterio # packages in environment at /home/juanlu/.miniconda36/envs/env: # rasterio 1.0a9 <pip> $ pip list | grep rasterio DEPRECATION: The default format will switch to columns in the future. You can use --format=(legacy|columns) (or define a format=(legacy|columns) in your pip.conf under the [list] section) to disable this warning. rasterio (1.0a9) $ python -q >>> import rasterio >>> rasterio.__version__ '1.0a9' ``` fillnodata does not use nodata values ## Expected behavior and actual behavior. I expected `fillnodata` to automatically use the `source.nodata` value, but it didn't. A workaround is to read the corresponding mask. (See https://github.com/mapbox/rasterio/pull/253#issuecomment-325963662 and on) ## Steps to reproduce the problem. 
``` $ rio info N00E006.hgt {"bounds": [5.999583333333334, -0.0004166666666667318, 7.000416666666667, 1.0004166666666667], "colorinterp": ["undefined"], "count": 1, "crs": "EPSG:4326", "descriptions": [null], "driver": "SRTMHGT", "dtype": "int16", "height": 1201, "indexes": [1], "lnglat": [6.5, 0.5], "mask_flags": [["nodata"]], "nodata": -32768.0, "res": [0.0008333333333333334, 0.0008333333333333334], "shape": [1201, 1201], "tiled": false, "transform": [0.0008333333333333334, 0.0, 5.999583333333334, 0.0, -0.0008333333333333334, 1.0004166666666667, 0.0, 0.0, 1.0], "units": ["m"], "width": 1201} $ gdalinfo N00E006.hgt Driver: SRTMHGT/SRTMHGT File Format Files: N00E006.hgt Size is 1201, 1201 Coordinate System is: GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], TOWGS84[0,0,0,0,0,0,0], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.0174532925199433, AUTHORITY["EPSG","9108"]], AUTHORITY["EPSG","4326"]] Origin = (5.999583333333334,1.000416666666667) Pixel Size = (0.000833333333333,-0.000833333333333) Metadata: AREA_OR_POINT=Point Corner Coordinates: Upper Left ( 5.9995833, 1.0004167) ( 5d59'58.50"E, 1d 0' 1.50"N) Lower Left ( 5.9995833, -0.0004167) ( 5d59'58.50"E, 0d 0' 1.50"S) Upper Right ( 7.0004167, 1.0004167) ( 7d 0' 1.50"E, 1d 0' 1.50"N) Lower Right ( 7.0004167, -0.0004167) ( 7d 0' 1.50"E, 0d 0' 1.50"S) Center ( 6.5000000, 0.5000000) ( 6d30' 0.00"E, 0d30' 0.00"N) Band 1 Block=1201x1 Type=Int16, ColorInterp=Undefined NoData Value=-32768 Unit Type: m $ python Python 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32) [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import rasterio >>> source = rasterio.open("N00E006.hgt", 'r') >>> source.nodata -32768.0 >>> from rasterio.fill import fillnodata >>> data = source.read(1) >>> source.nodata in data True >>> source.nodata in fillnodata(data) True >>> (data == fillnodata(data)).all() True >>> source.nodata in fillnodata(data, mask=source.read_masks(1)) False ``` ## Operating system Linux Mint, 64bit ## Rasterio version and provenance 1.0a9 manylinux1 wheel installed from PyPI using pip. ``` $ conda list rasterio # packages in environment at /home/juanlu/.miniconda36/envs/env: # rasterio 1.0a9 <pip> $ pip list | grep rasterio DEPRECATION: The default format will switch to columns in the future. You can use --format=(legacy|columns) (or define a format=(legacy|columns) in your pip.conf under the [list] section) to disable this warning. rasterio (1.0a9) $ python -q >>> import rasterio >>> rasterio.__version__ '1.0a9' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/fill.py` Content: ``` 1 """Fill holes in raster dataset by interpolation from the edges.""" 2 3 import rasterio 4 from rasterio._fill import _fillnodata 5 from rasterio.env import ensure_env 6 7 8 @ensure_env 9 def fillnodata( 10 image, 11 mask=None, 12 max_search_distance=100.0, 13 smoothing_iterations=0): 14 """Fill holes in a raster dataset by interpolation from the edges. 15 16 This algorithm will interpolate values for all designated nodata 17 pixels (marked by zeros in `mask`). For each pixel a four direction 18 conic search is done to find values to interpolate from (using 19 inverse distance weighting). 
Once all values are interpolated, zero 20 or more smoothing iterations (3x3 average filters on interpolated 21 pixels) are applied to smooth out artifacts. 22 23 This algorithm is generally suitable for interpolating missing 24 regions of fairly continuously varying rasters (such as elevation 25 models for instance). It is also suitable for filling small holes 26 and cracks in more irregularly varying images (like aerial photos). 27 It is generally not so great for interpolating a raster from sparse 28 point data. 29 30 Parameters 31 ---------- 32 image : numpy ndarray 33 The source containing nodata holes. 34 mask : numpy ndarray or None 35 A mask band indicating which pixels to interpolate. Pixels to 36 interpolate into are indicated by the value 0. Values > 0 37 indicate areas to use during interpolation. Must be same shape 38 as image. If `None`, a mask will be diagnosed from the source 39 data. 40 max_search_distance : float, optional 41 The maxmimum number of pixels to search in all directions to 42 find values to interpolate from. The default is 100. 43 smoothing_iterations : integer, optional 44 The number of 3x3 smoothing filter passes to run. The default is 45 0. 46 47 Returns 48 ------- 49 out : numpy ndarray 50 The filled raster array. 51 """ 52 max_search_distance = float(max_search_distance) 53 smoothing_iterations = int(smoothing_iterations) 54 return _fillnodata( 55 image, mask, max_search_distance, smoothing_iterations) 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/fill.py b/rasterio/fill.py --- a/rasterio/fill.py +++ b/rasterio/fill.py @@ -3,6 +3,9 @@ import rasterio from rasterio._fill import _fillnodata from rasterio.env import ensure_env +from rasterio import dtypes + +from numpy.ma import MaskedArray @ensure_env @@ -11,7 +14,7 @@ mask=None, max_search_distance=100.0, smoothing_iterations=0): - """Fill holes in a raster dataset by interpolation from the edges. + """Fill holes in raster data by interpolation This algorithm will interpolate values for all designated nodata pixels (marked by zeros in `mask`). For each pixel a four direction @@ -30,13 +33,16 @@ Parameters ---------- image : numpy ndarray - The source containing nodata holes. + The source image with holes to be filled. If a MaskedArray, the + inverse of its mask will define the pixels to be filled -- + unless the ``mask`` argument is not None (see below).` mask : numpy ndarray or None A mask band indicating which pixels to interpolate. Pixels to - interpolate into are indicated by the value 0. Values > 0 - indicate areas to use during interpolation. Must be same shape - as image. If `None`, a mask will be diagnosed from the source - data. + interpolate into are indicated by the value 0. Values + > 0 indicate areas to use during interpolation. Must be same + shape as image. This array always takes precedence over the + image's mask (see above). If None, the inverse of the image's + mask will be used if available. max_search_distance : float, optional The maxmimum number of pixels to search in all directions to find values to interpolate from. The default is 100. @@ -49,6 +55,16 @@ out : numpy ndarray The filled raster array. """ + if mask is None and isinstance(image, MaskedArray): + mask = ~image.mask + if not dtypes.is_ndarray(mask): + raise ValueError("An mask array is required") + + if isinstance(image, MaskedArray): + image = image.data + if not dtypes.is_ndarray(image): + raise ValueError("An image array is required") + max_search_distance = float(max_search_distance) smoothing_iterations = int(smoothing_iterations) return _fillnodata(
{"golden_diff": "diff --git a/rasterio/fill.py b/rasterio/fill.py\n--- a/rasterio/fill.py\n+++ b/rasterio/fill.py\n@@ -3,6 +3,9 @@\n import rasterio\n from rasterio._fill import _fillnodata\n from rasterio.env import ensure_env\n+from rasterio import dtypes\n+\n+from numpy.ma import MaskedArray\n \n \n @ensure_env\n@@ -11,7 +14,7 @@\n mask=None,\n max_search_distance=100.0,\n smoothing_iterations=0):\n- \"\"\"Fill holes in a raster dataset by interpolation from the edges.\n+ \"\"\"Fill holes in raster data by interpolation\n \n This algorithm will interpolate values for all designated nodata\n pixels (marked by zeros in `mask`). For each pixel a four direction\n@@ -30,13 +33,16 @@\n Parameters\n ----------\n image : numpy ndarray\n- The source containing nodata holes.\n+ The source image with holes to be filled. If a MaskedArray, the\n+ inverse of its mask will define the pixels to be filled --\n+ unless the ``mask`` argument is not None (see below).`\n mask : numpy ndarray or None\n A mask band indicating which pixels to interpolate. Pixels to\n- interpolate into are indicated by the value 0. Values > 0\n- indicate areas to use during interpolation. Must be same shape\n- as image. If `None`, a mask will be diagnosed from the source\n- data.\n+ interpolate into are indicated by the value 0. Values\n+ > 0 indicate areas to use during interpolation. Must be same\n+ shape as image. This array always takes precedence over the\n+ image's mask (see above). If None, the inverse of the image's\n+ mask will be used if available.\n max_search_distance : float, optional\n The maxmimum number of pixels to search in all directions to\n find values to interpolate from. The default is 100.\n@@ -49,6 +55,16 @@\n out : numpy ndarray\n The filled raster array.\n \"\"\"\n+ if mask is None and isinstance(image, MaskedArray):\n+ mask = ~image.mask\n+ if not dtypes.is_ndarray(mask):\n+ raise ValueError(\"An mask array is required\")\n+\n+ if isinstance(image, MaskedArray):\n+ image = image.data\n+ if not dtypes.is_ndarray(image):\n+ raise ValueError(\"An image array is required\")\n+\n max_search_distance = float(max_search_distance)\n smoothing_iterations = int(smoothing_iterations)\n return _fillnodata(\n", "issue": "fillnodata does not use nodata values\n## Expected behavior and actual behavior.\r\n\r\nI expected `fillnodata` to automatically use the `source.nodata` value, but it didn't. A workaround is to read the corresponding mask. 
(See https://github.com/mapbox/rasterio/pull/253#issuecomment-325963662 and on)\r\n\r\n## Steps to reproduce the problem.\r\n\r\n```\r\n$ rio info N00E006.hgt \r\n{\"bounds\": [5.999583333333334, -0.0004166666666667318, 7.000416666666667, 1.0004166666666667], \"colorinterp\": [\"undefined\"], \"count\": 1, \"crs\": \"EPSG:4326\", \"descriptions\": [null], \"driver\": \"SRTMHGT\", \"dtype\": \"int16\", \"height\": 1201, \"indexes\": [1], \"lnglat\": [6.5, 0.5], \"mask_flags\": [[\"nodata\"]], \"nodata\": -32768.0, \"res\": [0.0008333333333333334, 0.0008333333333333334], \"shape\": [1201, 1201], \"tiled\": false, \"transform\": [0.0008333333333333334, 0.0, 5.999583333333334, 0.0, -0.0008333333333333334, 1.0004166666666667, 0.0, 0.0, 1.0], \"units\": [\"m\"], \"width\": 1201}\r\n$ gdalinfo N00E006.hgt \r\nDriver: SRTMHGT/SRTMHGT File Format\r\nFiles: N00E006.hgt\r\nSize is 1201, 1201\r\nCoordinate System is:\r\nGEOGCS[\"WGS 84\",\r\n DATUM[\"WGS_1984\",\r\n SPHEROID[\"WGS 84\",6378137,298.257223563,\r\n AUTHORITY[\"EPSG\",\"7030\"]],\r\n TOWGS84[0,0,0,0,0,0,0],\r\n AUTHORITY[\"EPSG\",\"6326\"]],\r\n PRIMEM[\"Greenwich\",0,\r\n AUTHORITY[\"EPSG\",\"8901\"]],\r\n UNIT[\"degree\",0.0174532925199433,\r\n AUTHORITY[\"EPSG\",\"9108\"]],\r\n AUTHORITY[\"EPSG\",\"4326\"]]\r\nOrigin = (5.999583333333334,1.000416666666667)\r\nPixel Size = (0.000833333333333,-0.000833333333333)\r\nMetadata:\r\n AREA_OR_POINT=Point\r\nCorner Coordinates:\r\nUpper Left ( 5.9995833, 1.0004167) ( 5d59'58.50\"E, 1d 0' 1.50\"N)\r\nLower Left ( 5.9995833, -0.0004167) ( 5d59'58.50\"E, 0d 0' 1.50\"S)\r\nUpper Right ( 7.0004167, 1.0004167) ( 7d 0' 1.50\"E, 1d 0' 1.50\"N)\r\nLower Right ( 7.0004167, -0.0004167) ( 7d 0' 1.50\"E, 0d 0' 1.50\"S)\r\nCenter ( 6.5000000, 0.5000000) ( 6d30' 0.00\"E, 0d30' 0.00\"N)\r\nBand 1 Block=1201x1 Type=Int16, ColorInterp=Undefined\r\n NoData Value=-32768\r\n Unit Type: m\r\n$ python\r\nPython 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32) \r\n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import rasterio\r\n>>> source = rasterio.open(\"N00E006.hgt\", 'r')\r\n>>> source.nodata\r\n-32768.0\r\n>>> from rasterio.fill import fillnodata\r\n>>> data = source.read(1)\r\n>>> source.nodata in data\r\nTrue\r\n>>> source.nodata in fillnodata(data)\r\nTrue\r\n>>> (data == fillnodata(data)).all()\r\nTrue\r\n>>> source.nodata in fillnodata(data, mask=source.read_masks(1))\r\nFalse\r\n```\r\n\r\n## Operating system\r\n\r\nLinux Mint, 64bit\r\n\r\n## Rasterio version and provenance\r\n\r\n1.0a9 manylinux1 wheel installed from PyPI using pip.\r\n\r\n```\r\n$ conda list rasterio\r\n# packages in environment at /home/juanlu/.miniconda36/envs/env:\r\n#\r\nrasterio 1.0a9 <pip>\r\n$ pip list | grep rasterio\r\nDEPRECATION: The default format will switch to columns in the future. You can use --format=(legacy|columns) (or define a format=(legacy|columns) in your pip.conf under the [list] section) to disable this warning.\r\nrasterio (1.0a9)\r\n$ python -q\r\n>>> import rasterio\r\n>>> rasterio.__version__\r\n'1.0a9'\r\n```\r\n\nfillnodata does not use nodata values\n## Expected behavior and actual behavior.\r\n\r\nI expected `fillnodata` to automatically use the `source.nodata` value, but it didn't. A workaround is to read the corresponding mask. 
(See https://github.com/mapbox/rasterio/pull/253#issuecomment-325963662 and on)\r\n\r\n## Steps to reproduce the problem.\r\n\r\n```\r\n$ rio info N00E006.hgt \r\n{\"bounds\": [5.999583333333334, -0.0004166666666667318, 7.000416666666667, 1.0004166666666667], \"colorinterp\": [\"undefined\"], \"count\": 1, \"crs\": \"EPSG:4326\", \"descriptions\": [null], \"driver\": \"SRTMHGT\", \"dtype\": \"int16\", \"height\": 1201, \"indexes\": [1], \"lnglat\": [6.5, 0.5], \"mask_flags\": [[\"nodata\"]], \"nodata\": -32768.0, \"res\": [0.0008333333333333334, 0.0008333333333333334], \"shape\": [1201, 1201], \"tiled\": false, \"transform\": [0.0008333333333333334, 0.0, 5.999583333333334, 0.0, -0.0008333333333333334, 1.0004166666666667, 0.0, 0.0, 1.0], \"units\": [\"m\"], \"width\": 1201}\r\n$ gdalinfo N00E006.hgt \r\nDriver: SRTMHGT/SRTMHGT File Format\r\nFiles: N00E006.hgt\r\nSize is 1201, 1201\r\nCoordinate System is:\r\nGEOGCS[\"WGS 84\",\r\n DATUM[\"WGS_1984\",\r\n SPHEROID[\"WGS 84\",6378137,298.257223563,\r\n AUTHORITY[\"EPSG\",\"7030\"]],\r\n TOWGS84[0,0,0,0,0,0,0],\r\n AUTHORITY[\"EPSG\",\"6326\"]],\r\n PRIMEM[\"Greenwich\",0,\r\n AUTHORITY[\"EPSG\",\"8901\"]],\r\n UNIT[\"degree\",0.0174532925199433,\r\n AUTHORITY[\"EPSG\",\"9108\"]],\r\n AUTHORITY[\"EPSG\",\"4326\"]]\r\nOrigin = (5.999583333333334,1.000416666666667)\r\nPixel Size = (0.000833333333333,-0.000833333333333)\r\nMetadata:\r\n AREA_OR_POINT=Point\r\nCorner Coordinates:\r\nUpper Left ( 5.9995833, 1.0004167) ( 5d59'58.50\"E, 1d 0' 1.50\"N)\r\nLower Left ( 5.9995833, -0.0004167) ( 5d59'58.50\"E, 0d 0' 1.50\"S)\r\nUpper Right ( 7.0004167, 1.0004167) ( 7d 0' 1.50\"E, 1d 0' 1.50\"N)\r\nLower Right ( 7.0004167, -0.0004167) ( 7d 0' 1.50\"E, 0d 0' 1.50\"S)\r\nCenter ( 6.5000000, 0.5000000) ( 6d30' 0.00\"E, 0d30' 0.00\"N)\r\nBand 1 Block=1201x1 Type=Int16, ColorInterp=Undefined\r\n NoData Value=-32768\r\n Unit Type: m\r\n$ python\r\nPython 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32) \r\n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import rasterio\r\n>>> source = rasterio.open(\"N00E006.hgt\", 'r')\r\n>>> source.nodata\r\n-32768.0\r\n>>> from rasterio.fill import fillnodata\r\n>>> data = source.read(1)\r\n>>> source.nodata in data\r\nTrue\r\n>>> source.nodata in fillnodata(data)\r\nTrue\r\n>>> (data == fillnodata(data)).all()\r\nTrue\r\n>>> source.nodata in fillnodata(data, mask=source.read_masks(1))\r\nFalse\r\n```\r\n\r\n## Operating system\r\n\r\nLinux Mint, 64bit\r\n\r\n## Rasterio version and provenance\r\n\r\n1.0a9 manylinux1 wheel installed from PyPI using pip.\r\n\r\n```\r\n$ conda list rasterio\r\n# packages in environment at /home/juanlu/.miniconda36/envs/env:\r\n#\r\nrasterio 1.0a9 <pip>\r\n$ pip list | grep rasterio\r\nDEPRECATION: The default format will switch to columns in the future. 
You can use --format=(legacy|columns) (or define a format=(legacy|columns) in your pip.conf under the [list] section) to disable this warning.\r\nrasterio (1.0a9)\r\n$ python -q\r\n>>> import rasterio\r\n>>> rasterio.__version__\r\n'1.0a9'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Fill holes in raster dataset by interpolation from the edges.\"\"\"\n\nimport rasterio\nfrom rasterio._fill import _fillnodata\nfrom rasterio.env import ensure_env\n\n\n@ensure_env\ndef fillnodata(\n image,\n mask=None,\n max_search_distance=100.0,\n smoothing_iterations=0):\n \"\"\"Fill holes in a raster dataset by interpolation from the edges.\n\n This algorithm will interpolate values for all designated nodata\n pixels (marked by zeros in `mask`). For each pixel a four direction\n conic search is done to find values to interpolate from (using\n inverse distance weighting). Once all values are interpolated, zero\n or more smoothing iterations (3x3 average filters on interpolated\n pixels) are applied to smooth out artifacts.\n\n This algorithm is generally suitable for interpolating missing\n regions of fairly continuously varying rasters (such as elevation\n models for instance). It is also suitable for filling small holes\n and cracks in more irregularly varying images (like aerial photos).\n It is generally not so great for interpolating a raster from sparse\n point data.\n\n Parameters\n ----------\n image : numpy ndarray\n The source containing nodata holes.\n mask : numpy ndarray or None\n A mask band indicating which pixels to interpolate. Pixels to\n interpolate into are indicated by the value 0. Values > 0\n indicate areas to use during interpolation. Must be same shape\n as image. If `None`, a mask will be diagnosed from the source\n data.\n max_search_distance : float, optional\n The maxmimum number of pixels to search in all directions to\n find values to interpolate from. The default is 100.\n smoothing_iterations : integer, optional\n The number of 3x3 smoothing filter passes to run. The default is\n 0.\n\n Returns\n -------\n out : numpy ndarray\n The filled raster array.\n \"\"\"\n max_search_distance = float(max_search_distance)\n smoothing_iterations = int(smoothing_iterations)\n return _fillnodata(\n image, mask, max_search_distance, smoothing_iterations)\n", "path": "rasterio/fill.py"}], "after_files": [{"content": "\"\"\"Fill holes in raster dataset by interpolation from the edges.\"\"\"\n\nimport rasterio\nfrom rasterio._fill import _fillnodata\nfrom rasterio.env import ensure_env\nfrom rasterio import dtypes\n\nfrom numpy.ma import MaskedArray\n\n\n@ensure_env\ndef fillnodata(\n image,\n mask=None,\n max_search_distance=100.0,\n smoothing_iterations=0):\n \"\"\"Fill holes in raster data by interpolation\n\n This algorithm will interpolate values for all designated nodata\n pixels (marked by zeros in `mask`). For each pixel a four direction\n conic search is done to find values to interpolate from (using\n inverse distance weighting). Once all values are interpolated, zero\n or more smoothing iterations (3x3 average filters on interpolated\n pixels) are applied to smooth out artifacts.\n\n This algorithm is generally suitable for interpolating missing\n regions of fairly continuously varying rasters (such as elevation\n models for instance). 
It is also suitable for filling small holes\n and cracks in more irregularly varying images (like aerial photos).\n It is generally not so great for interpolating a raster from sparse\n point data.\n\n Parameters\n ----------\n image : numpy ndarray\n The source image with holes to be filled. If a MaskedArray, the\n inverse of its mask will define the pixels to be filled --\n unless the ``mask`` argument is not None (see below).`\n mask : numpy ndarray or None\n A mask band indicating which pixels to interpolate. Pixels to\n interpolate into are indicated by the value 0. Values\n > 0 indicate areas to use during interpolation. Must be same\n shape as image. This array always takes precedence over the\n image's mask (see above). If None, the inverse of the image's\n mask will be used if available.\n max_search_distance : float, optional\n The maxmimum number of pixels to search in all directions to\n find values to interpolate from. The default is 100.\n smoothing_iterations : integer, optional\n The number of 3x3 smoothing filter passes to run. The default is\n 0.\n\n Returns\n -------\n out : numpy ndarray\n The filled raster array.\n \"\"\"\n if mask is None and isinstance(image, MaskedArray):\n mask = ~image.mask\n if not dtypes.is_ndarray(mask):\n raise ValueError(\"An mask array is required\")\n\n if isinstance(image, MaskedArray):\n image = image.data\n if not dtypes.is_ndarray(image):\n raise ValueError(\"An image array is required\")\n\n max_search_distance = float(max_search_distance)\n smoothing_iterations = int(smoothing_iterations)\n return _fillnodata(\n image, mask, max_search_distance, smoothing_iterations)\n", "path": "rasterio/fill.py"}]}
3,844
586
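A hedged usage sketch for the patched `rasterio.fill.fillnodata` from the row above: reading a band with `masked=True` yields a MaskedArray whose mask marks the nodata holes, which the patched function consumes when no explicit `mask` argument is given. The filename comes from the issue text; any raster with a nodata value set should behave the same way.

```python
import rasterio
from rasterio.fill import fillnodata

with rasterio.open("N00E006.hgt") as src:
    band = src.read(1, masked=True)        # masked where band == src.nodata (-32768)
    filled = fillnodata(band, max_search_distance=100)
    print(src.nodata in filled)            # expected False once the holes are interpolated
```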
gh_patches_debug_13034
rasdani/github-patches
git_diff
googleapis__google-cloud-python-1773
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- happybase emulation docs mention private api the page at https://googlecloudplatform.github.io/gcloud-python/latest/happybase-package.html mentions `make_row()` and `make_ordered_row()`, both of which are _not_ public api. please don't mention those at all. :) (fyi: i'm the happybase author) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gcloud/bigtable/happybase/__init__.py` Content: ``` 1 # Copyright 2016 Google Inc. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Google Cloud Bigtable HappyBase package. 16 17 This package is intended to emulate the HappyBase library using 18 Google Cloud Bigtable as the backing store. 19 20 Differences in Public API 21 ------------------------- 22 23 Some concepts from HBase/Thrift do not map directly to the Cloud 24 Bigtable API. As a result, the following instance methods and functions 25 could not be implemented: 26 27 * :meth:`Connection.enable_table() \ 28 <gcloud.bigtable.happybase.connection.Connection.enable_table>` - no 29 concept of enabled/disabled 30 * :meth:`Connection.disable_table() \ 31 <gcloud.bigtable.happybase.connection.Connection.disable_table>` - no 32 concept of enabled/disabled 33 * :meth:`Connection.is_table_enabled() \ 34 <gcloud.bigtable.happybase.connection.Connection.is_table_enabled>` 35 - no concept of enabled/disabled 36 * :meth:`Connection.compact_table() \ 37 <gcloud.bigtable.happybase.connection.Connection.compact_table>` - 38 table storage is opaque to user 39 * :func:`make_row() <gcloud.bigtable.happybase.table.make_row>` - helper 40 needed for Thrift library 41 * :func:`make_ordered_row() <gcloud.bigtable.happybase.table.make_ordered_row>` 42 - helper needed for Thrift library 43 * :meth:`Table.regions() <gcloud.bigtable.happybase.table.Table.regions>` 44 - tables in Cloud Bigtable do not expose internal storage details 45 * :meth:`Table.counter_set() \ 46 <gcloud.bigtable.happybase.table.Table.counter_set>` - method can't 47 be atomic, so we disable it 48 * The ``__version__`` value for the HappyBase package is :data:`None`. 49 However, it's worth nothing this implementation was based off HappyBase 50 0.9. 51 52 In addition, many of the constants from 53 :mod:`connection <gcloud.bigtable.happybase.connection>` 54 are specific to HBase and are defined as :data:`None` in our module: 55 56 * ``COMPAT_MODES`` 57 * ``THRIFT_TRANSPORTS`` 58 * ``THRIFT_PROTOCOLS`` 59 * ``DEFAULT_HOST`` 60 * ``DEFAULT_PORT`` 61 * ``DEFAULT_TRANSPORT`` 62 * ``DEFAULT_COMPAT`` 63 * ``DEFAULT_PROTOCOL`` 64 65 Two of these ``DEFAULT_HOST`` and ``DEFAULT_PORT``, are even imported in 66 the main :mod:`happybase <gcloud.bigtable.happybase>` package. 67 68 Finally, we do not provide the ``util`` module. Though it is public in the 69 HappyBase library, it provides no core functionality. 
70 71 API Behavior Changes 72 -------------------- 73 74 * Since there is no concept of an enabled / disabled table, calling 75 :meth:`Connection.delete_table() \ 76 <gcloud.bigtable.happybase.connection.Connection.delete_table>` 77 with ``disable=True`` can't be supported. 78 Using that argument will result in a warning. 79 * The :class:`Connection <gcloud.bigtable.happybase.connection.Connection>` 80 constructor **disables** the use of several 81 arguments and will print a warning if any of them are passed in as keyword 82 arguments. The arguments are: 83 84 * ``host`` 85 * ``port`` 86 * ``compat`` 87 * ``transport`` 88 * ``protocol`` 89 * In order to make 90 :class:`Connection <gcloud.bigtable.happybase.connection.Connection>` 91 compatible with Cloud Bigtable, we add a ``cluster`` keyword argument to 92 allow users to pass in their own 93 :class:`Cluster <gcloud.bigtable.cluster.Cluster>` (which they can 94 construct beforehand). 95 96 For example: 97 98 .. code:: python 99 100 from gcloud.bigtable.client import Client 101 client = Client(project=PROJECT_ID, admin=True) 102 cluster = client.cluster(zone, cluster_id) 103 cluster.reload() 104 105 from gcloud.bigtable.happybase import Connection 106 connection = Connection(cluster=cluster) 107 108 * Any uses of the ``wal`` (Write Ahead Log) argument will result in a 109 warning as well. This includes uses in: 110 111 * :class:`Batch <gcloud.bigtable.happybase.batch.Batch>` 112 * :meth:`Batch.put() <gcloud.bigtable.happybase.batch.Batch.put>` 113 * :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>` 114 * :meth:`Table.put() <gcloud.bigtable.happybase.table.Table.put>` 115 * :meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>` 116 * :meth:`Table.batch() <gcloud.bigtable.happybase.table.Table.batch>` factory 117 * When calling 118 :meth:`Connection.create_table() \ 119 <gcloud.bigtable.happybase.connection.Connection.create_table>`, the 120 majority of HBase column family options cannot be used. Among 121 122 * ``max_versions`` 123 * ``compression`` 124 * ``in_memory`` 125 * ``bloom_filter_type`` 126 * ``bloom_filter_vector_size`` 127 * ``bloom_filter_nb_hashes`` 128 * ``block_cache_enabled`` 129 * ``time_to_live`` 130 131 Only ``max_versions`` and ``time_to_live`` are availabe in Cloud Bigtable 132 (as 133 :class:`MaxVersionsGCRule <gcloud.bigtable.column_family.MaxVersionsGCRule>` 134 and 135 :class:`MaxAgeGCRule <gcloud.bigtable.column_family.MaxAgeGCRule>`). 136 137 In addition to using a dictionary for specifying column family options, 138 we also accept instances of :class:`.GarbageCollectionRule` or subclasses. 139 * :meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` no longer 140 accepts the following arguments (which will result in a warning): 141 142 * ``batch_size`` 143 * ``scan_batching`` 144 * ``sorted_columns`` 145 146 * Using a HBase filter string in 147 :meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` is 148 not possible with Cloud Bigtable and will result in a 149 :class:`TypeError <exceptions.TypeError>`. However, the method now accepts 150 instances of :class:`.RowFilter` and subclasses. 151 * :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>` (and 152 hence 153 :meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>`) 154 will fail with a :class:`ValueError <exceptions.ValueError>` when either a 155 row or column family delete is attempted with a ``timestamp``. 
This is 156 because the Cloud Bigtable API uses the ``DeleteFromFamily`` and 157 ``DeleteFromRow`` mutations for these deletes, and neither of these 158 mutations support a timestamp. 159 """ 160 161 from gcloud.bigtable.happybase.batch import Batch 162 from gcloud.bigtable.happybase.connection import Connection 163 from gcloud.bigtable.happybase.connection import DEFAULT_HOST 164 from gcloud.bigtable.happybase.connection import DEFAULT_PORT 165 from gcloud.bigtable.happybase.pool import ConnectionPool 166 from gcloud.bigtable.happybase.pool import NoConnectionsAvailable 167 from gcloud.bigtable.happybase.table import Table 168 169 170 # Values from HappyBase that we don't reproduce / are not relevant. 171 __version__ = None 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gcloud/bigtable/happybase/__init__.py b/gcloud/bigtable/happybase/__init__.py --- a/gcloud/bigtable/happybase/__init__.py +++ b/gcloud/bigtable/happybase/__init__.py @@ -36,10 +36,6 @@ * :meth:`Connection.compact_table() \ <gcloud.bigtable.happybase.connection.Connection.compact_table>` - table storage is opaque to user -* :func:`make_row() <gcloud.bigtable.happybase.table.make_row>` - helper - needed for Thrift library -* :func:`make_ordered_row() <gcloud.bigtable.happybase.table.make_ordered_row>` - - helper needed for Thrift library * :meth:`Table.regions() <gcloud.bigtable.happybase.table.Table.regions>` - tables in Cloud Bigtable do not expose internal storage details * :meth:`Table.counter_set() \
{"golden_diff": "diff --git a/gcloud/bigtable/happybase/__init__.py b/gcloud/bigtable/happybase/__init__.py\n--- a/gcloud/bigtable/happybase/__init__.py\n+++ b/gcloud/bigtable/happybase/__init__.py\n@@ -36,10 +36,6 @@\n * :meth:`Connection.compact_table() \\\n <gcloud.bigtable.happybase.connection.Connection.compact_table>` -\n table storage is opaque to user\n-* :func:`make_row() <gcloud.bigtable.happybase.table.make_row>` - helper\n- needed for Thrift library\n-* :func:`make_ordered_row() <gcloud.bigtable.happybase.table.make_ordered_row>`\n- - helper needed for Thrift library\n * :meth:`Table.regions() <gcloud.bigtable.happybase.table.Table.regions>`\n - tables in Cloud Bigtable do not expose internal storage details\n * :meth:`Table.counter_set() \\\n", "issue": "happybase emulation docs mention private api\nthe page at https://googlecloudplatform.github.io/gcloud-python/latest/happybase-package.html mentions `make_row()` and `make_ordered_row()`, both of which are _not_ public api. please don't mention those at all. :)\n\n(fyi: i'm the happybase author)\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Cloud Bigtable HappyBase package.\n\nThis package is intended to emulate the HappyBase library using\nGoogle Cloud Bigtable as the backing store.\n\nDifferences in Public API\n-------------------------\n\nSome concepts from HBase/Thrift do not map directly to the Cloud\nBigtable API. 
As a result, the following instance methods and functions\ncould not be implemented:\n\n* :meth:`Connection.enable_table() \\\n <gcloud.bigtable.happybase.connection.Connection.enable_table>` - no\n concept of enabled/disabled\n* :meth:`Connection.disable_table() \\\n <gcloud.bigtable.happybase.connection.Connection.disable_table>` - no\n concept of enabled/disabled\n* :meth:`Connection.is_table_enabled() \\\n <gcloud.bigtable.happybase.connection.Connection.is_table_enabled>`\n - no concept of enabled/disabled\n* :meth:`Connection.compact_table() \\\n <gcloud.bigtable.happybase.connection.Connection.compact_table>` -\n table storage is opaque to user\n* :func:`make_row() <gcloud.bigtable.happybase.table.make_row>` - helper\n needed for Thrift library\n* :func:`make_ordered_row() <gcloud.bigtable.happybase.table.make_ordered_row>`\n - helper needed for Thrift library\n* :meth:`Table.regions() <gcloud.bigtable.happybase.table.Table.regions>`\n - tables in Cloud Bigtable do not expose internal storage details\n* :meth:`Table.counter_set() \\\n <gcloud.bigtable.happybase.table.Table.counter_set>` - method can't\n be atomic, so we disable it\n* The ``__version__`` value for the HappyBase package is :data:`None`.\n However, it's worth nothing this implementation was based off HappyBase\n 0.9.\n\nIn addition, many of the constants from\n:mod:`connection <gcloud.bigtable.happybase.connection>`\nare specific to HBase and are defined as :data:`None` in our module:\n\n* ``COMPAT_MODES``\n* ``THRIFT_TRANSPORTS``\n* ``THRIFT_PROTOCOLS``\n* ``DEFAULT_HOST``\n* ``DEFAULT_PORT``\n* ``DEFAULT_TRANSPORT``\n* ``DEFAULT_COMPAT``\n* ``DEFAULT_PROTOCOL``\n\nTwo of these ``DEFAULT_HOST`` and ``DEFAULT_PORT``, are even imported in\nthe main :mod:`happybase <gcloud.bigtable.happybase>` package.\n\nFinally, we do not provide the ``util`` module. Though it is public in the\nHappyBase library, it provides no core functionality.\n\nAPI Behavior Changes\n--------------------\n\n* Since there is no concept of an enabled / disabled table, calling\n :meth:`Connection.delete_table() \\\n <gcloud.bigtable.happybase.connection.Connection.delete_table>`\n with ``disable=True`` can't be supported.\n Using that argument will result in a warning.\n* The :class:`Connection <gcloud.bigtable.happybase.connection.Connection>`\n constructor **disables** the use of several\n arguments and will print a warning if any of them are passed in as keyword\n arguments. The arguments are:\n\n * ``host``\n * ``port``\n * ``compat``\n * ``transport``\n * ``protocol``\n* In order to make\n :class:`Connection <gcloud.bigtable.happybase.connection.Connection>`\n compatible with Cloud Bigtable, we add a ``cluster`` keyword argument to\n allow users to pass in their own\n :class:`Cluster <gcloud.bigtable.cluster.Cluster>` (which they can\n construct beforehand).\n\n For example:\n\n .. code:: python\n\n from gcloud.bigtable.client import Client\n client = Client(project=PROJECT_ID, admin=True)\n cluster = client.cluster(zone, cluster_id)\n cluster.reload()\n\n from gcloud.bigtable.happybase import Connection\n connection = Connection(cluster=cluster)\n\n* Any uses of the ``wal`` (Write Ahead Log) argument will result in a\n warning as well. 
This includes uses in:\n\n * :class:`Batch <gcloud.bigtable.happybase.batch.Batch>`\n * :meth:`Batch.put() <gcloud.bigtable.happybase.batch.Batch.put>`\n * :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>`\n * :meth:`Table.put() <gcloud.bigtable.happybase.table.Table.put>`\n * :meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>`\n * :meth:`Table.batch() <gcloud.bigtable.happybase.table.Table.batch>` factory\n* When calling\n :meth:`Connection.create_table() \\\n <gcloud.bigtable.happybase.connection.Connection.create_table>`, the\n majority of HBase column family options cannot be used. Among\n\n * ``max_versions``\n * ``compression``\n * ``in_memory``\n * ``bloom_filter_type``\n * ``bloom_filter_vector_size``\n * ``bloom_filter_nb_hashes``\n * ``block_cache_enabled``\n * ``time_to_live``\n\n Only ``max_versions`` and ``time_to_live`` are availabe in Cloud Bigtable\n (as\n :class:`MaxVersionsGCRule <gcloud.bigtable.column_family.MaxVersionsGCRule>`\n and\n :class:`MaxAgeGCRule <gcloud.bigtable.column_family.MaxAgeGCRule>`).\n\n In addition to using a dictionary for specifying column family options,\n we also accept instances of :class:`.GarbageCollectionRule` or subclasses.\n* :meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` no longer\n accepts the following arguments (which will result in a warning):\n\n * ``batch_size``\n * ``scan_batching``\n * ``sorted_columns``\n\n* Using a HBase filter string in\n :meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` is\n not possible with Cloud Bigtable and will result in a\n :class:`TypeError <exceptions.TypeError>`. However, the method now accepts\n instances of :class:`.RowFilter` and subclasses.\n* :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>` (and\n hence\n :meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>`)\n will fail with a :class:`ValueError <exceptions.ValueError>` when either a\n row or column family delete is attempted with a ``timestamp``. This is\n because the Cloud Bigtable API uses the ``DeleteFromFamily`` and\n ``DeleteFromRow`` mutations for these deletes, and neither of these\n mutations support a timestamp.\n\"\"\"\n\nfrom gcloud.bigtable.happybase.batch import Batch\nfrom gcloud.bigtable.happybase.connection import Connection\nfrom gcloud.bigtable.happybase.connection import DEFAULT_HOST\nfrom gcloud.bigtable.happybase.connection import DEFAULT_PORT\nfrom gcloud.bigtable.happybase.pool import ConnectionPool\nfrom gcloud.bigtable.happybase.pool import NoConnectionsAvailable\nfrom gcloud.bigtable.happybase.table import Table\n\n\n# Values from HappyBase that we don't reproduce / are not relevant.\n__version__ = None\n", "path": "gcloud/bigtable/happybase/__init__.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Cloud Bigtable HappyBase package.\n\nThis package is intended to emulate the HappyBase library using\nGoogle Cloud Bigtable as the backing store.\n\nDifferences in Public API\n-------------------------\n\nSome concepts from HBase/Thrift do not map directly to the Cloud\nBigtable API. As a result, the following instance methods and functions\ncould not be implemented:\n\n* :meth:`Connection.enable_table() \\\n <gcloud.bigtable.happybase.connection.Connection.enable_table>` - no\n concept of enabled/disabled\n* :meth:`Connection.disable_table() \\\n <gcloud.bigtable.happybase.connection.Connection.disable_table>` - no\n concept of enabled/disabled\n* :meth:`Connection.is_table_enabled() \\\n <gcloud.bigtable.happybase.connection.Connection.is_table_enabled>`\n - no concept of enabled/disabled\n* :meth:`Connection.compact_table() \\\n <gcloud.bigtable.happybase.connection.Connection.compact_table>` -\n table storage is opaque to user\n* :meth:`Table.regions() <gcloud.bigtable.happybase.table.Table.regions>`\n - tables in Cloud Bigtable do not expose internal storage details\n* :meth:`Table.counter_set() \\\n <gcloud.bigtable.happybase.table.Table.counter_set>` - method can't\n be atomic, so we disable it\n* The ``__version__`` value for the HappyBase package is :data:`None`.\n However, it's worth nothing this implementation was based off HappyBase\n 0.9.\n\nIn addition, many of the constants from\n:mod:`connection <gcloud.bigtable.happybase.connection>`\nare specific to HBase and are defined as :data:`None` in our module:\n\n* ``COMPAT_MODES``\n* ``THRIFT_TRANSPORTS``\n* ``THRIFT_PROTOCOLS``\n* ``DEFAULT_HOST``\n* ``DEFAULT_PORT``\n* ``DEFAULT_TRANSPORT``\n* ``DEFAULT_COMPAT``\n* ``DEFAULT_PROTOCOL``\n\nTwo of these ``DEFAULT_HOST`` and ``DEFAULT_PORT``, are even imported in\nthe main :mod:`happybase <gcloud.bigtable.happybase>` package.\n\nFinally, we do not provide the ``util`` module. Though it is public in the\nHappyBase library, it provides no core functionality.\n\nAPI Behavior Changes\n--------------------\n\n* Since there is no concept of an enabled / disabled table, calling\n :meth:`Connection.delete_table() \\\n <gcloud.bigtable.happybase.connection.Connection.delete_table>`\n with ``disable=True`` can't be supported.\n Using that argument will result in a warning.\n* The :class:`Connection <gcloud.bigtable.happybase.connection.Connection>`\n constructor **disables** the use of several\n arguments and will print a warning if any of them are passed in as keyword\n arguments. The arguments are:\n\n * ``host``\n * ``port``\n * ``compat``\n * ``transport``\n * ``protocol``\n* In order to make\n :class:`Connection <gcloud.bigtable.happybase.connection.Connection>`\n compatible with Cloud Bigtable, we add a ``cluster`` keyword argument to\n allow users to pass in their own\n :class:`Cluster <gcloud.bigtable.cluster.Cluster>` (which they can\n construct beforehand).\n\n For example:\n\n .. 
code:: python\n\n from gcloud.bigtable.client import Client\n client = Client(project=PROJECT_ID, admin=True)\n cluster = client.cluster(zone, cluster_id)\n cluster.reload()\n\n from gcloud.bigtable.happybase import Connection\n connection = Connection(cluster=cluster)\n\n* Any uses of the ``wal`` (Write Ahead Log) argument will result in a\n warning as well. This includes uses in:\n\n * :class:`Batch <gcloud.bigtable.happybase.batch.Batch>`\n * :meth:`Batch.put() <gcloud.bigtable.happybase.batch.Batch.put>`\n * :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>`\n * :meth:`Table.put() <gcloud.bigtable.happybase.table.Table.put>`\n * :meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>`\n * :meth:`Table.batch() <gcloud.bigtable.happybase.table.Table.batch>` factory\n* When calling\n :meth:`Connection.create_table() \\\n <gcloud.bigtable.happybase.connection.Connection.create_table>`, the\n majority of HBase column family options cannot be used. Among\n\n * ``max_versions``\n * ``compression``\n * ``in_memory``\n * ``bloom_filter_type``\n * ``bloom_filter_vector_size``\n * ``bloom_filter_nb_hashes``\n * ``block_cache_enabled``\n * ``time_to_live``\n\n Only ``max_versions`` and ``time_to_live`` are availabe in Cloud Bigtable\n (as\n :class:`MaxVersionsGCRule <gcloud.bigtable.column_family.MaxVersionsGCRule>`\n and\n :class:`MaxAgeGCRule <gcloud.bigtable.column_family.MaxAgeGCRule>`).\n\n In addition to using a dictionary for specifying column family options,\n we also accept instances of :class:`.GarbageCollectionRule` or subclasses.\n* :meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` no longer\n accepts the following arguments (which will result in a warning):\n\n * ``batch_size``\n * ``scan_batching``\n * ``sorted_columns``\n\n* Using a HBase filter string in\n :meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` is\n not possible with Cloud Bigtable and will result in a\n :class:`TypeError <exceptions.TypeError>`. However, the method now accepts\n instances of :class:`.RowFilter` and subclasses.\n* :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>` (and\n hence\n :meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>`)\n will fail with a :class:`ValueError <exceptions.ValueError>` when either a\n row or column family delete is attempted with a ``timestamp``. This is\n because the Cloud Bigtable API uses the ``DeleteFromFamily`` and\n ``DeleteFromRow`` mutations for these deletes, and neither of these\n mutations support a timestamp.\n\"\"\"\n\nfrom gcloud.bigtable.happybase.batch import Batch\nfrom gcloud.bigtable.happybase.connection import Connection\nfrom gcloud.bigtable.happybase.connection import DEFAULT_HOST\nfrom gcloud.bigtable.happybase.connection import DEFAULT_PORT\nfrom gcloud.bigtable.happybase.pool import ConnectionPool\nfrom gcloud.bigtable.happybase.pool import NoConnectionsAvailable\nfrom gcloud.bigtable.happybase.table import Table\n\n\n# Values from HappyBase that we don't reproduce / are not relevant.\n__version__ = None\n", "path": "gcloud/bigtable/happybase/__init__.py"}]}
2,420
208
gh_patches_debug_53384
rasdani/github-patches
git_diff
chainer__chainer-271
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- FunctionSet.copy_parameters_from() Hi all! The code in 'FunctionSet.copy_parameters_from()' does not work, when 'src' and 'dst' are both numpy.ndarrays? ``` python if isinstance(dst, numpy.ndarray): if isinstance(src, numpy.ndarray): dst.copy(src) # this gives a ValueError ``` I think this should read ``` python if isinstance(dst, numpy.ndarray): if isinstance(src, numpy.ndarray): numpy.copyto(dst, src) ``` My numpy.version.full_version is 1.9.2, the 'copyto' method exists since 1.7.0. Cheers, -r --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer/function_set.py` Content: ``` 1 import numpy 2 import six 3 4 from chainer import cuda 5 6 7 class FunctionSet(object): 8 9 """Set of objects with ``parameters`` and ``gradients`` properties. 10 11 :class:`FunctionSet` is useful to collect parameters and gradients of 12 multiple parameterized :class:`Function` objects. :class:`FunctionSet` 13 itself also implements :attr:`~FunctionSet.parameters` and 14 :attr:`~FunctionSet.gradients`, so it can be nested in another 15 :class:`FunctionSet` object. 16 17 Function registration is done by just adding an attribute to 18 :class:`FunctionSet` object. 19 20 """ 21 22 def __init__(self, **functions): 23 """Initializes the function set by given functions. 24 25 Args: 26 **functions: ``dict`` of ``str`` key and :class:`Function` values. 27 The key-value pairs are just set to the :class:`FunctionSet` 28 object as attributes. 29 30 """ 31 for name, func in six.iteritems(functions): 32 setattr(self, name, func) 33 34 def collect_parameters(self): 35 """Returns a tuple of parameters and gradients. 36 37 Returns: 38 Tuple (pair) of two tuples. The first element is a tuple of 39 parameter arrays, and the second is a tuple of gradient arrays. 40 41 """ 42 return self.parameters, self.gradients 43 44 def to_gpu(self, device=None): 45 """Migrates all parameters and gradients onto GPU. 46 47 This method calls ``to_gpu`` method of each registered object. 48 49 Args: 50 device (int or :class:`pycuda.driver.Device` or ``None``): Device 51 ID of GPU. If ``None`` is given, it uses the current device. 52 53 Returns: 54 self 55 56 """ 57 for func in six.itervalues(self.__dict__): 58 func.to_gpu(device=device) 59 return self 60 61 def to_cpu(self): 62 """Migrates all parameters and gradients onto CPU. 63 64 This method calls ``to_cpu`` method of each registered object. 65 66 Returns: 67 self 68 69 """ 70 for func in six.itervalues(self.__dict__): 71 func.to_cpu() 72 return self 73 74 def copy_parameters_from(self, params): 75 """Copies parameters from another source without reallocation. 76 77 Args: 78 params (Iterable): Iterable of parameter arrays. 79 80 """ 81 for dst, src in zip(self.parameters, params): 82 if isinstance(dst, numpy.ndarray): 83 if isinstance(src, numpy.ndarray): 84 dst.copy(src) 85 else: 86 src.get(dst) 87 elif isinstance(src, numpy.ndarray): 88 dst.set(src) 89 else: 90 cuda.copy(src, out=dst) 91 92 @property 93 def parameters(self): 94 """Tuple of parameter arrays of all registered functions. 95 96 The order of parameters is consistent with :meth:`gradients` property. 
97 98 """ 99 return sum((func.parameters for _, func in self._get_sorted_funcs()), 100 ()) 101 102 @parameters.setter 103 def parameters(self, params): 104 param_iter = iter(params) 105 for _, func in self._get_sorted_funcs(): 106 func.parameters = param_iter 107 108 @property 109 def gradients(self): 110 """Tuple of gradient arrays of all registered functions. 111 112 The order of gradients is consistent with :meth:`parameters` property. 113 114 """ 115 return sum((func.gradients for _, func in self._get_sorted_funcs()), 116 ()) 117 118 @gradients.setter 119 def gradients(self, grads): 120 grad_iter = iter(grads) 121 for _, func in self._get_sorted_funcs(): 122 func.gradients = grad_iter 123 124 def _get_sorted_funcs(self): 125 return sorted(six.iteritems(self.__dict__)) 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer/function_set.py b/chainer/function_set.py --- a/chainer/function_set.py +++ b/chainer/function_set.py @@ -81,7 +81,7 @@ for dst, src in zip(self.parameters, params): if isinstance(dst, numpy.ndarray): if isinstance(src, numpy.ndarray): - dst.copy(src) + numpy.copyto(dst, src) else: src.get(dst) elif isinstance(src, numpy.ndarray):
{"golden_diff": "diff --git a/chainer/function_set.py b/chainer/function_set.py\n--- a/chainer/function_set.py\n+++ b/chainer/function_set.py\n@@ -81,7 +81,7 @@\n for dst, src in zip(self.parameters, params):\n if isinstance(dst, numpy.ndarray):\n if isinstance(src, numpy.ndarray):\n- dst.copy(src)\n+ numpy.copyto(dst, src)\n else:\n src.get(dst)\n elif isinstance(src, numpy.ndarray):\n", "issue": "FunctionSet.copy_parameters_from()\nHi all!\n\nThe code in 'FunctionSet.copy_parameters_from()' does not work, when 'src' and 'dst' are both numpy.ndarrays?\n\n``` python\nif isinstance(dst, numpy.ndarray):\n if isinstance(src, numpy.ndarray):\n dst.copy(src) # this gives a ValueError\n```\n\nI think this should read\n\n``` python\nif isinstance(dst, numpy.ndarray):\n if isinstance(src, numpy.ndarray):\n numpy.copyto(dst, src)\n```\n\nMy numpy.version.full_version is 1.9.2, the 'copyto' method exists since 1.7.0.\n\nCheers,\n-r\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\n\n\nclass FunctionSet(object):\n\n \"\"\"Set of objects with ``parameters`` and ``gradients`` properties.\n\n :class:`FunctionSet` is useful to collect parameters and gradients of\n multiple parameterized :class:`Function` objects. :class:`FunctionSet`\n itself also implements :attr:`~FunctionSet.parameters` and\n :attr:`~FunctionSet.gradients`, so it can be nested in another\n :class:`FunctionSet` object.\n\n Function registration is done by just adding an attribute to\n :class:`FunctionSet` object.\n\n \"\"\"\n\n def __init__(self, **functions):\n \"\"\"Initializes the function set by given functions.\n\n Args:\n **functions: ``dict`` of ``str`` key and :class:`Function` values.\n The key-value pairs are just set to the :class:`FunctionSet`\n object as attributes.\n\n \"\"\"\n for name, func in six.iteritems(functions):\n setattr(self, name, func)\n\n def collect_parameters(self):\n \"\"\"Returns a tuple of parameters and gradients.\n\n Returns:\n Tuple (pair) of two tuples. The first element is a tuple of\n parameter arrays, and the second is a tuple of gradient arrays.\n\n \"\"\"\n return self.parameters, self.gradients\n\n def to_gpu(self, device=None):\n \"\"\"Migrates all parameters and gradients onto GPU.\n\n This method calls ``to_gpu`` method of each registered object.\n\n Args:\n device (int or :class:`pycuda.driver.Device` or ``None``): Device\n ID of GPU. 
If ``None`` is given, it uses the current device.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_gpu(device=device)\n return self\n\n def to_cpu(self):\n \"\"\"Migrates all parameters and gradients onto CPU.\n\n This method calls ``to_cpu`` method of each registered object.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_cpu()\n return self\n\n def copy_parameters_from(self, params):\n \"\"\"Copies parameters from another source without reallocation.\n\n Args:\n params (Iterable): Iterable of parameter arrays.\n\n \"\"\"\n for dst, src in zip(self.parameters, params):\n if isinstance(dst, numpy.ndarray):\n if isinstance(src, numpy.ndarray):\n dst.copy(src)\n else:\n src.get(dst)\n elif isinstance(src, numpy.ndarray):\n dst.set(src)\n else:\n cuda.copy(src, out=dst)\n\n @property\n def parameters(self):\n \"\"\"Tuple of parameter arrays of all registered functions.\n\n The order of parameters is consistent with :meth:`gradients` property.\n\n \"\"\"\n return sum((func.parameters for _, func in self._get_sorted_funcs()),\n ())\n\n @parameters.setter\n def parameters(self, params):\n param_iter = iter(params)\n for _, func in self._get_sorted_funcs():\n func.parameters = param_iter\n\n @property\n def gradients(self):\n \"\"\"Tuple of gradient arrays of all registered functions.\n\n The order of gradients is consistent with :meth:`parameters` property.\n\n \"\"\"\n return sum((func.gradients for _, func in self._get_sorted_funcs()),\n ())\n\n @gradients.setter\n def gradients(self, grads):\n grad_iter = iter(grads)\n for _, func in self._get_sorted_funcs():\n func.gradients = grad_iter\n\n def _get_sorted_funcs(self):\n return sorted(six.iteritems(self.__dict__))\n", "path": "chainer/function_set.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\n\n\nclass FunctionSet(object):\n\n \"\"\"Set of objects with ``parameters`` and ``gradients`` properties.\n\n :class:`FunctionSet` is useful to collect parameters and gradients of\n multiple parameterized :class:`Function` objects. :class:`FunctionSet`\n itself also implements :attr:`~FunctionSet.parameters` and\n :attr:`~FunctionSet.gradients`, so it can be nested in another\n :class:`FunctionSet` object.\n\n Function registration is done by just adding an attribute to\n :class:`FunctionSet` object.\n\n \"\"\"\n\n def __init__(self, **functions):\n \"\"\"Initializes the function set by given functions.\n\n Args:\n **functions: ``dict`` of ``str`` key and :class:`Function` values.\n The key-value pairs are just set to the :class:`FunctionSet`\n object as attributes.\n\n \"\"\"\n for name, func in six.iteritems(functions):\n setattr(self, name, func)\n\n def collect_parameters(self):\n \"\"\"Returns a tuple of parameters and gradients.\n\n Returns:\n Tuple (pair) of two tuples. The first element is a tuple of\n parameter arrays, and the second is a tuple of gradient arrays.\n\n \"\"\"\n return self.parameters, self.gradients\n\n def to_gpu(self, device=None):\n \"\"\"Migrates all parameters and gradients onto GPU.\n\n This method calls ``to_gpu`` method of each registered object.\n\n Args:\n device (int or :class:`pycuda.driver.Device` or ``None``): Device\n ID of GPU. 
If ``None`` is given, it uses the current device.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_gpu(device=device)\n return self\n\n def to_cpu(self):\n \"\"\"Migrates all parameters and gradients onto CPU.\n\n This method calls ``to_cpu`` method of each registered object.\n\n Returns:\n self\n\n \"\"\"\n for func in six.itervalues(self.__dict__):\n func.to_cpu()\n return self\n\n def copy_parameters_from(self, params):\n \"\"\"Copies parameters from another source without reallocation.\n\n Args:\n params (Iterable): Iterable of parameter arrays.\n\n \"\"\"\n for dst, src in zip(self.parameters, params):\n if isinstance(dst, numpy.ndarray):\n if isinstance(src, numpy.ndarray):\n numpy.copyto(dst, src)\n else:\n src.get(dst)\n elif isinstance(src, numpy.ndarray):\n dst.set(src)\n else:\n cuda.copy(src, out=dst)\n\n @property\n def parameters(self):\n \"\"\"Tuple of parameter arrays of all registered functions.\n\n The order of parameters is consistent with :meth:`gradients` property.\n\n \"\"\"\n return sum((func.parameters for _, func in self._get_sorted_funcs()),\n ())\n\n @parameters.setter\n def parameters(self, params):\n param_iter = iter(params)\n for _, func in self._get_sorted_funcs():\n func.parameters = param_iter\n\n @property\n def gradients(self):\n \"\"\"Tuple of gradient arrays of all registered functions.\n\n The order of gradients is consistent with :meth:`parameters` property.\n\n \"\"\"\n return sum((func.gradients for _, func in self._get_sorted_funcs()),\n ())\n\n @gradients.setter\n def gradients(self, grads):\n grad_iter = iter(grads)\n for _, func in self._get_sorted_funcs():\n func.gradients = grad_iter\n\n def _get_sorted_funcs(self):\n return sorted(six.iteritems(self.__dict__))\n", "path": "chainer/function_set.py"}]}
1,452
103
gh_patches_debug_2774
rasdani/github-patches
git_diff
ipython__ipython-1942
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- script magics cause terminal spam since the addition of script magics in cdde5bba8 one gets a _which_ error message outputted to the terminal on each start: e.g. if no python3 is available: ``` $ ipython which: no python3 in (/scratch/jtaylor/progs/localinst/lib/ccache:/scratch/jtaylor/progs/localinst/bin:/scratch/jtaylor/progs/Reflex/software/bin:/usr/lib/qt-3.3/bin:/usr/kerberos/bin:/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:/home/jtaylor/gasgano/bin:/scisoft/bin:/home/jtaylor/scripts:/scratch/jtaylor/progs/root/bin) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/utils/_process_posix.py` Content: ``` 1 """Posix-specific implementation of process utilities. 2 3 This file is only meant to be imported by process.py, not by end-users. 4 """ 5 6 #----------------------------------------------------------------------------- 7 # Copyright (C) 2010-2011 The IPython Development Team 8 # 9 # Distributed under the terms of the BSD License. The full license is in 10 # the file COPYING, distributed as part of this software. 11 #----------------------------------------------------------------------------- 12 13 #----------------------------------------------------------------------------- 14 # Imports 15 #----------------------------------------------------------------------------- 16 from __future__ import print_function 17 18 # Stdlib 19 import subprocess as sp 20 import sys 21 22 from IPython.external import pexpect 23 24 # Our own 25 from .autoattr import auto_attr 26 from ._process_common import getoutput, arg_split 27 from IPython.utils import text 28 from IPython.utils import py3compat 29 from IPython.utils.encoding import DEFAULT_ENCODING 30 31 #----------------------------------------------------------------------------- 32 # Function definitions 33 #----------------------------------------------------------------------------- 34 35 def _find_cmd(cmd): 36 """Find the full path to a command using which.""" 37 38 path = sp.Popen(['/usr/bin/env', 'which', cmd], 39 stdout=sp.PIPE).communicate()[0] 40 return py3compat.bytes_to_str(path) 41 42 43 class ProcessHandler(object): 44 """Execute subprocesses under the control of pexpect. 45 """ 46 # Timeout in seconds to wait on each reading of the subprocess' output. 47 # This should not be set too low to avoid cpu overusage from our side, 48 # since we read in a loop whose period is controlled by this timeout. 49 read_timeout = 0.05 50 51 # Timeout to give a process if we receive SIGINT, between sending the 52 # SIGINT to the process and forcefully terminating it. 
53 terminate_timeout = 0.2 54 55 # File object where stdout and stderr of the subprocess will be written 56 logfile = None 57 58 # Shell to call for subprocesses to execute 59 sh = None 60 61 @auto_attr 62 def sh(self): 63 sh = pexpect.which('sh') 64 if sh is None: 65 raise OSError('"sh" shell not found') 66 return sh 67 68 def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None): 69 """Arguments are used for pexpect calls.""" 70 self.read_timeout = (ProcessHandler.read_timeout if read_timeout is 71 None else read_timeout) 72 self.terminate_timeout = (ProcessHandler.terminate_timeout if 73 terminate_timeout is None else 74 terminate_timeout) 75 self.logfile = sys.stdout if logfile is None else logfile 76 77 def getoutput(self, cmd): 78 """Run a command and return its stdout/stderr as a string. 79 80 Parameters 81 ---------- 82 cmd : str 83 A command to be executed in the system shell. 84 85 Returns 86 ------- 87 output : str 88 A string containing the combination of stdout and stderr from the 89 subprocess, in whatever order the subprocess originally wrote to its 90 file descriptors (so the order of the information in this string is the 91 correct order as would be seen if running the command in a terminal). 92 """ 93 try: 94 return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') 95 except KeyboardInterrupt: 96 print('^C', file=sys.stderr, end='') 97 98 def getoutput_pexpect(self, cmd): 99 """Run a command and return its stdout/stderr as a string. 100 101 Parameters 102 ---------- 103 cmd : str 104 A command to be executed in the system shell. 105 106 Returns 107 ------- 108 output : str 109 A string containing the combination of stdout and stderr from the 110 subprocess, in whatever order the subprocess originally wrote to its 111 file descriptors (so the order of the information in this string is the 112 correct order as would be seen if running the command in a terminal). 113 """ 114 try: 115 return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') 116 except KeyboardInterrupt: 117 print('^C', file=sys.stderr, end='') 118 119 def system(self, cmd): 120 """Execute a command in a subshell. 121 122 Parameters 123 ---------- 124 cmd : str 125 A command to be executed in the system shell. 126 127 Returns 128 ------- 129 int : child's exitstatus 130 """ 131 # Get likely encoding for the output. 132 enc = DEFAULT_ENCODING 133 134 # Patterns to match on the output, for pexpect. We read input and 135 # allow either a short timeout or EOF 136 patterns = [pexpect.TIMEOUT, pexpect.EOF] 137 # the index of the EOF pattern in the list. 138 # even though we know it's 1, this call means we don't have to worry if 139 # we change the above list, and forget to change this value: 140 EOF_index = patterns.index(pexpect.EOF) 141 # The size of the output stored so far in the process output buffer. 142 # Since pexpect only appends to this buffer, each time we print we 143 # record how far we've printed, so that next time we only print *new* 144 # content from the buffer. 145 out_size = 0 146 try: 147 # Since we're not really searching the buffer for text patterns, we 148 # can set pexpect's search window to be tiny and it won't matter. 149 # We only search for the 'patterns' timeout or EOF, which aren't in 150 # the text itself. 
151 #child = pexpect.spawn(pcmd, searchwindowsize=1) 152 if hasattr(pexpect, 'spawnb'): 153 child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U 154 else: 155 child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect 156 flush = sys.stdout.flush 157 while True: 158 # res is the index of the pattern that caused the match, so we 159 # know whether we've finished (if we matched EOF) or not 160 res_idx = child.expect_list(patterns, self.read_timeout) 161 print(child.before[out_size:].decode(enc, 'replace'), end='') 162 flush() 163 if res_idx==EOF_index: 164 break 165 # Update the pointer to what we've already printed 166 out_size = len(child.before) 167 except KeyboardInterrupt: 168 # We need to send ^C to the process. The ascii code for '^C' is 3 169 # (the character is known as ETX for 'End of Text', see 170 # curses.ascii.ETX). 171 child.sendline(chr(3)) 172 # Read and print any more output the program might produce on its 173 # way out. 174 try: 175 out_size = len(child.before) 176 child.expect_list(patterns, self.terminate_timeout) 177 print(child.before[out_size:].decode(enc, 'replace'), end='') 178 sys.stdout.flush() 179 except KeyboardInterrupt: 180 # Impatient users tend to type it multiple times 181 pass 182 finally: 183 # Ensure the subprocess really is terminated 184 child.terminate(force=True) 185 # add isalive check, to ensure exitstatus is set: 186 child.isalive() 187 return child.exitstatus 188 189 190 # Make system() with a functional interface for outside use. Note that we use 191 # getoutput() from the _common utils, which is built on top of popen(). Using 192 # pexpect to get subprocess output produces difficult to parse output, since 193 # programs think they are talking to a tty and produce highly formatted output 194 # (ls is a good example) that makes them hard. 195 system = ProcessHandler().system 196 197 198 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/utils/_process_posix.py b/IPython/utils/_process_posix.py --- a/IPython/utils/_process_posix.py +++ b/IPython/utils/_process_posix.py @@ -36,7 +36,7 @@ """Find the full path to a command using which.""" path = sp.Popen(['/usr/bin/env', 'which', cmd], - stdout=sp.PIPE).communicate()[0] + stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0] return py3compat.bytes_to_str(path)
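The fix above only adds `stderr=sp.PIPE` to the `Popen` call in `_find_cmd`. A standalone sketch of the same idea (the function name here is chosen for illustration and is not part of IPython):

```python
import subprocess as sp

def find_cmd_quiet(cmd):
    # Piping stderr as well keeps shell noise such as
    # "which: no python3 in (/usr/local/bin:...)" off the terminal;
    # only the resolved path written to stdout is returned.
    out, _err = sp.Popen(['/usr/bin/env', 'which', cmd],
                         stdout=sp.PIPE, stderr=sp.PIPE).communicate()
    return out.decode()

print(repr(find_cmd_quiet('python3')))  # usually '' when python3 is not on PATH
```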
{"golden_diff": "diff --git a/IPython/utils/_process_posix.py b/IPython/utils/_process_posix.py\n--- a/IPython/utils/_process_posix.py\n+++ b/IPython/utils/_process_posix.py\n@@ -36,7 +36,7 @@\n \"\"\"Find the full path to a command using which.\"\"\"\n \n path = sp.Popen(['/usr/bin/env', 'which', cmd],\n- stdout=sp.PIPE).communicate()[0]\n+ stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0]\n return py3compat.bytes_to_str(path)\n", "issue": "script magics cause terminal spam\nsince the addition of script magics in cdde5bba8 one gets a _which_ error message outputted to the terminal on each start:\ne.g. if no python3 is available:\n\n```\n$ ipython\nwhich: no python3 in (/scratch/jtaylor/progs/localinst/lib/ccache:/scratch/jtaylor/progs/localinst/bin:/scratch/jtaylor/progs/Reflex/software/bin:/usr/lib/qt-3.3/bin:/usr/kerberos/bin:/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:/home/jtaylor/gasgano/bin:/scisoft/bin:/home/jtaylor/scripts:/scratch/jtaylor/progs/root/bin)\n```\n\n", "before_files": [{"content": "\"\"\"Posix-specific implementation of process utilities.\n\nThis file is only meant to be imported by process.py, not by end-users.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib\nimport subprocess as sp\nimport sys\n\nfrom IPython.external import pexpect\n\n# Our own\nfrom .autoattr import auto_attr\nfrom ._process_common import getoutput, arg_split\nfrom IPython.utils import text\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\n#-----------------------------------------------------------------------------\n# Function definitions\n#-----------------------------------------------------------------------------\n\ndef _find_cmd(cmd):\n \"\"\"Find the full path to a command using which.\"\"\"\n\n path = sp.Popen(['/usr/bin/env', 'which', cmd],\n stdout=sp.PIPE).communicate()[0]\n return py3compat.bytes_to_str(path)\n\n\nclass ProcessHandler(object):\n \"\"\"Execute subprocesses under the control of pexpect.\n \"\"\"\n # Timeout in seconds to wait on each reading of the subprocess' output.\n # This should not be set too low to avoid cpu overusage from our side,\n # since we read in a loop whose period is controlled by this timeout.\n read_timeout = 0.05\n\n # Timeout to give a process if we receive SIGINT, between sending the\n # SIGINT to the process and forcefully terminating it.\n terminate_timeout = 0.2\n\n # File object where stdout and stderr of the subprocess will be written\n logfile = None\n\n # Shell to call for subprocesses to execute\n sh = None\n\n @auto_attr\n def sh(self):\n sh = pexpect.which('sh')\n if sh is None:\n raise OSError('\"sh\" shell not found')\n return sh\n\n def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):\n \"\"\"Arguments are used for pexpect calls.\"\"\"\n self.read_timeout = (ProcessHandler.read_timeout if read_timeout is\n None else read_timeout)\n self.terminate_timeout = (ProcessHandler.terminate_timeout if\n terminate_timeout is None else\n 
terminate_timeout)\n self.logfile = sys.stdout if logfile is None else logfile\n\n def getoutput(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination of stdout and stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def getoutput_pexpect(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination of stdout and stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def system(self, cmd):\n \"\"\"Execute a command in a subshell.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n int : child's exitstatus\n \"\"\"\n # Get likely encoding for the output.\n enc = DEFAULT_ENCODING\n \n # Patterns to match on the output, for pexpect. We read input and\n # allow either a short timeout or EOF\n patterns = [pexpect.TIMEOUT, pexpect.EOF]\n # the index of the EOF pattern in the list.\n # even though we know it's 1, this call means we don't have to worry if\n # we change the above list, and forget to change this value:\n EOF_index = patterns.index(pexpect.EOF)\n # The size of the output stored so far in the process output buffer.\n # Since pexpect only appends to this buffer, each time we print we\n # record how far we've printed, so that next time we only print *new*\n # content from the buffer.\n out_size = 0\n try:\n # Since we're not really searching the buffer for text patterns, we\n # can set pexpect's search window to be tiny and it won't matter.\n # We only search for the 'patterns' timeout or EOF, which aren't in\n # the text itself.\n #child = pexpect.spawn(pcmd, searchwindowsize=1)\n if hasattr(pexpect, 'spawnb'):\n child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U\n else:\n child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect\n flush = sys.stdout.flush\n while True:\n # res is the index of the pattern that caused the match, so we\n # know whether we've finished (if we matched EOF) or not\n res_idx = child.expect_list(patterns, self.read_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n flush()\n if res_idx==EOF_index:\n break\n # Update the pointer to what we've already printed\n out_size = len(child.before)\n except KeyboardInterrupt:\n # We need to send ^C to the process. 
The ascii code for '^C' is 3\n # (the character is known as ETX for 'End of Text', see\n # curses.ascii.ETX).\n child.sendline(chr(3))\n # Read and print any more output the program might produce on its\n # way out.\n try:\n out_size = len(child.before)\n child.expect_list(patterns, self.terminate_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n sys.stdout.flush()\n except KeyboardInterrupt:\n # Impatient users tend to type it multiple times\n pass\n finally:\n # Ensure the subprocess really is terminated\n child.terminate(force=True)\n # add isalive check, to ensure exitstatus is set:\n child.isalive()\n return child.exitstatus\n\n\n# Make system() with a functional interface for outside use. Note that we use\n# getoutput() from the _common utils, which is built on top of popen(). Using\n# pexpect to get subprocess output produces difficult to parse output, since\n# programs think they are talking to a tty and produce highly formatted output\n# (ls is a good example) that makes them hard.\nsystem = ProcessHandler().system\n\n\n\n", "path": "IPython/utils/_process_posix.py"}], "after_files": [{"content": "\"\"\"Posix-specific implementation of process utilities.\n\nThis file is only meant to be imported by process.py, not by end-users.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib\nimport subprocess as sp\nimport sys\n\nfrom IPython.external import pexpect\n\n# Our own\nfrom .autoattr import auto_attr\nfrom ._process_common import getoutput, arg_split\nfrom IPython.utils import text\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\n#-----------------------------------------------------------------------------\n# Function definitions\n#-----------------------------------------------------------------------------\n\ndef _find_cmd(cmd):\n \"\"\"Find the full path to a command using which.\"\"\"\n\n path = sp.Popen(['/usr/bin/env', 'which', cmd],\n stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0]\n return py3compat.bytes_to_str(path)\n\n\nclass ProcessHandler(object):\n \"\"\"Execute subprocesses under the control of pexpect.\n \"\"\"\n # Timeout in seconds to wait on each reading of the subprocess' output.\n # This should not be set too low to avoid cpu overusage from our side,\n # since we read in a loop whose period is controlled by this timeout.\n read_timeout = 0.05\n\n # Timeout to give a process if we receive SIGINT, between sending the\n # SIGINT to the process and forcefully terminating it.\n terminate_timeout = 0.2\n\n # File object where stdout and stderr of the subprocess will be written\n logfile = None\n\n # Shell to call for subprocesses to execute\n sh = None\n\n @auto_attr\n def sh(self):\n sh = pexpect.which('sh')\n if sh is None:\n raise OSError('\"sh\" shell not found')\n return sh\n\n def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):\n \"\"\"Arguments are used for pexpect calls.\"\"\"\n self.read_timeout = (ProcessHandler.read_timeout if 
read_timeout is\n None else read_timeout)\n self.terminate_timeout = (ProcessHandler.terminate_timeout if\n terminate_timeout is None else\n terminate_timeout)\n self.logfile = sys.stdout if logfile is None else logfile\n\n def getoutput(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination of stdout and stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def getoutput_pexpect(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination of stdout and stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def system(self, cmd):\n \"\"\"Execute a command in a subshell.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n int : child's exitstatus\n \"\"\"\n # Get likely encoding for the output.\n enc = DEFAULT_ENCODING\n \n # Patterns to match on the output, for pexpect. We read input and\n # allow either a short timeout or EOF\n patterns = [pexpect.TIMEOUT, pexpect.EOF]\n # the index of the EOF pattern in the list.\n # even though we know it's 1, this call means we don't have to worry if\n # we change the above list, and forget to change this value:\n EOF_index = patterns.index(pexpect.EOF)\n # The size of the output stored so far in the process output buffer.\n # Since pexpect only appends to this buffer, each time we print we\n # record how far we've printed, so that next time we only print *new*\n # content from the buffer.\n out_size = 0\n try:\n # Since we're not really searching the buffer for text patterns, we\n # can set pexpect's search window to be tiny and it won't matter.\n # We only search for the 'patterns' timeout or EOF, which aren't in\n # the text itself.\n #child = pexpect.spawn(pcmd, searchwindowsize=1)\n if hasattr(pexpect, 'spawnb'):\n child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U\n else:\n child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect\n flush = sys.stdout.flush\n while True:\n # res is the index of the pattern that caused the match, so we\n # know whether we've finished (if we matched EOF) or not\n res_idx = child.expect_list(patterns, self.read_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n flush()\n if res_idx==EOF_index:\n break\n # Update the pointer to what we've already printed\n out_size = len(child.before)\n except KeyboardInterrupt:\n # We need to send ^C to the process. 
The ascii code for '^C' is 3\n # (the character is known as ETX for 'End of Text', see\n # curses.ascii.ETX).\n child.sendline(chr(3))\n # Read and print any more output the program might produce on its\n # way out.\n try:\n out_size = len(child.before)\n child.expect_list(patterns, self.terminate_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n sys.stdout.flush()\n except KeyboardInterrupt:\n # Impatient users tend to type it multiple times\n pass\n finally:\n # Ensure the subprocess really is terminated\n child.terminate(force=True)\n # add isalive check, to ensure exitstatus is set:\n child.isalive()\n return child.exitstatus\n\n\n# Make system() with a functional interface for outside use. Note that we use\n# getoutput() from the _common utils, which is built on top of popen(). Using\n# pexpect to get subprocess output produces difficult to parse output, since\n# programs think they are talking to a tty and produce highly formatted output\n# (ls is a good example) that makes them hard.\nsystem = ProcessHandler().system\n\n\n\n", "path": "IPython/utils/_process_posix.py"}]}
2,519
123
gh_patches_debug_17522
rasdani/github-patches
git_diff
mkdocs__mkdocs-1767
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- README.md in theme directory overrides index.md in docs ## Summary MkDocs will generate `index.html` from `README.md` in the theme directory, even if `index.md` exists in the docs directory. ## Steps to reproduce Consider the following minimal example: ``` ├── docs │   └── index.md ├── mkdocs.yml └── theme ├── main.html └── README.md ``` docs/index.md: ``` The right index. ``` theme/README.md: ``` The wrong index. ``` theme/main.html: ``` {{ page.content }} ``` mkdocs.yml: ```yaml site_name: Repro theme: name: null custom_dir: theme nav: - Overview: index.md ``` After running `mkdocs build`, the following `index.html` is produced: ``` <p>The wrong index.</p> ``` Furthermore, `mkdocs build` emits the following message: ``` INFO - The following pages exist in the docs directory, but are not included in the "nav" configuration: - README.md ``` This is especially surprising, because the [the docs say](https://www.mkdocs.org/user-guide/writing-your-docs/#file-layout): > If both an index.md file and a README.md file are found in the same directory, then the index.md file is used and the README.md file is ignored. I would expect markdown files in the `theme` directory to not affect how the documentation is built, unless they are specifically included from a template. ``` $ mkdocs --version mkdocs, version 1.0.4 from /usr/lib/python3.7/site-packages/mkdocs (Python 3.7) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mkdocs/structure/files.py` Content: ``` 1 # coding: utf-8 2 3 from __future__ import unicode_literals 4 import fnmatch 5 import os 6 import logging 7 from functools import cmp_to_key 8 9 from mkdocs import utils 10 11 12 log = logging.getLogger(__name__) 13 log.addFilter(utils.warning_filter) 14 15 16 class Files(object): 17 """ A collection of File objects. """ 18 def __init__(self, files): 19 self._files = files 20 self.src_paths = {file.src_path: file for file in files} 21 22 def __iter__(self): 23 return iter(self._files) 24 25 def __len__(self): 26 return len(self._files) 27 28 def __contains__(self, path): 29 return path in self.src_paths 30 31 def get_file_from_path(self, path): 32 """ Return a File instance with File.src_path equal to path. """ 33 return self.src_paths.get(os.path.normpath(path)) 34 35 def append(self, file): 36 """ Append file to Files collection. """ 37 self._files.append(file) 38 self.src_paths[file.src_path] = file 39 40 def copy_static_files(self, dirty=False): 41 """ Copy static files from source to destination. """ 42 for file in self: 43 if not file.is_documentation_page(): 44 file.copy_file(dirty) 45 46 def documentation_pages(self): 47 """ Return iterable of all Markdown page file objects. """ 48 return [file for file in self if file.is_documentation_page()] 49 50 def static_pages(self): 51 """ Return iterable of all static page file objects. """ 52 return [file for file in self if file.is_static_page()] 53 54 def media_files(self): 55 """ Return iterable of all file objects which are not documentation or static pages. """ 56 return [file for file in self if file.is_media_file()] 57 58 def javascript_files(self): 59 """ Return iterable of all javascript file objects. """ 60 return [file for file in self if file.is_javascript()] 61 62 def css_files(self): 63 """ Return iterable of all CSS file objects. 
""" 64 return [file for file in self if file.is_css()] 65 66 def add_files_from_theme(self, env, config): 67 """ Retrieve static files from Jinja environment and add to collection. """ 68 def filter(name): 69 patterns = ['.*', '*.py', '*.pyc', '*.html', 'mkdocs_theme.yml'] 70 patterns.extend(config['theme'].static_templates) 71 for pattern in patterns: 72 if fnmatch.fnmatch(name, pattern): 73 return False 74 return True 75 for path in env.list_templates(filter_func=filter): 76 # Theme files do not override docs_dir files 77 if path not in self: 78 for dir in config['theme'].dirs: 79 # Find the first theme dir which contains path 80 if os.path.isfile(os.path.join(dir, path)): 81 self.append(File(path, dir, config['site_dir'], config['use_directory_urls'])) 82 break 83 84 85 class File(object): 86 """ 87 A MkDocs File object. 88 89 Points to the source and destination locations of a file. 90 91 The `path` argument must be a path that exists relative to `src_dir`. 92 93 The `src_dir` and `dest_dir` must be absolute paths on the local file system. 94 95 The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is 96 mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is 97 mapped to an HTML index file (`index.html`) nested in a directory using the "name" of the file in `path`. The 98 `use_directory_urls` argument has no effect on non-Markdown files. 99 100 File objects have the following properties, which are Unicode strings: 101 102 File.src_path 103 The pure path of the source file relative to the source directory. 104 105 File.abs_src_path 106 The absolute concrete path of the source file. 107 108 File.dest_path 109 The pure path of the destination file relative to the destination directory. 110 111 File.abs_dest_path 112 The absolute concrete path of the destination file. 113 114 File.url 115 The url of the destination file relative to the destination directory as a string. 116 """ 117 def __init__(self, path, src_dir, dest_dir, use_directory_urls): 118 self.page = None 119 self.src_path = os.path.normpath(path) 120 self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path)) 121 self.name = self._get_stem() 122 self.dest_path = self._get_dest_path(use_directory_urls) 123 self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path)) 124 self.url = self._get_url(use_directory_urls) 125 126 def __eq__(self, other): 127 128 def sub_dict(d): 129 return dict((key, value) for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url']) 130 131 return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__)) 132 133 def __ne__(self, other): 134 return not self.__eq__(other) 135 136 def _get_stem(self): 137 """ Return the name of the file without it's extension. """ 138 filename = os.path.basename(self.src_path) 139 stem, ext = os.path.splitext(filename) 140 return 'index' if stem in ('index', 'README') else stem 141 142 def _get_dest_path(self, use_directory_urls): 143 """ Return destination path based on source path. 
""" 144 if self.is_documentation_page(): 145 if use_directory_urls: 146 parent, filename = os.path.split(self.src_path) 147 if self.name == 'index': 148 # index.md or README.md => index.html 149 return os.path.join(parent, 'index.html') 150 else: 151 # foo.md => foo/index.html 152 return os.path.join(parent, self.name, 'index.html') 153 else: 154 # foo.md => foo.html 155 root, ext = os.path.splitext(self.src_path) 156 return root + '.html' 157 return self.src_path 158 159 def _get_url(self, use_directory_urls): 160 """ Return url based in destination path. """ 161 url = self.dest_path.replace(os.path.sep, '/') 162 dirname, filename = os.path.split(url) 163 if use_directory_urls and filename == 'index.html': 164 if dirname == '': 165 url = '.' 166 else: 167 url = dirname + '/' 168 return utils.urlquote(url) 169 170 def url_relative_to(self, other): 171 """ Return url for file relative to other file. """ 172 return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other) 173 174 def copy_file(self, dirty=False): 175 """ Copy source file to destination, ensuring parent directories exist. """ 176 if dirty and not self.is_modified(): 177 log.debug("Skip copying unmodified file: '{}'".format(self.src_path)) 178 else: 179 log.debug("Copying media file: '{}'".format(self.src_path)) 180 utils.copy_file(self.abs_src_path, self.abs_dest_path) 181 182 def is_modified(self): 183 if os.path.isfile(self.abs_dest_path): 184 return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path) 185 return True 186 187 def is_documentation_page(self): 188 """ Return True if file is a Markdown page. """ 189 return os.path.splitext(self.src_path)[1] in utils.markdown_extensions 190 191 def is_static_page(self): 192 """ Return True if file is a static page (html, xml, json). """ 193 return os.path.splitext(self.src_path)[1] in ( 194 '.html', 195 '.htm', 196 '.xml', 197 '.json', 198 ) 199 200 def is_media_file(self): 201 """ Return True if file is not a documentation or static page. """ 202 return not (self.is_documentation_page() or self.is_static_page()) 203 204 def is_javascript(self): 205 """ Return True if file is a JavaScript file. """ 206 return os.path.splitext(self.src_path)[1] in ( 207 '.js', 208 '.javascript', 209 ) 210 211 def is_css(self): 212 """ Return True if file is a CSS file. """ 213 return os.path.splitext(self.src_path)[1] in ( 214 '.css', 215 ) 216 217 218 def get_files(config): 219 """ Walk the `docs_dir` and return a Files collection. """ 220 files = [] 221 exclude = ['.*', '/templates'] 222 223 for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True): 224 relative_dir = os.path.relpath(source_dir, config['docs_dir']) 225 226 for dirname in list(dirnames): 227 path = os.path.normpath(os.path.join(relative_dir, dirname)) 228 # Skip any excluded directories 229 if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude): 230 dirnames.remove(dirname) 231 dirnames.sort() 232 233 for filename in _sort_files(filenames): 234 path = os.path.normpath(os.path.join(relative_dir, filename)) 235 # Skip any excluded files 236 if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude): 237 continue 238 # Skip README.md if an index file also exists in dir 239 if filename.lower() == 'readme.md' and 'index.md' in filenames: 240 log.warning("Both index.md and readme.md found. 
Skipping readme.md from {}".format(source_dir)) 241 continue 242 files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls'])) 243 244 return Files(files) 245 246 247 def _sort_files(filenames): 248 """ Always sort `index` or `README` as first filename in list. """ 249 250 def compare(x, y): 251 if x == y: 252 return 0 253 if os.path.splitext(y)[0] in ['index', 'README']: 254 return 1 255 if os.path.splitext(x)[0] in ['index', 'README'] or x < y: 256 return -1 257 return 1 258 259 return sorted(filenames, key=cmp_to_key(compare)) 260 261 262 def _filter_paths(basename, path, is_dir, exclude): 263 """ .gitignore style file filtering. """ 264 for item in exclude: 265 # Items ending in '/' apply only to directories. 266 if item.endswith('/') and not is_dir: 267 continue 268 # Items starting with '/' apply to the whole path. 269 # In any other cases just the basename is used. 270 match = path if item.startswith('/') else basename 271 if fnmatch.fnmatch(match, item.strip('/')): 272 return True 273 return False 274 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py --- a/mkdocs/structure/files.py +++ b/mkdocs/structure/files.py @@ -66,10 +66,11 @@ def add_files_from_theme(self, env, config): """ Retrieve static files from Jinja environment and add to collection. """ def filter(name): - patterns = ['.*', '*.py', '*.pyc', '*.html', 'mkdocs_theme.yml'] + patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml'] + patterns.extend('*{0}'.format(x) for x in utils.markdown_extensions) patterns.extend(config['theme'].static_templates) for pattern in patterns: - if fnmatch.fnmatch(name, pattern): + if fnmatch.fnmatch(name.lower(), pattern): return False return True for path in env.list_templates(filter_func=filter):
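The patch makes the theme-file filter case-insensitive and also rejects Markdown sources such as README.md. A self-contained sketch of that filter logic (the extension list is a stand-in for `mkdocs.utils.markdown_extensions`):

```python
import fnmatch

markdown_extensions = ['.md', '.markdown']  # illustrative subset

def keep_theme_file(name):
    patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']
    patterns.extend('*{0}'.format(ext) for ext in markdown_extensions)
    # Matching on name.lower() rejects README.md, Readme.md and readme.md alike,
    # so a README.md inside the theme dir can no longer shadow docs/index.md.
    return not any(fnmatch.fnmatch(name.lower(), pattern) for pattern in patterns)

print(keep_theme_file('README.md'))  # False - never copied from the theme
print(keep_theme_file('main.html'))  # False - templates were already excluded
print(keep_theme_file('theme.css'))  # True  - static assets still pass through
```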
{"golden_diff": "diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py\n--- a/mkdocs/structure/files.py\n+++ b/mkdocs/structure/files.py\n@@ -66,10 +66,11 @@\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n- patterns = ['.*', '*.py', '*.pyc', '*.html', 'mkdocs_theme.yml']\n+ patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n+ patterns.extend('*{0}'.format(x) for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n- if fnmatch.fnmatch(name, pattern):\n+ if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n", "issue": "README.md in theme directory overrides index.md in docs\n## Summary\r\n\r\nMkDocs will generate `index.html` from `README.md` in the theme directory, even if `index.md` exists in the docs directory.\r\n\r\n## Steps to reproduce\r\nConsider the following minimal example:\r\n```\r\n\u251c\u2500\u2500 docs\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 index.md\r\n\u251c\u2500\u2500 mkdocs.yml\r\n\u2514\u2500\u2500 theme\r\n \u251c\u2500\u2500 main.html\r\n \u2514\u2500\u2500 README.md\r\n```\r\ndocs/index.md:\r\n```\r\nThe right index.\r\n```\r\ntheme/README.md:\r\n```\r\nThe wrong index.\r\n```\r\ntheme/main.html:\r\n```\r\n{{ page.content }}\r\n```\r\nmkdocs.yml:\r\n```yaml\r\nsite_name: Repro\r\ntheme:\r\n name: null\r\n custom_dir: theme\r\nnav:\r\n - Overview: index.md\r\n```\r\n\r\nAfter running `mkdocs build`, the following `index.html` is produced:\r\n```\r\n<p>The wrong index.</p>\r\n```\r\nFurthermore, `mkdocs build` emits the following message:\r\n```\r\nINFO - The following pages exist in the docs directory, but are not included in the \"nav\" configuration:\r\n - README.md \r\n```\r\n\r\nThis is especially surprising, because the [the docs say](https://www.mkdocs.org/user-guide/writing-your-docs/#file-layout):\r\n\r\n> If both an index.md file and a README.md file are found in the same directory, then the index.md file is used and the README.md file is ignored.\r\n\r\nI would expect markdown files in the `theme` directory to not affect how the documentation is built, unless they are specifically included from a template.\r\n\r\n```\r\n$ mkdocs --version\r\nmkdocs, version 1.0.4 from /usr/lib/python3.7/site-packages/mkdocs (Python 3.7)\r\n```\n", "before_files": [{"content": "# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport fnmatch\nimport os\nimport logging\nfrom functools import cmp_to_key\n\nfrom mkdocs import utils\n\n\nlog = logging.getLogger(__name__)\nlog.addFilter(utils.warning_filter)\n\n\nclass Files(object):\n \"\"\" A collection of File objects. \"\"\"\n def __init__(self, files):\n self._files = files\n self.src_paths = {file.src_path: file for file in files}\n\n def __iter__(self):\n return iter(self._files)\n\n def __len__(self):\n return len(self._files)\n\n def __contains__(self, path):\n return path in self.src_paths\n\n def get_file_from_path(self, path):\n \"\"\" Return a File instance with File.src_path equal to path. \"\"\"\n return self.src_paths.get(os.path.normpath(path))\n\n def append(self, file):\n \"\"\" Append file to Files collection. \"\"\"\n self._files.append(file)\n self.src_paths[file.src_path] = file\n\n def copy_static_files(self, dirty=False):\n \"\"\" Copy static files from source to destination. 
\"\"\"\n for file in self:\n if not file.is_documentation_page():\n file.copy_file(dirty)\n\n def documentation_pages(self):\n \"\"\" Return iterable of all Markdown page file objects. \"\"\"\n return [file for file in self if file.is_documentation_page()]\n\n def static_pages(self):\n \"\"\" Return iterable of all static page file objects. \"\"\"\n return [file for file in self if file.is_static_page()]\n\n def media_files(self):\n \"\"\" Return iterable of all file objects which are not documentation or static pages. \"\"\"\n return [file for file in self if file.is_media_file()]\n\n def javascript_files(self):\n \"\"\" Return iterable of all javascript file objects. \"\"\"\n return [file for file in self if file.is_javascript()]\n\n def css_files(self):\n \"\"\" Return iterable of all CSS file objects. \"\"\"\n return [file for file in self if file.is_css()]\n\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n patterns = ['.*', '*.py', '*.pyc', '*.html', 'mkdocs_theme.yml']\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n if fnmatch.fnmatch(name, pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n # Theme files do not override docs_dir files\n if path not in self:\n for dir in config['theme'].dirs:\n # Find the first theme dir which contains path\n if os.path.isfile(os.path.join(dir, path)):\n self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))\n break\n\n\nclass File(object):\n \"\"\"\n A MkDocs File object.\n\n Points to the source and destination locations of a file.\n\n The `path` argument must be a path that exists relative to `src_dir`.\n\n The `src_dir` and `dest_dir` must be absolute paths on the local file system.\n\n The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is\n mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is\n mapped to an HTML index file (`index.html`) nested in a directory using the \"name\" of the file in `path`. 
The\n `use_directory_urls` argument has no effect on non-Markdown files.\n\n File objects have the following properties, which are Unicode strings:\n\n File.src_path\n The pure path of the source file relative to the source directory.\n\n File.abs_src_path\n The absolute concrete path of the source file.\n\n File.dest_path\n The pure path of the destination file relative to the destination directory.\n\n File.abs_dest_path\n The absolute concrete path of the destination file.\n\n File.url\n The url of the destination file relative to the destination directory as a string.\n \"\"\"\n def __init__(self, path, src_dir, dest_dir, use_directory_urls):\n self.page = None\n self.src_path = os.path.normpath(path)\n self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))\n self.name = self._get_stem()\n self.dest_path = self._get_dest_path(use_directory_urls)\n self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))\n self.url = self._get_url(use_directory_urls)\n\n def __eq__(self, other):\n\n def sub_dict(d):\n return dict((key, value) for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url'])\n\n return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def _get_stem(self):\n \"\"\" Return the name of the file without it's extension. \"\"\"\n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n\n def _get_dest_path(self, use_directory_urls):\n \"\"\" Return destination path based on source path. \"\"\"\n if self.is_documentation_page():\n if use_directory_urls:\n parent, filename = os.path.split(self.src_path)\n if self.name == 'index':\n # index.md or README.md => index.html\n return os.path.join(parent, 'index.html')\n else:\n # foo.md => foo/index.html\n return os.path.join(parent, self.name, 'index.html')\n else:\n # foo.md => foo.html\n root, ext = os.path.splitext(self.src_path)\n return root + '.html'\n return self.src_path\n\n def _get_url(self, use_directory_urls):\n \"\"\" Return url based in destination path. \"\"\"\n url = self.dest_path.replace(os.path.sep, '/')\n dirname, filename = os.path.split(url)\n if use_directory_urls and filename == 'index.html':\n if dirname == '':\n url = '.'\n else:\n url = dirname + '/'\n return utils.urlquote(url)\n\n def url_relative_to(self, other):\n \"\"\" Return url for file relative to other file. \"\"\"\n return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)\n\n def copy_file(self, dirty=False):\n \"\"\" Copy source file to destination, ensuring parent directories exist. \"\"\"\n if dirty and not self.is_modified():\n log.debug(\"Skip copying unmodified file: '{}'\".format(self.src_path))\n else:\n log.debug(\"Copying media file: '{}'\".format(self.src_path))\n utils.copy_file(self.abs_src_path, self.abs_dest_path)\n\n def is_modified(self):\n if os.path.isfile(self.abs_dest_path):\n return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)\n return True\n\n def is_documentation_page(self):\n \"\"\" Return True if file is a Markdown page. \"\"\"\n return os.path.splitext(self.src_path)[1] in utils.markdown_extensions\n\n def is_static_page(self):\n \"\"\" Return True if file is a static page (html, xml, json). 
\"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.html',\n '.htm',\n '.xml',\n '.json',\n )\n\n def is_media_file(self):\n \"\"\" Return True if file is not a documentation or static page. \"\"\"\n return not (self.is_documentation_page() or self.is_static_page())\n\n def is_javascript(self):\n \"\"\" Return True if file is a JavaScript file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.js',\n '.javascript',\n )\n\n def is_css(self):\n \"\"\" Return True if file is a CSS file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.css',\n )\n\n\ndef get_files(config):\n \"\"\" Walk the `docs_dir` and return a Files collection. \"\"\"\n files = []\n exclude = ['.*', '/templates']\n\n for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):\n relative_dir = os.path.relpath(source_dir, config['docs_dir'])\n\n for dirname in list(dirnames):\n path = os.path.normpath(os.path.join(relative_dir, dirname))\n # Skip any excluded directories\n if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):\n dirnames.remove(dirname)\n dirnames.sort()\n\n for filename in _sort_files(filenames):\n path = os.path.normpath(os.path.join(relative_dir, filename))\n # Skip any excluded files\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n if filename.lower() == 'readme.md' and 'index.md' in filenames:\n log.warning(\"Both index.md and readme.md found. Skipping readme.md from {}\".format(source_dir))\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n\n return Files(files)\n\n\ndef _sort_files(filenames):\n \"\"\" Always sort `index` or `README` as first filename in list. \"\"\"\n\n def compare(x, y):\n if x == y:\n return 0\n if os.path.splitext(y)[0] in ['index', 'README']:\n return 1\n if os.path.splitext(x)[0] in ['index', 'README'] or x < y:\n return -1\n return 1\n\n return sorted(filenames, key=cmp_to_key(compare))\n\n\ndef _filter_paths(basename, path, is_dir, exclude):\n \"\"\" .gitignore style file filtering. \"\"\"\n for item in exclude:\n # Items ending in '/' apply only to directories.\n if item.endswith('/') and not is_dir:\n continue\n # Items starting with '/' apply to the whole path.\n # In any other cases just the basename is used.\n match = path if item.startswith('/') else basename\n if fnmatch.fnmatch(match, item.strip('/')):\n return True\n return False\n", "path": "mkdocs/structure/files.py"}], "after_files": [{"content": "# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport fnmatch\nimport os\nimport logging\nfrom functools import cmp_to_key\n\nfrom mkdocs import utils\n\n\nlog = logging.getLogger(__name__)\nlog.addFilter(utils.warning_filter)\n\n\nclass Files(object):\n \"\"\" A collection of File objects. \"\"\"\n def __init__(self, files):\n self._files = files\n self.src_paths = {file.src_path: file for file in files}\n\n def __iter__(self):\n return iter(self._files)\n\n def __len__(self):\n return len(self._files)\n\n def __contains__(self, path):\n return path in self.src_paths\n\n def get_file_from_path(self, path):\n \"\"\" Return a File instance with File.src_path equal to path. \"\"\"\n return self.src_paths.get(os.path.normpath(path))\n\n def append(self, file):\n \"\"\" Append file to Files collection. 
\"\"\"\n self._files.append(file)\n self.src_paths[file.src_path] = file\n\n def copy_static_files(self, dirty=False):\n \"\"\" Copy static files from source to destination. \"\"\"\n for file in self:\n if not file.is_documentation_page():\n file.copy_file(dirty)\n\n def documentation_pages(self):\n \"\"\" Return iterable of all Markdown page file objects. \"\"\"\n return [file for file in self if file.is_documentation_page()]\n\n def static_pages(self):\n \"\"\" Return iterable of all static page file objects. \"\"\"\n return [file for file in self if file.is_static_page()]\n\n def media_files(self):\n \"\"\" Return iterable of all file objects which are not documentation or static pages. \"\"\"\n return [file for file in self if file.is_media_file()]\n\n def javascript_files(self):\n \"\"\" Return iterable of all javascript file objects. \"\"\"\n return [file for file in self if file.is_javascript()]\n\n def css_files(self):\n \"\"\" Return iterable of all CSS file objects. \"\"\"\n return [file for file in self if file.is_css()]\n\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n patterns.extend('*{0}'.format(x) for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n # Theme files do not override docs_dir files\n if path not in self:\n for dir in config['theme'].dirs:\n # Find the first theme dir which contains path\n if os.path.isfile(os.path.join(dir, path)):\n self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))\n break\n\n\nclass File(object):\n \"\"\"\n A MkDocs File object.\n\n Points to the source and destination locations of a file.\n\n The `path` argument must be a path that exists relative to `src_dir`.\n\n The `src_dir` and `dest_dir` must be absolute paths on the local file system.\n\n The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is\n mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is\n mapped to an HTML index file (`index.html`) nested in a directory using the \"name\" of the file in `path`. 
The\n `use_directory_urls` argument has no effect on non-Markdown files.\n\n File objects have the following properties, which are Unicode strings:\n\n File.src_path\n The pure path of the source file relative to the source directory.\n\n File.abs_src_path\n The absolute concrete path of the source file.\n\n File.dest_path\n The pure path of the destination file relative to the destination directory.\n\n File.abs_dest_path\n The absolute concrete path of the destination file.\n\n File.url\n The url of the destination file relative to the destination directory as a string.\n \"\"\"\n def __init__(self, path, src_dir, dest_dir, use_directory_urls):\n self.page = None\n self.src_path = os.path.normpath(path)\n self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))\n self.name = self._get_stem()\n self.dest_path = self._get_dest_path(use_directory_urls)\n self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))\n self.url = self._get_url(use_directory_urls)\n\n def __eq__(self, other):\n\n def sub_dict(d):\n return dict((key, value) for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url'])\n\n return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def _get_stem(self):\n \"\"\" Return the name of the file without it's extension. \"\"\"\n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n\n def _get_dest_path(self, use_directory_urls):\n \"\"\" Return destination path based on source path. \"\"\"\n if self.is_documentation_page():\n if use_directory_urls:\n parent, filename = os.path.split(self.src_path)\n if self.name == 'index':\n # index.md or README.md => index.html\n return os.path.join(parent, 'index.html')\n else:\n # foo.md => foo/index.html\n return os.path.join(parent, self.name, 'index.html')\n else:\n # foo.md => foo.html\n root, ext = os.path.splitext(self.src_path)\n return root + '.html'\n return self.src_path\n\n def _get_url(self, use_directory_urls):\n \"\"\" Return url based in destination path. \"\"\"\n url = self.dest_path.replace(os.path.sep, '/')\n dirname, filename = os.path.split(url)\n if use_directory_urls and filename == 'index.html':\n if dirname == '':\n url = '.'\n else:\n url = dirname + '/'\n return utils.urlquote(url)\n\n def url_relative_to(self, other):\n \"\"\" Return url for file relative to other file. \"\"\"\n return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)\n\n def copy_file(self, dirty=False):\n \"\"\" Copy source file to destination, ensuring parent directories exist. \"\"\"\n if dirty and not self.is_modified():\n log.debug(\"Skip copying unmodified file: '{}'\".format(self.src_path))\n else:\n log.debug(\"Copying media file: '{}'\".format(self.src_path))\n utils.copy_file(self.abs_src_path, self.abs_dest_path)\n\n def is_modified(self):\n if os.path.isfile(self.abs_dest_path):\n return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)\n return True\n\n def is_documentation_page(self):\n \"\"\" Return True if file is a Markdown page. \"\"\"\n return os.path.splitext(self.src_path)[1] in utils.markdown_extensions\n\n def is_static_page(self):\n \"\"\" Return True if file is a static page (html, xml, json). 
\"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.html',\n '.htm',\n '.xml',\n '.json',\n )\n\n def is_media_file(self):\n \"\"\" Return True if file is not a documentation or static page. \"\"\"\n return not (self.is_documentation_page() or self.is_static_page())\n\n def is_javascript(self):\n \"\"\" Return True if file is a JavaScript file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.js',\n '.javascript',\n )\n\n def is_css(self):\n \"\"\" Return True if file is a CSS file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.css',\n )\n\n\ndef get_files(config):\n \"\"\" Walk the `docs_dir` and return a Files collection. \"\"\"\n files = []\n exclude = ['.*', '/templates']\n\n for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):\n relative_dir = os.path.relpath(source_dir, config['docs_dir'])\n\n for dirname in list(dirnames):\n path = os.path.normpath(os.path.join(relative_dir, dirname))\n # Skip any excluded directories\n if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):\n dirnames.remove(dirname)\n dirnames.sort()\n\n for filename in _sort_files(filenames):\n path = os.path.normpath(os.path.join(relative_dir, filename))\n # Skip any excluded files\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n if filename.lower() == 'readme.md' and 'index.md' in filenames:\n log.warning(\"Both index.md and readme.md found. Skipping readme.md from {}\".format(source_dir))\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n\n return Files(files)\n\n\ndef _sort_files(filenames):\n \"\"\" Always sort `index` or `README` as first filename in list. \"\"\"\n\n def compare(x, y):\n if x == y:\n return 0\n if os.path.splitext(y)[0] in ['index', 'README']:\n return 1\n if os.path.splitext(x)[0] in ['index', 'README'] or x < y:\n return -1\n return 1\n\n return sorted(filenames, key=cmp_to_key(compare))\n\n\ndef _filter_paths(basename, path, is_dir, exclude):\n \"\"\" .gitignore style file filtering. \"\"\"\n for item in exclude:\n # Items ending in '/' apply only to directories.\n if item.endswith('/') and not is_dir:\n continue\n # Items starting with '/' apply to the whole path.\n # In any other cases just the basename is used.\n match = path if item.startswith('/') else basename\n if fnmatch.fnmatch(match, item.strip('/')):\n return True\n return False\n", "path": "mkdocs/structure/files.py"}]}
num_tokens: 3,606
num_tokens_diff: 214
problem_id: gh_patches_debug_7499
source: rasdani/github-patches
task_type: git_diff
in_source_id: zestedesavoir__zds-site-4985
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Impossible de ne plus suivre un billet Je me suis abonné au billet [Ô Belgique, ô ... ](https://zestedesavoir.com/billets/2681/o-belgique-o/) peu après sa publication. Or, il m'est désormais impossible de ne plus le suivre. De même, j'ai cliqué sur "Être notifié par courriel", et il m'est impossible de l'annuler. Quand je clique sur les boutons correspondants dans la sidebar, rien ne se passe. La même chose se passe sur le billet [Notification des failles de sécurité et droit pénal](https://zestedesavoir.com/billets/2568/notification-des-failles-de-securite-et-droit-penal/). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zds/notification/managers.py` Content: ``` 1 from django.contrib.contenttypes.models import ContentType 2 from django.core.exceptions import ObjectDoesNotExist 3 from django.db import models 4 5 from zds.forum.models import Topic 6 from zds.notification import signals 7 from zds.utils import get_current_user 8 9 10 class SubscriptionManager(models.Manager): 11 """ 12 Custom subscription manager 13 """ 14 15 def __create_lookup_args(self, user, content_object, is_active, by_email): 16 """ 17 Generates QuerySet lookup parameters for use with get(), filter(), ... 18 """ 19 content_type = ContentType.objects.get_for_model(content_object) 20 lookup = dict( 21 object_id=content_object.pk, 22 content_type__pk=content_type.pk, 23 user=user 24 ) 25 if is_active is not None: 26 lookup['is_active'] = is_active 27 if by_email is not None: 28 lookup['by_email'] = by_email 29 return lookup 30 31 def get_existing(self, user, content_object, is_active=None, by_email=None): 32 """ 33 If exists, return the existing subscription for the given user and content object. 34 35 :param user: concerned user. 36 :type user: django.contrib.auth.models.User 37 :param content_object: Generic content concerned. 38 :type content_object: instance concerned by notifications 39 :param is_active: Boolean to know if we want a subscription active or not. 40 :type is_active: Boolean 41 :param by_email: Boolean to know if we want a subscription for email or not. 42 :type by_email: Boolean 43 :return: subscription or None 44 """ 45 lookup = self.__create_lookup_args(user, content_object, is_active, by_email) 46 try: 47 existing = self.get(**lookup) 48 except ObjectDoesNotExist: 49 existing = None 50 return existing 51 52 def does_exist(self, user, content_object, is_active=None, by_email=None): 53 """ 54 Check if there is a subscription for the given user and content object. 55 56 :param user: concerned user. 57 :type user: django.contrib.auth.models.User 58 :param content_object: Generic content concerned. 59 :type content_object: instance concerned by notifications 60 :param is_active: Boolean to know if we want a subscription active or not. 61 :type is_active: Boolean 62 :param by_email: Boolean to know if we want a subscription for email or not. 63 :type by_email: Boolean 64 :return: Boolean, whether this subscription exists or not 65 """ 66 lookup = self.__create_lookup_args(user, content_object, is_active, by_email) 67 return self.filter(**lookup).exists() 68 69 def get_or_create_active(self, user, content_object): 70 """ 71 Gets (or create if it doesn't exist) the subscription for the content object given. 72 73 :param user: concerned user. 
74 :type user: django.contrib.auth.models.User 75 :param content_object: Generic content concerned. 76 :type content_object: instance concerned by notifications 77 :return: subscription 78 """ 79 content_type = ContentType.objects.get_for_model(content_object) 80 try: 81 subscription = self.get( 82 object_id=content_object.pk, 83 content_type__pk=content_type.pk, 84 user=user) 85 if not subscription.is_active: 86 subscription.activate() 87 except ObjectDoesNotExist: 88 subscription = self.model(user=user, content_object=content_object) 89 subscription.save() 90 91 return subscription 92 93 def get_subscriptions(self, content_object, is_active=True): 94 """ 95 Gets subscriptions of the content object. 96 97 :param content_object: Generic content concerned. 98 :type content_object: instance concerned by notifications 99 :param is_active: Boolean to know if we want a subscription active or not. 100 :type is_active: Boolean 101 :return: an iterable list of subscriptions 102 """ 103 content_type = ContentType.objects.get_for_model(content_object) 104 return self.filter(object_id=content_object.pk, 105 content_type__pk=content_type.pk, 106 is_active=is_active) 107 108 def get_subscribers(self, content_object, only_by_email=False): 109 """ 110 Gets all subscribers of a content object. 111 112 :param content_object: Generic content concerned. 113 :type content_object: instance concerned by notifications 114 :param only_by_email: Boolean to know if we want a subscription for email or not. 115 :type only_by_email: Boolean 116 :return: users 117 """ 118 content_type = ContentType.objects.get_for_model(content_object) 119 if only_by_email: 120 # if I'm only interested by the email subscription 121 subscription_list = self.filter( 122 object_id=content_object.pk, 123 content_type__pk=content_type.pk, 124 by_email=True) 125 else: 126 subscription_list = self.filter( 127 object_id=content_object.pk, 128 content_type__pk=content_type.pk) 129 130 return [subscription.user for subscription in subscription_list] 131 132 def toggle_follow(self, content_object, user=None, by_email=False): 133 """ 134 Toggle following of a resource notifiable for a user. 135 136 :param content_object: A resource notifiable. 137 :param user: A user. If undefined, the current user is used. 138 :param by_email: Get subscription by email or not. 139 :return: subscription of the user for the content. 
140 """ 141 if not user: 142 user = get_current_user() 143 if by_email: 144 existing = self.get_existing(user, content_object, is_active=True, by_email=True) 145 else: 146 existing = self.get_existing(user, content_object, is_active=True) 147 if not existing: 148 subscription = self.get_or_create_active(user, content_object) 149 if by_email: 150 subscription.activate_email() 151 return subscription 152 signals.content_read.send(sender=content_object.__class__, instance=content_object, user=user) 153 if by_email: 154 existing.deactivate_email() 155 else: 156 existing.deactivate() 157 return existing 158 159 def deactivate_subscriptions(self, user, _object): 160 subscription = self.get_existing(user, _object) 161 if subscription: 162 subscription.is_active = False 163 notification = subscription.last_notification 164 notification.is_read = True 165 notification.is_dead = True 166 notification.save(update_fields=['is_read', 'is_dead']) 167 subscription.save(update_fields=['is_active']) 168 169 170 class NewTopicSubscriptionManager(SubscriptionManager): 171 def mark_read_everybody_at(self, topic): 172 """ 173 Mark every unaccessible notifications as read. 174 175 :param topic: 176 :return: 177 """ 178 from zds.notification.models import Notification 179 notifications = Notification.objects.filter(content_type__pk=ContentType.objects.get_for_model(topic).pk, 180 object_id=topic.pk) 181 for notification in notifications: 182 if not topic.forum.can_read(notification.subscription.user): 183 notification.is_read = True 184 notification.save() 185 186 187 class TopicAnswerSubscriptionManager(SubscriptionManager): 188 """ 189 Custom topic answer subscription manager. 190 """ 191 192 def get_objects_followed_by(self, user): 193 """ 194 Gets objects followed by the given user. 195 196 :param user: concerned user. 197 :type user: django.contrib.auth.models.User 198 :return: All objects followed by given user. 199 """ 200 topic_list = self.filter(user=user, is_active=True, content_type=ContentType.objects.get_for_model(Topic)) \ 201 .values_list('object_id', flat=True) 202 203 return Topic.objects.filter(id__in=topic_list).order_by('-last_message__pubdate') 204 205 def unfollow_and_mark_read_everybody_at(self, topic): 206 """ 207 Deactivate a subscription at a topic and mark read the notification associated if exist. 208 209 :param topic: topic concerned. 210 :type topic: zds.forum.models.Topic 211 """ 212 subscriptions = self.get_subscriptions(topic) 213 for subscription in subscriptions: 214 if not topic.forum.can_read(subscription.user): 215 subscription.deactivate() 216 subscription.mark_notification_read() 217 218 219 class NotificationManager(models.Manager): 220 """ 221 Custom notification manager. 222 """ 223 224 def get_notifications_of(self, user): 225 """ 226 Gets all notifications of a user. 227 228 :param user: user object. 229 :return: a queryset of notifications. 230 """ 231 return self.filter(subscription__user=user).select_related('sender') 232 233 def get_unread_notifications_of(self, user): 234 """ 235 Gets all notifications for a user whose user is passed as argument. 236 237 :param user: user object 238 :type user: django.contrib.auth.models.User 239 :return: an iterable over notifications with user data already loaded 240 :rtype: an iterable list of notifications 241 """ 242 return self.filter(subscription__user=user, is_read=False) \ 243 .select_related('sender') 244 245 def filter_content_type_of(self, model): 246 """ 247 Applies a filter on the content_type. 
248 249 :param model: Model concerned for the ContentType 250 :type model: Model concerned by notifications 251 :return: an iterable list of notifications 252 """ 253 content_subscription_type = ContentType.objects.get_for_model(model) 254 return self.filter(subscription__content_type__pk=content_subscription_type.pk) 255 256 def get_users_for_unread_notification_on(self, content_object): 257 """ 258 Gets all users who have an notification unread on the given content object. 259 260 :param content_object: generic content object. 261 :type content_object: instance concerned by notifications 262 :return: an iterable list of users. 263 """ 264 content_type = ContentType.objects.get_for_model(content_object) 265 notifications = self.filter(object_id=content_object.pk, content_type__pk=content_type.pk) \ 266 .select_related('subscription') \ 267 .select_related('subscription__user') 268 return [notification.subscription.user for notification in notifications] 269 270 271 class TopicFollowedManager(models.Manager): 272 def get_followers_by_email(self, topic): 273 """ 274 :return: the set of users who follow this topic by email. 275 """ 276 return self.filter(topic=topic, email=True).select_related('user') 277 278 def is_followed(self, topic, user=None): 279 """ 280 Checks if the user follows this topic. 281 :param user: A user. If undefined, the current user is used. 282 :return: `True` if the user follows this topic, `False` otherwise. 283 """ 284 if user is None: 285 user = get_current_user() 286 287 return self.filter(topic=topic, user=user).exists() 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zds/notification/managers.py b/zds/notification/managers.py --- a/zds/notification/managers.py +++ b/zds/notification/managers.py @@ -149,7 +149,8 @@ if by_email: subscription.activate_email() return subscription - signals.content_read.send(sender=content_object.__class__, instance=content_object, user=user) + signals.content_read.send(sender=content_object.__class__, instance=content_object, user=user, + target=content_object.__class__) if by_email: existing.deactivate_email() else:
{"golden_diff": "diff --git a/zds/notification/managers.py b/zds/notification/managers.py\n--- a/zds/notification/managers.py\n+++ b/zds/notification/managers.py\n@@ -149,7 +149,8 @@\n if by_email:\n subscription.activate_email()\n return subscription\n- signals.content_read.send(sender=content_object.__class__, instance=content_object, user=user)\n+ signals.content_read.send(sender=content_object.__class__, instance=content_object, user=user,\n+ target=content_object.__class__)\n if by_email:\n existing.deactivate_email()\n else:\n", "issue": "Impossible de ne plus suivre un billet\nJe me suis abonn\u00e9 au billet [\u00d4 Belgique, \u00f4 ... ](https://zestedesavoir.com/billets/2681/o-belgique-o/) peu apr\u00e8s sa publication. Or, il m'est d\u00e9sormais impossible de ne plus le suivre. De m\u00eame, j'ai cliqu\u00e9 sur \"\u00catre notifi\u00e9 par courriel\", et il m'est impossible de l'annuler. Quand je clique sur les boutons correspondants dans la sidebar, rien ne se passe.\r\n\r\nLa m\u00eame chose se passe sur le billet [Notification des failles de s\u00e9curit\u00e9 et droit p\u00e9nal](https://zestedesavoir.com/billets/2568/notification-des-failles-de-securite-et-droit-penal/).\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\n\nfrom zds.forum.models import Topic\nfrom zds.notification import signals\nfrom zds.utils import get_current_user\n\n\nclass SubscriptionManager(models.Manager):\n \"\"\"\n Custom subscription manager\n \"\"\"\n\n def __create_lookup_args(self, user, content_object, is_active, by_email):\n \"\"\"\n Generates QuerySet lookup parameters for use with get(), filter(), ...\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n lookup = dict(\n object_id=content_object.pk,\n content_type__pk=content_type.pk,\n user=user\n )\n if is_active is not None:\n lookup['is_active'] = is_active\n if by_email is not None:\n lookup['by_email'] = by_email\n return lookup\n\n def get_existing(self, user, content_object, is_active=None, by_email=None):\n \"\"\"\n If exists, return the existing subscription for the given user and content object.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param is_active: Boolean to know if we want a subscription active or not.\n :type is_active: Boolean\n :param by_email: Boolean to know if we want a subscription for email or not.\n :type by_email: Boolean\n :return: subscription or None\n \"\"\"\n lookup = self.__create_lookup_args(user, content_object, is_active, by_email)\n try:\n existing = self.get(**lookup)\n except ObjectDoesNotExist:\n existing = None\n return existing\n\n def does_exist(self, user, content_object, is_active=None, by_email=None):\n \"\"\"\n Check if there is a subscription for the given user and content object.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param is_active: Boolean to know if we want a subscription active or not.\n :type is_active: Boolean\n :param by_email: Boolean to know if we want a subscription for email or not.\n :type by_email: Boolean\n :return: Boolean, whether this subscription exists or not\n \"\"\"\n lookup = self.__create_lookup_args(user, 
content_object, is_active, by_email)\n return self.filter(**lookup).exists()\n\n def get_or_create_active(self, user, content_object):\n \"\"\"\n Gets (or create if it doesn't exist) the subscription for the content object given.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :return: subscription\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n try:\n subscription = self.get(\n object_id=content_object.pk,\n content_type__pk=content_type.pk,\n user=user)\n if not subscription.is_active:\n subscription.activate()\n except ObjectDoesNotExist:\n subscription = self.model(user=user, content_object=content_object)\n subscription.save()\n\n return subscription\n\n def get_subscriptions(self, content_object, is_active=True):\n \"\"\"\n Gets subscriptions of the content object.\n\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param is_active: Boolean to know if we want a subscription active or not.\n :type is_active: Boolean\n :return: an iterable list of subscriptions\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n return self.filter(object_id=content_object.pk,\n content_type__pk=content_type.pk,\n is_active=is_active)\n\n def get_subscribers(self, content_object, only_by_email=False):\n \"\"\"\n Gets all subscribers of a content object.\n\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param only_by_email: Boolean to know if we want a subscription for email or not.\n :type only_by_email: Boolean\n :return: users\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n if only_by_email:\n # if I'm only interested by the email subscription\n subscription_list = self.filter(\n object_id=content_object.pk,\n content_type__pk=content_type.pk,\n by_email=True)\n else:\n subscription_list = self.filter(\n object_id=content_object.pk,\n content_type__pk=content_type.pk)\n\n return [subscription.user for subscription in subscription_list]\n\n def toggle_follow(self, content_object, user=None, by_email=False):\n \"\"\"\n Toggle following of a resource notifiable for a user.\n\n :param content_object: A resource notifiable.\n :param user: A user. 
If undefined, the current user is used.\n :param by_email: Get subscription by email or not.\n :return: subscription of the user for the content.\n \"\"\"\n if not user:\n user = get_current_user()\n if by_email:\n existing = self.get_existing(user, content_object, is_active=True, by_email=True)\n else:\n existing = self.get_existing(user, content_object, is_active=True)\n if not existing:\n subscription = self.get_or_create_active(user, content_object)\n if by_email:\n subscription.activate_email()\n return subscription\n signals.content_read.send(sender=content_object.__class__, instance=content_object, user=user)\n if by_email:\n existing.deactivate_email()\n else:\n existing.deactivate()\n return existing\n\n def deactivate_subscriptions(self, user, _object):\n subscription = self.get_existing(user, _object)\n if subscription:\n subscription.is_active = False\n notification = subscription.last_notification\n notification.is_read = True\n notification.is_dead = True\n notification.save(update_fields=['is_read', 'is_dead'])\n subscription.save(update_fields=['is_active'])\n\n\nclass NewTopicSubscriptionManager(SubscriptionManager):\n def mark_read_everybody_at(self, topic):\n \"\"\"\n Mark every unaccessible notifications as read.\n\n :param topic:\n :return:\n \"\"\"\n from zds.notification.models import Notification\n notifications = Notification.objects.filter(content_type__pk=ContentType.objects.get_for_model(topic).pk,\n object_id=topic.pk)\n for notification in notifications:\n if not topic.forum.can_read(notification.subscription.user):\n notification.is_read = True\n notification.save()\n\n\nclass TopicAnswerSubscriptionManager(SubscriptionManager):\n \"\"\"\n Custom topic answer subscription manager.\n \"\"\"\n\n def get_objects_followed_by(self, user):\n \"\"\"\n Gets objects followed by the given user.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :return: All objects followed by given user.\n \"\"\"\n topic_list = self.filter(user=user, is_active=True, content_type=ContentType.objects.get_for_model(Topic)) \\\n .values_list('object_id', flat=True)\n\n return Topic.objects.filter(id__in=topic_list).order_by('-last_message__pubdate')\n\n def unfollow_and_mark_read_everybody_at(self, topic):\n \"\"\"\n Deactivate a subscription at a topic and mark read the notification associated if exist.\n\n :param topic: topic concerned.\n :type topic: zds.forum.models.Topic\n \"\"\"\n subscriptions = self.get_subscriptions(topic)\n for subscription in subscriptions:\n if not topic.forum.can_read(subscription.user):\n subscription.deactivate()\n subscription.mark_notification_read()\n\n\nclass NotificationManager(models.Manager):\n \"\"\"\n Custom notification manager.\n \"\"\"\n\n def get_notifications_of(self, user):\n \"\"\"\n Gets all notifications of a user.\n\n :param user: user object.\n :return: a queryset of notifications.\n \"\"\"\n return self.filter(subscription__user=user).select_related('sender')\n\n def get_unread_notifications_of(self, user):\n \"\"\"\n Gets all notifications for a user whose user is passed as argument.\n\n :param user: user object\n :type user: django.contrib.auth.models.User\n :return: an iterable over notifications with user data already loaded\n :rtype: an iterable list of notifications\n \"\"\"\n return self.filter(subscription__user=user, is_read=False) \\\n .select_related('sender')\n\n def filter_content_type_of(self, model):\n \"\"\"\n Applies a filter on the content_type.\n\n :param model: Model concerned for the 
ContentType\n :type model: Model concerned by notifications\n :return: an iterable list of notifications\n \"\"\"\n content_subscription_type = ContentType.objects.get_for_model(model)\n return self.filter(subscription__content_type__pk=content_subscription_type.pk)\n\n def get_users_for_unread_notification_on(self, content_object):\n \"\"\"\n Gets all users who have an notification unread on the given content object.\n\n :param content_object: generic content object.\n :type content_object: instance concerned by notifications\n :return: an iterable list of users.\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n notifications = self.filter(object_id=content_object.pk, content_type__pk=content_type.pk) \\\n .select_related('subscription') \\\n .select_related('subscription__user')\n return [notification.subscription.user for notification in notifications]\n\n\nclass TopicFollowedManager(models.Manager):\n def get_followers_by_email(self, topic):\n \"\"\"\n :return: the set of users who follow this topic by email.\n \"\"\"\n return self.filter(topic=topic, email=True).select_related('user')\n\n def is_followed(self, topic, user=None):\n \"\"\"\n Checks if the user follows this topic.\n :param user: A user. If undefined, the current user is used.\n :return: `True` if the user follows this topic, `False` otherwise.\n \"\"\"\n if user is None:\n user = get_current_user()\n\n return self.filter(topic=topic, user=user).exists()\n", "path": "zds/notification/managers.py"}], "after_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\n\nfrom zds.forum.models import Topic\nfrom zds.notification import signals\nfrom zds.utils import get_current_user\n\n\nclass SubscriptionManager(models.Manager):\n \"\"\"\n Custom subscription manager\n \"\"\"\n\n def __create_lookup_args(self, user, content_object, is_active, by_email):\n \"\"\"\n Generates QuerySet lookup parameters for use with get(), filter(), ...\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n lookup = dict(\n object_id=content_object.pk,\n content_type__pk=content_type.pk,\n user=user\n )\n if is_active is not None:\n lookup['is_active'] = is_active\n if by_email is not None:\n lookup['by_email'] = by_email\n return lookup\n\n def get_existing(self, user, content_object, is_active=None, by_email=None):\n \"\"\"\n If exists, return the existing subscription for the given user and content object.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param is_active: Boolean to know if we want a subscription active or not.\n :type is_active: Boolean\n :param by_email: Boolean to know if we want a subscription for email or not.\n :type by_email: Boolean\n :return: subscription or None\n \"\"\"\n lookup = self.__create_lookup_args(user, content_object, is_active, by_email)\n try:\n existing = self.get(**lookup)\n except ObjectDoesNotExist:\n existing = None\n return existing\n\n def does_exist(self, user, content_object, is_active=None, by_email=None):\n \"\"\"\n Check if there is a subscription for the given user and content object.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param is_active: 
Boolean to know if we want a subscription active or not.\n :type is_active: Boolean\n :param by_email: Boolean to know if we want a subscription for email or not.\n :type by_email: Boolean\n :return: Boolean, whether this subscription exists or not\n \"\"\"\n lookup = self.__create_lookup_args(user, content_object, is_active, by_email)\n return self.filter(**lookup).exists()\n\n def get_or_create_active(self, user, content_object):\n \"\"\"\n Gets (or create if it doesn't exist) the subscription for the content object given.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :return: subscription\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n try:\n subscription = self.get(\n object_id=content_object.pk,\n content_type__pk=content_type.pk,\n user=user)\n if not subscription.is_active:\n subscription.activate()\n except ObjectDoesNotExist:\n subscription = self.model(user=user, content_object=content_object)\n subscription.save()\n\n return subscription\n\n def get_subscriptions(self, content_object, is_active=True):\n \"\"\"\n Gets subscriptions of the content object.\n\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param is_active: Boolean to know if we want a subscription active or not.\n :type is_active: Boolean\n :return: an iterable list of subscriptions\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n return self.filter(object_id=content_object.pk,\n content_type__pk=content_type.pk,\n is_active=is_active)\n\n def get_subscribers(self, content_object, only_by_email=False):\n \"\"\"\n Gets all subscribers of a content object.\n\n :param content_object: Generic content concerned.\n :type content_object: instance concerned by notifications\n :param only_by_email: Boolean to know if we want a subscription for email or not.\n :type only_by_email: Boolean\n :return: users\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n if only_by_email:\n # if I'm only interested by the email subscription\n subscription_list = self.filter(\n object_id=content_object.pk,\n content_type__pk=content_type.pk,\n by_email=True)\n else:\n subscription_list = self.filter(\n object_id=content_object.pk,\n content_type__pk=content_type.pk)\n\n return [subscription.user for subscription in subscription_list]\n\n def toggle_follow(self, content_object, user=None, by_email=False):\n \"\"\"\n Toggle following of a resource notifiable for a user.\n\n :param content_object: A resource notifiable.\n :param user: A user. 
If undefined, the current user is used.\n :param by_email: Get subscription by email or not.\n :return: subscription of the user for the content.\n \"\"\"\n if not user:\n user = get_current_user()\n if by_email:\n existing = self.get_existing(user, content_object, is_active=True, by_email=True)\n else:\n existing = self.get_existing(user, content_object, is_active=True)\n if not existing:\n subscription = self.get_or_create_active(user, content_object)\n if by_email:\n subscription.activate_email()\n return subscription\n signals.content_read.send(sender=content_object.__class__, instance=content_object, user=user,\n target=content_object.__class__)\n if by_email:\n existing.deactivate_email()\n else:\n existing.deactivate()\n return existing\n\n def deactivate_subscriptions(self, user, _object):\n subscription = self.get_existing(user, _object)\n if subscription:\n subscription.is_active = False\n notification = subscription.last_notification\n notification.is_read = True\n notification.is_dead = True\n notification.save(update_fields=['is_read', 'is_dead'])\n subscription.save(update_fields=['is_active'])\n\n\nclass NewTopicSubscriptionManager(SubscriptionManager):\n def mark_read_everybody_at(self, topic):\n \"\"\"\n Mark every unaccessible notifications as read.\n\n :param topic:\n :return:\n \"\"\"\n from zds.notification.models import Notification\n notifications = Notification.objects.filter(content_type__pk=ContentType.objects.get_for_model(topic).pk,\n object_id=topic.pk)\n for notification in notifications:\n if not topic.forum.can_read(notification.subscription.user):\n notification.is_read = True\n notification.save()\n\n\nclass TopicAnswerSubscriptionManager(SubscriptionManager):\n \"\"\"\n Custom topic answer subscription manager.\n \"\"\"\n\n def get_objects_followed_by(self, user):\n \"\"\"\n Gets objects followed by the given user.\n\n :param user: concerned user.\n :type user: django.contrib.auth.models.User\n :return: All objects followed by given user.\n \"\"\"\n topic_list = self.filter(user=user, is_active=True, content_type=ContentType.objects.get_for_model(Topic)) \\\n .values_list('object_id', flat=True)\n\n return Topic.objects.filter(id__in=topic_list).order_by('-last_message__pubdate')\n\n def unfollow_and_mark_read_everybody_at(self, topic):\n \"\"\"\n Deactivate a subscription at a topic and mark read the notification associated if exist.\n\n :param topic: topic concerned.\n :type topic: zds.forum.models.Topic\n \"\"\"\n subscriptions = self.get_subscriptions(topic)\n for subscription in subscriptions:\n if not topic.forum.can_read(subscription.user):\n subscription.deactivate()\n subscription.mark_notification_read()\n\n\nclass NotificationManager(models.Manager):\n \"\"\"\n Custom notification manager.\n \"\"\"\n\n def get_notifications_of(self, user):\n \"\"\"\n Gets all notifications of a user.\n\n :param user: user object.\n :return: a queryset of notifications.\n \"\"\"\n return self.filter(subscription__user=user).select_related('sender')\n\n def get_unread_notifications_of(self, user):\n \"\"\"\n Gets all notifications for a user whose user is passed as argument.\n\n :param user: user object\n :type user: django.contrib.auth.models.User\n :return: an iterable over notifications with user data already loaded\n :rtype: an iterable list of notifications\n \"\"\"\n return self.filter(subscription__user=user, is_read=False) \\\n .select_related('sender')\n\n def filter_content_type_of(self, model):\n \"\"\"\n Applies a filter on the content_type.\n\n 
:param model: Model concerned for the ContentType\n :type model: Model concerned by notifications\n :return: an iterable list of notifications\n \"\"\"\n content_subscription_type = ContentType.objects.get_for_model(model)\n return self.filter(subscription__content_type__pk=content_subscription_type.pk)\n\n def get_users_for_unread_notification_on(self, content_object):\n \"\"\"\n Gets all users who have an notification unread on the given content object.\n\n :param content_object: generic content object.\n :type content_object: instance concerned by notifications\n :return: an iterable list of users.\n \"\"\"\n content_type = ContentType.objects.get_for_model(content_object)\n notifications = self.filter(object_id=content_object.pk, content_type__pk=content_type.pk) \\\n .select_related('subscription') \\\n .select_related('subscription__user')\n return [notification.subscription.user for notification in notifications]\n\n\nclass TopicFollowedManager(models.Manager):\n def get_followers_by_email(self, topic):\n \"\"\"\n :return: the set of users who follow this topic by email.\n \"\"\"\n return self.filter(topic=topic, email=True).select_related('user')\n\n def is_followed(self, topic, user=None):\n \"\"\"\n Checks if the user follows this topic.\n :param user: A user. If undefined, the current user is used.\n :return: `True` if the user follows this topic, `False` otherwise.\n \"\"\"\n if user is None:\n user = get_current_user()\n\n return self.filter(topic=topic, user=user).exists()\n", "path": "zds/notification/managers.py"}]}
num_tokens: 3,369
num_tokens_diff: 129
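The fix in the record above is confined to the keyword arguments passed to a Django signal: the golden diff adds `target=content_object.__class__` to the `signals.content_read.send(...)` call. A minimal, hypothetical sketch of why sender and receiver have to agree on those keywords (plain `django.dispatch`, not the project's actual signal or receivers) is shown below.

```python
# Hedged sketch of the send()/receiver keyword contract in Django signals.
# Assumes Django is installed; the signal and receiver are made up for
# illustration and are not the zds project's real code.
import django.dispatch

content_read = django.dispatch.Signal()

def mark_read(sender, instance, user, target, **kwargs):
    # Receiver whose signature requires a `target` keyword.
    print("marking %r read for %r (target=%r)" % (instance, user, target))

content_read.connect(mark_read)

# If the sender omitted target=..., Signal.send() would propagate a TypeError
# from the receiver -- the kind of sender/receiver mismatch the extra keyword
# argument in the diff above is meant to avoid.
content_read.send(sender=object, instance="a-publication", user="alice", target=object)
```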
problem_id: gh_patches_debug_55343
source: rasdani/github-patches
task_type: git_diff
in_source_id: ytdl-org__youtube-dl-16707
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- vidzi.tv doesn't work $youtube-dl --version 2018.06.04 $youtube-dl and http://vidzi.tv links doesn't work for example: $youtube-dl http://vidzi.tv/n83vo2mlnpgb Failed to parse JSON (caused by ValueError("Expecting ',' delimiter: line 12 column 175 (char 771)",)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. $youtube-dl --verbose http://vidzi.tv/n83vo2mlnpgb [debug] System config: [] [debug] User config: [] [debug] Custom config: [] [debug] Command-line args: [u'--verbose', u'http://vidzi.tv/n83vo2mlnpgb'] [debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8 [debug] youtube-dl version 2018.06.04 [debug] Python version 2.7.10 (CPython) - Darwin-17.5.0-x86_64-i386-64bit [debug] exe versions: avconv 12.3, avprobe 12.3, ffmpeg 3.4.2, ffprobe 3.4.2 [debug] Proxy map: {} [Vidzi] n83vo2mlnpgb: Downloading webpage ERROR: n83vo2mlnpgb: Failed to parse JSON (caused by ValueError("Expecting ',' delimiter: line 12 column 175 (char 791)",)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. Traceback (most recent call last): File "/Library/Python/2.7/site-packages/youtube_dl/extractor/common.py", line 774, in _parse_json return json.loads(json_string) File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/__init__.py", line 338, in loads return _default_decoder.decode(s) File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/decoder.py", line 366, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/decoder.py", line 382, in raw_decode obj, end = self.scan_once(s, idx) ValueError: Expecting ',' delimiter: line 12 column 175 (char 791) Traceback (most recent call last): File "/Library/Python/2.7/site-packages/youtube_dl/YoutubeDL.py", line 792, in extract_info ie_result = ie.extract(url) File "/Library/Python/2.7/site-packages/youtube_dl/extractor/common.py", line 500, in extract ie_result = self._real_extract(url) File "/Library/Python/2.7/site-packages/youtube_dl/extractor/vidzi.py", line 57, in _real_extract video_id, transform_source=js_to_json) File "/Library/Python/2.7/site-packages/youtube_dl/extractor/common.py", line 778, in _parse_json raise ExtractorError(errmsg, cause=ve) ExtractorError: n83vo2mlnpgb: Failed to parse JSON (caused by ValueError("Expecting ',' delimiter: line 12 column 175 (char 791)",)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `youtube_dl/extractor/vidzi.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import re 5 6 from .common import InfoExtractor 7 from ..utils import ( 8 decode_packed_codes, 9 js_to_json, 10 NO_DEFAULT, 11 PACKED_CODES_RE, 12 ) 13 14 15 class VidziIE(InfoExtractor): 16 _VALID_URL = r'https?://(?:www\.)?vidzi\.(?:tv|cc|si)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)' 17 _TESTS = [{ 18 'url': 'http://vidzi.tv/cghql9yq6emu.html', 19 'md5': '4f16c71ca0c8c8635ab6932b5f3f1660', 20 'info_dict': { 21 'id': 'cghql9yq6emu', 22 'ext': 'mp4', 23 'title': 'youtube-dl test video 1\\\\2\'3/4<5\\\\6ä7↭', 24 }, 25 'params': { 26 # m3u8 download 27 'skip_download': True, 28 }, 29 }, { 30 'url': 'http://vidzi.tv/embed-4z2yb0rzphe9-600x338.html', 31 'only_matching': True, 32 }, { 33 'url': 'http://vidzi.cc/cghql9yq6emu.html', 34 'only_matching': True, 35 }, { 36 'url': 'https://vidzi.si/rph9gztxj1et.html', 37 'only_matching': True, 38 }] 39 40 def _real_extract(self, url): 41 video_id = self._match_id(url) 42 43 webpage = self._download_webpage( 44 'http://vidzi.tv/%s' % video_id, video_id) 45 title = self._html_search_regex( 46 r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title') 47 48 codes = [webpage] 49 codes.extend([ 50 decode_packed_codes(mobj.group(0)).replace('\\\'', '\'') 51 for mobj in re.finditer(PACKED_CODES_RE, webpage)]) 52 for num, code in enumerate(codes, 1): 53 jwplayer_data = self._parse_json( 54 self._search_regex( 55 r'setup\(([^)]+)\)', code, 'jwplayer data', 56 default=NO_DEFAULT if num == len(codes) else '{}'), 57 video_id, transform_source=js_to_json) 58 if jwplayer_data: 59 break 60 61 info_dict = self._parse_jwplayer_data(jwplayer_data, video_id, require_title=False) 62 info_dict['title'] = title 63 64 return info_dict 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py --- a/youtube_dl/extractor/vidzi.py +++ b/youtube_dl/extractor/vidzi.py @@ -54,7 +54,8 @@ self._search_regex( r'setup\(([^)]+)\)', code, 'jwplayer data', default=NO_DEFAULT if num == len(codes) else '{}'), - video_id, transform_source=js_to_json) + video_id, transform_source=lambda s: js_to_json( + re.sub(r'\s*\+\s*window\[.+?\]', '', s))) if jwplayer_data: break
{"golden_diff": "diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py\n--- a/youtube_dl/extractor/vidzi.py\n+++ b/youtube_dl/extractor/vidzi.py\n@@ -54,7 +54,8 @@\n self._search_regex(\n r'setup\\(([^)]+)\\)', code, 'jwplayer data',\n default=NO_DEFAULT if num == len(codes) else '{}'),\n- video_id, transform_source=js_to_json)\n+ video_id, transform_source=lambda s: js_to_json(\n+ re.sub(r'\\s*\\+\\s*window\\[.+?\\]', '', s)))\n if jwplayer_data:\n break\n", "issue": "vidzi.tv doesn't work\n$youtube-dl --version\r\n2018.06.04\r\n\r\n$youtube-dl and http://vidzi.tv links doesn't work\r\n\r\nfor example:\r\n$youtube-dl http://vidzi.tv/n83vo2mlnpgb\r\n\r\nFailed to parse JSON (caused by ValueError(\"Expecting ',' delimiter: line 12 column 175 (char 771)\",)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n\r\n$youtube-dl --verbose http://vidzi.tv/n83vo2mlnpgb\r\n\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'--verbose', u'http://vidzi.tv/n83vo2mlnpgb']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2018.06.04\r\n[debug] Python version 2.7.10 (CPython) - Darwin-17.5.0-x86_64-i386-64bit\r\n[debug] exe versions: avconv 12.3, avprobe 12.3, ffmpeg 3.4.2, ffprobe 3.4.2\r\n[debug] Proxy map: {}\r\n[Vidzi] n83vo2mlnpgb: Downloading webpage\r\nERROR: n83vo2mlnpgb: Failed to parse JSON (caused by ValueError(\"Expecting ',' delimiter: line 12 column 175 (char 791)\",)); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"/Library/Python/2.7/site-packages/youtube_dl/extractor/common.py\", line 774, in _parse_json\r\n return json.loads(json_string)\r\n File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/__init__.py\", line 338, in loads\r\n return _default_decoder.decode(s)\r\n File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/decoder.py\", line 366, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/decoder.py\", line 382, in raw_decode\r\n obj, end = self.scan_once(s, idx)\r\nValueError: Expecting ',' delimiter: line 12 column 175 (char 791)\r\nTraceback (most recent call last):\r\n File \"/Library/Python/2.7/site-packages/youtube_dl/YoutubeDL.py\", line 792, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"/Library/Python/2.7/site-packages/youtube_dl/extractor/common.py\", line 500, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/Library/Python/2.7/site-packages/youtube_dl/extractor/vidzi.py\", line 57, in _real_extract\r\n video_id, transform_source=js_to_json)\r\n File \"/Library/Python/2.7/site-packages/youtube_dl/extractor/common.py\", line 778, in _parse_json\r\n raise ExtractorError(errmsg, cause=ve)\r\nExtractorError: n83vo2mlnpgb: Failed to parse JSON (caused by ValueError(\"Expecting ',' delimiter: line 12 column 175 (char 791)\",)); please report this issue on https://yt-dl.org/bug . 
Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n decode_packed_codes,\n js_to_json,\n NO_DEFAULT,\n PACKED_CODES_RE,\n)\n\n\nclass VidziIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?vidzi\\.(?:tv|cc|si)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'\n _TESTS = [{\n 'url': 'http://vidzi.tv/cghql9yq6emu.html',\n 'md5': '4f16c71ca0c8c8635ab6932b5f3f1660',\n 'info_dict': {\n 'id': 'cghql9yq6emu',\n 'ext': 'mp4',\n 'title': 'youtube-dl test video 1\\\\\\\\2\\'3/4<5\\\\\\\\6\u00e47\u21ad',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }, {\n 'url': 'http://vidzi.tv/embed-4z2yb0rzphe9-600x338.html',\n 'only_matching': True,\n }, {\n 'url': 'http://vidzi.cc/cghql9yq6emu.html',\n 'only_matching': True,\n }, {\n 'url': 'https://vidzi.si/rph9gztxj1et.html',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(\n 'http://vidzi.tv/%s' % video_id, video_id)\n title = self._html_search_regex(\n r'(?s)<h2 class=\"video-title\">(.*?)</h2>', webpage, 'title')\n\n codes = [webpage]\n codes.extend([\n decode_packed_codes(mobj.group(0)).replace('\\\\\\'', '\\'')\n for mobj in re.finditer(PACKED_CODES_RE, webpage)])\n for num, code in enumerate(codes, 1):\n jwplayer_data = self._parse_json(\n self._search_regex(\n r'setup\\(([^)]+)\\)', code, 'jwplayer data',\n default=NO_DEFAULT if num == len(codes) else '{}'),\n video_id, transform_source=js_to_json)\n if jwplayer_data:\n break\n\n info_dict = self._parse_jwplayer_data(jwplayer_data, video_id, require_title=False)\n info_dict['title'] = title\n\n return info_dict\n", "path": "youtube_dl/extractor/vidzi.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n decode_packed_codes,\n js_to_json,\n NO_DEFAULT,\n PACKED_CODES_RE,\n)\n\n\nclass VidziIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?vidzi\\.(?:tv|cc|si)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'\n _TESTS = [{\n 'url': 'http://vidzi.tv/cghql9yq6emu.html',\n 'md5': '4f16c71ca0c8c8635ab6932b5f3f1660',\n 'info_dict': {\n 'id': 'cghql9yq6emu',\n 'ext': 'mp4',\n 'title': 'youtube-dl test video 1\\\\\\\\2\\'3/4<5\\\\\\\\6\u00e47\u21ad',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }, {\n 'url': 'http://vidzi.tv/embed-4z2yb0rzphe9-600x338.html',\n 'only_matching': True,\n }, {\n 'url': 'http://vidzi.cc/cghql9yq6emu.html',\n 'only_matching': True,\n }, {\n 'url': 'https://vidzi.si/rph9gztxj1et.html',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(\n 'http://vidzi.tv/%s' % video_id, video_id)\n title = self._html_search_regex(\n r'(?s)<h2 class=\"video-title\">(.*?)</h2>', webpage, 'title')\n\n codes = [webpage]\n codes.extend([\n decode_packed_codes(mobj.group(0)).replace('\\\\\\'', '\\'')\n for mobj in re.finditer(PACKED_CODES_RE, webpage)])\n for num, code in enumerate(codes, 1):\n jwplayer_data = self._parse_json(\n self._search_regex(\n r'setup\\(([^)]+)\\)', code, 'jwplayer data',\n default=NO_DEFAULT if num == len(codes) else '{}'),\n video_id, transform_source=lambda s: js_to_json(\n 
re.sub(r'\\s*\\+\\s*window\\[.+?\\]', '', s)))\n if jwplayer_data:\n break\n\n info_dict = self._parse_jwplayer_data(jwplayer_data, video_id, require_title=False)\n info_dict['title'] = title\n\n return info_dict\n", "path": "youtube_dl/extractor/vidzi.py"}]}
num_tokens: 1,950
num_tokens_diff: 159
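The golden diff in the record above amounts to one preprocessing step: removing JavaScript string concatenation of the form `+ window[...]` from the jwplayer `setup(...)` argument before it is parsed as JSON. A self-contained sketch of that transform follows; the input string, URL and key are hypothetical, not taken from vidzi.tv.

```python
# Sketch of the cleanup regex from the diff above, run on a made-up
# setup(...) argument; standard library only.
import json
import re

setup_arg = '{"sources": [{"file": "http://example.com/v.m3u8" + window["xyz"]}]}'

# Drop the `+ window[...]` concatenation so the remainder is plain JSON.
cleaned = re.sub(r'\s*\+\s*window\[.+?\]', '', setup_arg)

print(cleaned)              # {"sources": [{"file": "http://example.com/v.m3u8"}]}
print(json.loads(cleaned))  # parses without the "Expecting ',' delimiter" error
```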
problem_id: gh_patches_debug_10400
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-poetry__poetry-1815
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pre-release `~=` constrains are mis calculated <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [ ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). <!-- Once those are done, if you're able to fill in the following list with your information, it'd be very helpful to whoever handles the issue. --> - **OS version and name**: Windows - **Poetry version**: 0.12.16 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: N/A, requires access to a proprietary pypi instance ## Issue <!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ --> My constraints aren't resolving. A rough look at my dependency tree - A: `B = "^0.7.5" via poetry - B: `"C~=0.2.0dev16"` via setup.py Poetry complains that it cannot resolve B's dependency on C ``` [SolverProblemError] Because B (0.7.5) depends on C(>=0.2.0,<0.3.0) which doesn't match any versions, B is forbidden. So, because no versions of B match >0.7.5,<0.8.0 and A depends on B (^0.7.5), version solving failed. ``` I traced the problem down into [`semver/__init__.py:parse_single_constraint`](https://github.com/sdispater/poetry/blob/master/poetry/semver/__init__.py#L67) where the constraint - `~=0.2.0dev16` gets compiled into - `>=0.2.0,<0.3.0` In contrast, the constraint - `~=2.0.dev0` correctly gets compiled into - ` >=2.0.dev0,<3.0.0` The problem seems to be ```python if precision == 2: low = version high = version.stable.next_major else: low = Version(version.major, version.minor, 0) high = version.stable.next_minor ``` where if the `precision` is 1 or 3, then the pre-release is dropped from `low`, disqualifying them from resolving the constraint. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `poetry/semver/__init__.py` Content: ``` 1 import re 2 3 from .empty_constraint import EmptyConstraint 4 from .patterns import BASIC_CONSTRAINT 5 from .patterns import CARET_CONSTRAINT 6 from .patterns import TILDE_CONSTRAINT 7 from .patterns import TILDE_PEP440_CONSTRAINT 8 from .patterns import X_CONSTRAINT 9 from .version import Version 10 from .version_constraint import VersionConstraint 11 from .version_range import VersionRange 12 from .version_union import VersionUnion 13 14 15 def parse_constraint(constraints): # type: (str) -> VersionConstraint 16 if constraints == "*": 17 return VersionRange() 18 19 or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip()) 20 or_groups = [] 21 for constraints in or_constraints: 22 and_constraints = re.split( 23 "(?<!^)(?<![=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)", constraints 24 ) 25 constraint_objects = [] 26 27 if len(and_constraints) > 1: 28 for constraint in and_constraints: 29 constraint_objects.append(parse_single_constraint(constraint)) 30 else: 31 constraint_objects.append(parse_single_constraint(and_constraints[0])) 32 33 if len(constraint_objects) == 1: 34 constraint = constraint_objects[0] 35 else: 36 constraint = constraint_objects[0] 37 for next_constraint in constraint_objects[1:]: 38 constraint = constraint.intersect(next_constraint) 39 40 or_groups.append(constraint) 41 42 if len(or_groups) == 1: 43 return or_groups[0] 44 else: 45 return VersionUnion.of(*or_groups) 46 47 48 def parse_single_constraint(constraint): # type: (str) -> VersionConstraint 49 m = re.match(r"(?i)^v?[xX*](\.[xX*])*$", constraint) 50 if m: 51 return VersionRange() 52 53 # Tilde range 54 m = TILDE_CONSTRAINT.match(constraint) 55 if m: 56 version = Version.parse(m.group(1)) 57 58 high = version.stable.next_minor 59 if len(m.group(1).split(".")) == 1: 60 high = version.stable.next_major 61 62 return VersionRange( 63 version, high, include_min=True, always_include_max_prerelease=True 64 ) 65 66 # PEP 440 Tilde range (~=) 67 m = TILDE_PEP440_CONSTRAINT.match(constraint) 68 if m: 69 precision = 1 70 if m.group(3): 71 precision += 1 72 73 if m.group(4): 74 precision += 1 75 76 version = Version.parse(m.group(1)) 77 78 if precision == 2: 79 low = version 80 high = version.stable.next_major 81 else: 82 low = Version(version.major, version.minor, version.patch) 83 high = version.stable.next_minor 84 85 return VersionRange( 86 low, high, include_min=True, always_include_max_prerelease=True 87 ) 88 89 # Caret range 90 m = CARET_CONSTRAINT.match(constraint) 91 if m: 92 version = Version.parse(m.group(1)) 93 94 return VersionRange( 95 version, 96 version.next_breaking, 97 include_min=True, 98 always_include_max_prerelease=True, 99 ) 100 101 # X Range 102 m = X_CONSTRAINT.match(constraint) 103 if m: 104 op = m.group(1) 105 major = int(m.group(2)) 106 minor = m.group(3) 107 108 if minor is not None: 109 version = Version(major, int(minor), 0) 110 111 result = VersionRange( 112 version, 113 version.next_minor, 114 include_min=True, 115 always_include_max_prerelease=True, 116 ) 117 else: 118 if major == 0: 119 result = VersionRange(max=Version(1, 0, 0)) 120 else: 121 version = Version(major, 0, 0) 122 123 result = VersionRange( 124 version, 125 version.next_major, 126 include_min=True, 127 always_include_max_prerelease=True, 128 ) 129 130 if op == "!=": 131 result = VersionRange().difference(result) 132 133 return result 134 135 # Basic comparator 136 m = BASIC_CONSTRAINT.match(constraint) 137 if m: 138 op = m.group(1) 139 version = m.group(2) 140 141 
if version == "dev": 142 version = "0.0-dev" 143 144 try: 145 version = Version.parse(version) 146 except ValueError: 147 raise ValueError( 148 "Could not parse version constraint: {}".format(constraint) 149 ) 150 151 if op == "<": 152 return VersionRange(max=version) 153 elif op == "<=": 154 return VersionRange(max=version, include_max=True) 155 elif op == ">": 156 return VersionRange(min=version) 157 elif op == ">=": 158 return VersionRange(min=version, include_min=True) 159 elif op == "!=": 160 return VersionUnion(VersionRange(max=version), VersionRange(min=version)) 161 else: 162 return version 163 164 raise ValueError("Could not parse version constraint: {}".format(constraint)) 165 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/poetry/semver/__init__.py b/poetry/semver/__init__.py --- a/poetry/semver/__init__.py +++ b/poetry/semver/__init__.py @@ -76,14 +76,12 @@ version = Version.parse(m.group(1)) if precision == 2: - low = version high = version.stable.next_major else: - low = Version(version.major, version.minor, version.patch) high = version.stable.next_minor return VersionRange( - low, high, include_min=True, always_include_max_prerelease=True + version, high, include_min=True, always_include_max_prerelease=True ) # Caret range
{"golden_diff": "diff --git a/poetry/semver/__init__.py b/poetry/semver/__init__.py\n--- a/poetry/semver/__init__.py\n+++ b/poetry/semver/__init__.py\n@@ -76,14 +76,12 @@\n version = Version.parse(m.group(1))\n \n if precision == 2:\n- low = version\n high = version.stable.next_major\n else:\n- low = Version(version.major, version.minor, version.patch)\n high = version.stable.next_minor\n \n return VersionRange(\n- low, high, include_min=True, always_include_max_prerelease=True\n+ version, high, include_min=True, always_include_max_prerelease=True\n )\n \n # Caret range\n", "issue": "Pre-release `~=` constrains are mis calculated\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [ ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Windows\r\n- **Poetry version**: 0.12.16\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: N/A, requires access to a proprietary pypi instance\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! 
Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\n\r\nMy constraints aren't resolving.\r\n\r\nA rough look at my dependency tree\r\n- A: `B = \"^0.7.5\" via poetry\r\n- B: `\"C~=0.2.0dev16\"` via setup.py\r\n\r\nPoetry complains that it cannot resolve B's dependency on C\r\n```\r\n[SolverProblemError]\r\nBecause B (0.7.5) depends on C(>=0.2.0,<0.3.0) which doesn't match any versions, B is forbidden.\r\nSo, because no versions of B match >0.7.5,<0.8.0\r\n and A depends on B (^0.7.5), version solving failed.\r\n```\r\n\r\nI traced the problem down into [`semver/__init__.py:parse_single_constraint`](https://github.com/sdispater/poetry/blob/master/poetry/semver/__init__.py#L67) where the constraint\r\n- `~=0.2.0dev16`\r\ngets compiled into\r\n- `>=0.2.0,<0.3.0`\r\n\r\nIn contrast, the constraint\r\n- `~=2.0.dev0`\r\ncorrectly gets compiled into\r\n- ` >=2.0.dev0,<3.0.0`\r\n\r\nThe problem seems to be\r\n```python\r\n if precision == 2:\r\n low = version\r\n high = version.stable.next_major\r\n else:\r\n low = Version(version.major, version.minor, 0)\r\n high = version.stable.next_minor\r\n```\r\nwhere if the `precision` is 1 or 3, then the pre-release is dropped from `low`, disqualifying them from resolving the constraint.\n", "before_files": [{"content": "import re\n\nfrom .empty_constraint import EmptyConstraint\nfrom .patterns import BASIC_CONSTRAINT\nfrom .patterns import CARET_CONSTRAINT\nfrom .patterns import TILDE_CONSTRAINT\nfrom .patterns import TILDE_PEP440_CONSTRAINT\nfrom .patterns import X_CONSTRAINT\nfrom .version import Version\nfrom .version_constraint import VersionConstraint\nfrom .version_range import VersionRange\nfrom .version_union import VersionUnion\n\n\ndef parse_constraint(constraints): # type: (str) -> VersionConstraint\n if constraints == \"*\":\n return VersionRange()\n\n or_constraints = re.split(r\"\\s*\\|\\|?\\s*\", constraints.strip())\n or_groups = []\n for constraints in or_constraints:\n and_constraints = re.split(\n \"(?<!^)(?<![=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)\", constraints\n )\n constraint_objects = []\n\n if len(and_constraints) > 1:\n for constraint in and_constraints:\n constraint_objects.append(parse_single_constraint(constraint))\n else:\n constraint_objects.append(parse_single_constraint(and_constraints[0]))\n\n if len(constraint_objects) == 1:\n constraint = constraint_objects[0]\n else:\n constraint = constraint_objects[0]\n for next_constraint in constraint_objects[1:]:\n constraint = constraint.intersect(next_constraint)\n\n or_groups.append(constraint)\n\n if len(or_groups) == 1:\n return or_groups[0]\n else:\n return VersionUnion.of(*or_groups)\n\n\ndef parse_single_constraint(constraint): # type: (str) -> VersionConstraint\n m = re.match(r\"(?i)^v?[xX*](\\.[xX*])*$\", constraint)\n if m:\n return VersionRange()\n\n # Tilde range\n m = TILDE_CONSTRAINT.match(constraint)\n if m:\n version = Version.parse(m.group(1))\n\n high = version.stable.next_minor\n if len(m.group(1).split(\".\")) == 1:\n high = version.stable.next_major\n\n return VersionRange(\n version, high, include_min=True, always_include_max_prerelease=True\n )\n\n # PEP 440 Tilde range (~=)\n m = TILDE_PEP440_CONSTRAINT.match(constraint)\n if m:\n precision = 1\n if m.group(3):\n precision += 1\n\n if m.group(4):\n precision += 1\n\n version = Version.parse(m.group(1))\n\n if precision == 2:\n low = version\n high = version.stable.next_major\n else:\n low = Version(version.major, version.minor, version.patch)\n high = version.stable.next_minor\n\n return VersionRange(\n low, high, 
include_min=True, always_include_max_prerelease=True\n )\n\n # Caret range\n m = CARET_CONSTRAINT.match(constraint)\n if m:\n version = Version.parse(m.group(1))\n\n return VersionRange(\n version,\n version.next_breaking,\n include_min=True,\n always_include_max_prerelease=True,\n )\n\n # X Range\n m = X_CONSTRAINT.match(constraint)\n if m:\n op = m.group(1)\n major = int(m.group(2))\n minor = m.group(3)\n\n if minor is not None:\n version = Version(major, int(minor), 0)\n\n result = VersionRange(\n version,\n version.next_minor,\n include_min=True,\n always_include_max_prerelease=True,\n )\n else:\n if major == 0:\n result = VersionRange(max=Version(1, 0, 0))\n else:\n version = Version(major, 0, 0)\n\n result = VersionRange(\n version,\n version.next_major,\n include_min=True,\n always_include_max_prerelease=True,\n )\n\n if op == \"!=\":\n result = VersionRange().difference(result)\n\n return result\n\n # Basic comparator\n m = BASIC_CONSTRAINT.match(constraint)\n if m:\n op = m.group(1)\n version = m.group(2)\n\n if version == \"dev\":\n version = \"0.0-dev\"\n\n try:\n version = Version.parse(version)\n except ValueError:\n raise ValueError(\n \"Could not parse version constraint: {}\".format(constraint)\n )\n\n if op == \"<\":\n return VersionRange(max=version)\n elif op == \"<=\":\n return VersionRange(max=version, include_max=True)\n elif op == \">\":\n return VersionRange(min=version)\n elif op == \">=\":\n return VersionRange(min=version, include_min=True)\n elif op == \"!=\":\n return VersionUnion(VersionRange(max=version), VersionRange(min=version))\n else:\n return version\n\n raise ValueError(\"Could not parse version constraint: {}\".format(constraint))\n", "path": "poetry/semver/__init__.py"}], "after_files": [{"content": "import re\n\nfrom .empty_constraint import EmptyConstraint\nfrom .patterns import BASIC_CONSTRAINT\nfrom .patterns import CARET_CONSTRAINT\nfrom .patterns import TILDE_CONSTRAINT\nfrom .patterns import TILDE_PEP440_CONSTRAINT\nfrom .patterns import X_CONSTRAINT\nfrom .version import Version\nfrom .version_constraint import VersionConstraint\nfrom .version_range import VersionRange\nfrom .version_union import VersionUnion\n\n\ndef parse_constraint(constraints): # type: (str) -> VersionConstraint\n if constraints == \"*\":\n return VersionRange()\n\n or_constraints = re.split(r\"\\s*\\|\\|?\\s*\", constraints.strip())\n or_groups = []\n for constraints in or_constraints:\n and_constraints = re.split(\n \"(?<!^)(?<![=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)\", constraints\n )\n constraint_objects = []\n\n if len(and_constraints) > 1:\n for constraint in and_constraints:\n constraint_objects.append(parse_single_constraint(constraint))\n else:\n constraint_objects.append(parse_single_constraint(and_constraints[0]))\n\n if len(constraint_objects) == 1:\n constraint = constraint_objects[0]\n else:\n constraint = constraint_objects[0]\n for next_constraint in constraint_objects[1:]:\n constraint = constraint.intersect(next_constraint)\n\n or_groups.append(constraint)\n\n if len(or_groups) == 1:\n return or_groups[0]\n else:\n return VersionUnion.of(*or_groups)\n\n\ndef parse_single_constraint(constraint): # type: (str) -> VersionConstraint\n m = re.match(r\"(?i)^v?[xX*](\\.[xX*])*$\", constraint)\n if m:\n return VersionRange()\n\n # Tilde range\n m = TILDE_CONSTRAINT.match(constraint)\n if m:\n version = Version.parse(m.group(1))\n\n high = version.stable.next_minor\n if len(m.group(1).split(\".\")) == 1:\n high = version.stable.next_major\n\n return VersionRange(\n 
version, high, include_min=True, always_include_max_prerelease=True\n )\n\n # PEP 440 Tilde range (~=)\n m = TILDE_PEP440_CONSTRAINT.match(constraint)\n if m:\n precision = 1\n if m.group(3):\n precision += 1\n\n if m.group(4):\n precision += 1\n\n version = Version.parse(m.group(1))\n\n if precision == 2:\n high = version.stable.next_major\n else:\n high = version.stable.next_minor\n\n return VersionRange(\n version, high, include_min=True, always_include_max_prerelease=True\n )\n\n # Caret range\n m = CARET_CONSTRAINT.match(constraint)\n if m:\n version = Version.parse(m.group(1))\n\n return VersionRange(\n version,\n version.next_breaking,\n include_min=True,\n always_include_max_prerelease=True,\n )\n\n # X Range\n m = X_CONSTRAINT.match(constraint)\n if m:\n op = m.group(1)\n major = int(m.group(2))\n minor = m.group(3)\n\n if minor is not None:\n version = Version(major, int(minor), 0)\n\n result = VersionRange(\n version,\n version.next_minor,\n include_min=True,\n always_include_max_prerelease=True,\n )\n else:\n if major == 0:\n result = VersionRange(max=Version(1, 0, 0))\n else:\n version = Version(major, 0, 0)\n\n result = VersionRange(\n version,\n version.next_major,\n include_min=True,\n always_include_max_prerelease=True,\n )\n\n if op == \"!=\":\n result = VersionRange().difference(result)\n\n return result\n\n # Basic comparator\n m = BASIC_CONSTRAINT.match(constraint)\n if m:\n op = m.group(1)\n version = m.group(2)\n\n if version == \"dev\":\n version = \"0.0-dev\"\n\n try:\n version = Version.parse(version)\n except ValueError:\n raise ValueError(\n \"Could not parse version constraint: {}\".format(constraint)\n )\n\n if op == \"<\":\n return VersionRange(max=version)\n elif op == \"<=\":\n return VersionRange(max=version, include_max=True)\n elif op == \">\":\n return VersionRange(min=version)\n elif op == \">=\":\n return VersionRange(min=version, include_min=True)\n elif op == \"!=\":\n return VersionUnion(VersionRange(max=version), VersionRange(min=version))\n else:\n return version\n\n raise ValueError(\"Could not parse version constraint: {}\".format(constraint))\n", "path": "poetry/semver/__init__.py"}]}
2,381
176
gh_patches_debug_57059
rasdani/github-patches
git_diff
gratipay__gratipay.com-4197
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 403 clicking "fix credit card" in email when not logged in My credit card expired and I got the email reminding me to fix payment info. I clicked the "fix credit card" button in the email and was taken to a 403 Forbidden page. Would expect to be taken to login form when I'm not already logged in. Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gratipay/utils/__init__.py` Content: ``` 1 # encoding: utf8 2 3 from __future__ import absolute_import, division, print_function, unicode_literals 4 5 from base64 import urlsafe_b64encode, urlsafe_b64decode 6 from datetime import datetime, timedelta 7 8 from aspen import Response, json 9 from aspen.utils import to_rfc822, utcnow 10 from dependency_injection import resolve_dependencies 11 from postgres.cursors import SimpleCursorBase 12 13 import gratipay 14 15 16 BEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii') 17 18 # Difference between current time and credit card expiring date when 19 # card is considered as expiring 20 EXPIRING_DELTA = timedelta(days = 30) 21 22 23 def dict_to_querystring(mapping): 24 if not mapping: 25 return u'' 26 27 arguments = [] 28 for key, values in mapping.iteritems(): 29 for val in values: 30 arguments.append(u'='.join([key, val])) 31 32 return u'?' + u'&'.join(arguments) 33 34 35 def use_tildes_for_participants(website, request): 36 if request.path.raw.startswith('/~/'): 37 to = '/~' + request.path.raw[3:] 38 if request.qs.raw: 39 to += '?' + request.qs.raw 40 website.redirect(to) 41 elif request.path.raw.startswith('/~'): 42 request.path.__init__('/~/' + request.path.raw[2:]) 43 44 45 def canonicalize(redirect, path, base, canonical, given, arguments=None): 46 if given != canonical: 47 assert canonical.lower() == given.lower() # sanity check 48 remainder = path[len(base + given):] 49 50 if arguments is not None: 51 arguments = dict_to_querystring(arguments) 52 53 newpath = base + canonical + remainder + arguments or '' 54 redirect(newpath) 55 56 57 def get_participant(state, restrict=True, resolve_unclaimed=True): 58 """Given a Request, raise Response or return Participant. 59 60 If restrict is True then we'll restrict access to owners and admins. 
61 62 """ 63 redirect = state['website'].redirect 64 request = state['request'] 65 user = state['user'] 66 slug = request.line.uri.path['username'] 67 qs = request.line.uri.querystring 68 _ = state['_'] 69 70 if restrict: 71 if user.ANON: 72 raise Response(403, _("You need to log in to access this page.")) 73 74 from gratipay.models.participant import Participant # avoid circular import 75 participant = Participant.from_username(slug) 76 77 if participant is None: 78 raise Response(404) 79 80 canonicalize(redirect, request.line.uri.path.raw, '/~/', participant.username, slug, qs) 81 82 if participant.is_closed: 83 if user.ADMIN: 84 return participant 85 raise Response(410) 86 87 if participant.claimed_time is None and resolve_unclaimed: 88 to = participant.resolve_unclaimed() 89 if to: 90 # This is a stub account (someone on another platform who hasn't 91 # actually registered with Gratipay yet) 92 redirect(to) 93 else: 94 # This is an archived account (result of take_over) 95 if user.ADMIN: 96 return participant 97 raise Response(404) 98 99 if restrict: 100 if participant != user.participant: 101 if not user.ADMIN: 102 raise Response(403, _("You are not authorized to access this page.")) 103 104 return participant 105 106 107 def get_team(state): 108 """Given a Request, raise Response or return Team. 109 """ 110 redirect = state['website'].redirect 111 request = state['request'] 112 user = state['user'] 113 slug = request.line.uri.path['team'] 114 qs = request.line.uri.querystring 115 116 from gratipay.models.team import Team # avoid circular import 117 team = Team.from_slug(slug) 118 119 if team is None: 120 # Try to redirect to a Participant. 121 from gratipay.models.participant import Participant # avoid circular import 122 participant = Participant.from_username(slug) 123 if participant is not None: 124 qs = '?' + request.qs.raw if request.qs.raw else '' 125 redirect('/~' + request.path.raw[1:] + qs) 126 raise Response(404) 127 128 canonicalize(redirect, request.line.uri.path.raw, '/', team.slug, slug, qs) 129 130 if team.is_closed and not user.ADMIN: 131 raise Response(410) 132 133 return team 134 135 136 def encode_for_querystring(s): 137 """Given a unicode, return a unicode that's safe for transport across a querystring. 138 """ 139 if not isinstance(s, unicode): 140 raise TypeError('unicode required') 141 return urlsafe_b64encode(s.encode('utf8')).replace(b'=', b'~').decode('ascii') 142 143 144 def decode_from_querystring(s, **kw): 145 """Given a unicode computed by encode_for_querystring, return the inverse. 146 147 We raise Response(400) if the input value can't be decoded (i.e., it's not 148 ASCII, not padded properly, or not decodable as UTF-8 once Base64-decoded). 149 150 """ 151 if not isinstance(s, unicode): 152 raise TypeError('unicode required') 153 try: 154 return urlsafe_b64decode(s.encode('ascii').replace(b'~', b'=')).decode('utf8') 155 except: 156 if 'default' in kw: 157 # Enable callers to handle errors without using try/except. 
158 return kw['default'] 159 raise Response(400, "invalid input") 160 161 162 def update_cta(website): 163 nusers = website.db.one(""" 164 SELECT nusers FROM paydays 165 ORDER BY ts_end DESC LIMIT 1 166 """, default=0) 167 nreceiving_from = website.db.one(""" 168 SELECT nreceiving_from 169 FROM teams 170 WHERE slug = 'Gratipay' 171 """, default=0) 172 website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0 173 if cur < 10: goal = 20 174 elif cur < 15: goal = 30 175 elif cur < 25: goal = 40 176 elif cur < 35: goal = 50 177 elif cur < 45: goal = 60 178 elif cur < 55: goal = 70 179 elif cur < 65: goal = 80 180 elif cur > 70: goal = None 181 website.support_goal = goal 182 183 184 def _execute(this, sql, params=[]): 185 print(sql.strip(), params) 186 super(SimpleCursorBase, this).execute(sql, params) 187 188 def log_cursor(f): 189 "Prints sql and params to stdout. Works globaly so watch for threaded use." 190 def wrapper(*a, **kw): 191 try: 192 SimpleCursorBase.execute = _execute 193 ret = f(*a, **kw) 194 finally: 195 del SimpleCursorBase.execute 196 return ret 197 return wrapper 198 199 200 def format_money(money): 201 format = '%.2f' if money < 1000 else '%.0f' 202 return format % money 203 204 205 def excerpt_intro(text, length=175, append=u'…'): 206 if not text: 207 return '' 208 if len(text) > length: 209 return text[:length] + append 210 return text 211 212 213 def is_card_expiring(expiration_year, expiration_month): 214 now = datetime.utcnow() 215 expiring_date = datetime(expiration_year, expiration_month, 1) 216 delta = expiring_date - now 217 return delta < EXPIRING_DELTA 218 219 220 def set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'): 221 cookies[key] = value 222 cookie = cookies[key] 223 if expires: 224 if isinstance(expires, timedelta): 225 expires += utcnow() 226 if isinstance(expires, datetime): 227 expires = to_rfc822(expires).encode('ascii') 228 cookie[b'expires'] = expires 229 if httponly: 230 cookie[b'httponly'] = True 231 if path: 232 cookie[b'path'] = path 233 if gratipay.use_secure_cookies: 234 cookie[b'secure'] = True 235 236 237 def erase_cookie(cookies, key, **kw): 238 set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw) 239 240 241 def filter_profile_nav(user, participant, pages): 242 out = [] 243 for foo, bar, show_them, show_others in pages: 244 if (user.participant == participant and show_them) \ 245 or (user.participant != participant and show_others) \ 246 or user.ADMIN: 247 out.append((foo, bar, show_them, show_others)) 248 return out 249 250 251 def to_javascript(obj): 252 """For when you want to inject an object into a <script> tag. 253 """ 254 return json.dumps(obj).replace('</', '<\\/') 255 256 257 class LazyResponse(Response): 258 259 def __init__(self, code, lazy_body, **kw): 260 Response.__init__(self, code, '', **kw) 261 self.lazy_body = lazy_body 262 263 def render_body(self, state): 264 f = self.lazy_body 265 self.body = f(*resolve_dependencies(f, state).as_args) 266 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gratipay/utils/__init__.py b/gratipay/utils/__init__.py --- a/gratipay/utils/__init__.py +++ b/gratipay/utils/__init__.py @@ -69,7 +69,7 @@ if restrict: if user.ANON: - raise Response(403, _("You need to log in to access this page.")) + raise Response(401, _("You need to log in to access this page.")) from gratipay.models.participant import Participant # avoid circular import participant = Participant.from_username(slug)
{"golden_diff": "diff --git a/gratipay/utils/__init__.py b/gratipay/utils/__init__.py\n--- a/gratipay/utils/__init__.py\n+++ b/gratipay/utils/__init__.py\n@@ -69,7 +69,7 @@\n \n if restrict:\n if user.ANON:\n- raise Response(403, _(\"You need to log in to access this page.\"))\n+ raise Response(401, _(\"You need to log in to access this page.\"))\n \n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n", "issue": "403 clicking \"fix credit card\" in email when not logged in\nMy credit card expired and I got the email reminding me to fix payment info. I clicked the \"fix credit card\" button in the email and was taken to a 403 Forbidden page. Would expect to be taken to login form when I'm not already logged in. Thanks!\n\n", "before_files": [{"content": "# encoding: utf8\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nfrom datetime import datetime, timedelta\n\nfrom aspen import Response, json\nfrom aspen.utils import to_rfc822, utcnow\nfrom dependency_injection import resolve_dependencies\nfrom postgres.cursors import SimpleCursorBase\n\nimport gratipay\n\n\nBEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')\n\n# Difference between current time and credit card expiring date when\n# card is considered as expiring\nEXPIRING_DELTA = timedelta(days = 30)\n\n\ndef dict_to_querystring(mapping):\n if not mapping:\n return u''\n\n arguments = []\n for key, values in mapping.iteritems():\n for val in values:\n arguments.append(u'='.join([key, val]))\n\n return u'?' + u'&'.join(arguments)\n\n\ndef use_tildes_for_participants(website, request):\n if request.path.raw.startswith('/~/'):\n to = '/~' + request.path.raw[3:]\n if request.qs.raw:\n to += '?' 
+ request.qs.raw\n website.redirect(to)\n elif request.path.raw.startswith('/~'):\n request.path.__init__('/~/' + request.path.raw[2:])\n\n\ndef canonicalize(redirect, path, base, canonical, given, arguments=None):\n if given != canonical:\n assert canonical.lower() == given.lower() # sanity check\n remainder = path[len(base + given):]\n\n if arguments is not None:\n arguments = dict_to_querystring(arguments)\n\n newpath = base + canonical + remainder + arguments or ''\n redirect(newpath)\n\n\ndef get_participant(state, restrict=True, resolve_unclaimed=True):\n \"\"\"Given a Request, raise Response or return Participant.\n\n If restrict is True then we'll restrict access to owners and admins.\n\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['username']\n qs = request.line.uri.querystring\n _ = state['_']\n\n if restrict:\n if user.ANON:\n raise Response(403, _(\"You need to log in to access this page.\"))\n\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n\n if participant is None:\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/~/', participant.username, slug, qs)\n\n if participant.is_closed:\n if user.ADMIN:\n return participant\n raise Response(410)\n\n if participant.claimed_time is None and resolve_unclaimed:\n to = participant.resolve_unclaimed()\n if to:\n # This is a stub account (someone on another platform who hasn't\n # actually registered with Gratipay yet)\n redirect(to)\n else:\n # This is an archived account (result of take_over)\n if user.ADMIN:\n return participant\n raise Response(404)\n\n if restrict:\n if participant != user.participant:\n if not user.ADMIN:\n raise Response(403, _(\"You are not authorized to access this page.\"))\n\n return participant\n\n\ndef get_team(state):\n \"\"\"Given a Request, raise Response or return Team.\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['team']\n qs = request.line.uri.querystring\n\n from gratipay.models.team import Team # avoid circular import\n team = Team.from_slug(slug)\n\n if team is None:\n # Try to redirect to a Participant.\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n if participant is not None:\n qs = '?' 
+ request.qs.raw if request.qs.raw else ''\n redirect('/~' + request.path.raw[1:] + qs)\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/', team.slug, slug, qs)\n\n if team.is_closed and not user.ADMIN:\n raise Response(410)\n\n return team\n\n\ndef encode_for_querystring(s):\n \"\"\"Given a unicode, return a unicode that's safe for transport across a querystring.\n \"\"\"\n if not isinstance(s, unicode):\n raise TypeError('unicode required')\n return urlsafe_b64encode(s.encode('utf8')).replace(b'=', b'~').decode('ascii')\n\n\ndef decode_from_querystring(s, **kw):\n \"\"\"Given a unicode computed by encode_for_querystring, return the inverse.\n\n We raise Response(400) if the input value can't be decoded (i.e., it's not\n ASCII, not padded properly, or not decodable as UTF-8 once Base64-decoded).\n\n \"\"\"\n if not isinstance(s, unicode):\n raise TypeError('unicode required')\n try:\n return urlsafe_b64decode(s.encode('ascii').replace(b'~', b'=')).decode('utf8')\n except:\n if 'default' in kw:\n # Enable callers to handle errors without using try/except.\n return kw['default']\n raise Response(400, \"invalid input\")\n\n\ndef update_cta(website):\n nusers = website.db.one(\"\"\"\n SELECT nusers FROM paydays\n ORDER BY ts_end DESC LIMIT 1\n \"\"\", default=0)\n nreceiving_from = website.db.one(\"\"\"\n SELECT nreceiving_from\n FROM teams\n WHERE slug = 'Gratipay'\n \"\"\", default=0)\n website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0\n if cur < 10: goal = 20\n elif cur < 15: goal = 30\n elif cur < 25: goal = 40\n elif cur < 35: goal = 50\n elif cur < 45: goal = 60\n elif cur < 55: goal = 70\n elif cur < 65: goal = 80\n elif cur > 70: goal = None\n website.support_goal = goal\n\n\ndef _execute(this, sql, params=[]):\n print(sql.strip(), params)\n super(SimpleCursorBase, this).execute(sql, params)\n\ndef log_cursor(f):\n \"Prints sql and params to stdout. 
Works globaly so watch for threaded use.\"\n def wrapper(*a, **kw):\n try:\n SimpleCursorBase.execute = _execute\n ret = f(*a, **kw)\n finally:\n del SimpleCursorBase.execute\n return ret\n return wrapper\n\n\ndef format_money(money):\n format = '%.2f' if money < 1000 else '%.0f'\n return format % money\n\n\ndef excerpt_intro(text, length=175, append=u'\u2026'):\n if not text:\n return ''\n if len(text) > length:\n return text[:length] + append\n return text\n\n\ndef is_card_expiring(expiration_year, expiration_month):\n now = datetime.utcnow()\n expiring_date = datetime(expiration_year, expiration_month, 1)\n delta = expiring_date - now\n return delta < EXPIRING_DELTA\n\n\ndef set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):\n cookies[key] = value\n cookie = cookies[key]\n if expires:\n if isinstance(expires, timedelta):\n expires += utcnow()\n if isinstance(expires, datetime):\n expires = to_rfc822(expires).encode('ascii')\n cookie[b'expires'] = expires\n if httponly:\n cookie[b'httponly'] = True\n if path:\n cookie[b'path'] = path\n if gratipay.use_secure_cookies:\n cookie[b'secure'] = True\n\n\ndef erase_cookie(cookies, key, **kw):\n set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)\n\n\ndef filter_profile_nav(user, participant, pages):\n out = []\n for foo, bar, show_them, show_others in pages:\n if (user.participant == participant and show_them) \\\n or (user.participant != participant and show_others) \\\n or user.ADMIN:\n out.append((foo, bar, show_them, show_others))\n return out\n\n\ndef to_javascript(obj):\n \"\"\"For when you want to inject an object into a <script> tag.\n \"\"\"\n return json.dumps(obj).replace('</', '<\\\\/')\n\n\nclass LazyResponse(Response):\n\n def __init__(self, code, lazy_body, **kw):\n Response.__init__(self, code, '', **kw)\n self.lazy_body = lazy_body\n\n def render_body(self, state):\n f = self.lazy_body\n self.body = f(*resolve_dependencies(f, state).as_args)\n", "path": "gratipay/utils/__init__.py"}], "after_files": [{"content": "# encoding: utf8\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nfrom datetime import datetime, timedelta\n\nfrom aspen import Response, json\nfrom aspen.utils import to_rfc822, utcnow\nfrom dependency_injection import resolve_dependencies\nfrom postgres.cursors import SimpleCursorBase\n\nimport gratipay\n\n\nBEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')\n\n# Difference between current time and credit card expiring date when\n# card is considered as expiring\nEXPIRING_DELTA = timedelta(days = 30)\n\n\ndef dict_to_querystring(mapping):\n if not mapping:\n return u''\n\n arguments = []\n for key, values in mapping.iteritems():\n for val in values:\n arguments.append(u'='.join([key, val]))\n\n return u'?' + u'&'.join(arguments)\n\n\ndef use_tildes_for_participants(website, request):\n if request.path.raw.startswith('/~/'):\n to = '/~' + request.path.raw[3:]\n if request.qs.raw:\n to += '?' 
+ request.qs.raw\n website.redirect(to)\n elif request.path.raw.startswith('/~'):\n request.path.__init__('/~/' + request.path.raw[2:])\n\n\ndef canonicalize(redirect, path, base, canonical, given, arguments=None):\n if given != canonical:\n assert canonical.lower() == given.lower() # sanity check\n remainder = path[len(base + given):]\n\n if arguments is not None:\n arguments = dict_to_querystring(arguments)\n\n newpath = base + canonical + remainder + arguments or ''\n redirect(newpath)\n\n\ndef get_participant(state, restrict=True, resolve_unclaimed=True):\n \"\"\"Given a Request, raise Response or return Participant.\n\n If restrict is True then we'll restrict access to owners and admins.\n\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['username']\n qs = request.line.uri.querystring\n _ = state['_']\n\n if restrict:\n if user.ANON:\n raise Response(401, _(\"You need to log in to access this page.\"))\n\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n\n if participant is None:\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/~/', participant.username, slug, qs)\n\n if participant.is_closed:\n if user.ADMIN:\n return participant\n raise Response(410)\n\n if participant.claimed_time is None and resolve_unclaimed:\n to = participant.resolve_unclaimed()\n if to:\n # This is a stub account (someone on another platform who hasn't\n # actually registered with Gratipay yet)\n redirect(to)\n else:\n # This is an archived account (result of take_over)\n if user.ADMIN:\n return participant\n raise Response(404)\n\n if restrict:\n if participant != user.participant:\n if not user.ADMIN:\n raise Response(403, _(\"You are not authorized to access this page.\"))\n\n return participant\n\n\ndef get_team(state):\n \"\"\"Given a Request, raise Response or return Team.\n \"\"\"\n redirect = state['website'].redirect\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['team']\n qs = request.line.uri.querystring\n\n from gratipay.models.team import Team # avoid circular import\n team = Team.from_slug(slug)\n\n if team is None:\n # Try to redirect to a Participant.\n from gratipay.models.participant import Participant # avoid circular import\n participant = Participant.from_username(slug)\n if participant is not None:\n qs = '?' 
+ request.qs.raw if request.qs.raw else ''\n redirect('/~' + request.path.raw[1:] + qs)\n raise Response(404)\n\n canonicalize(redirect, request.line.uri.path.raw, '/', team.slug, slug, qs)\n\n if team.is_closed and not user.ADMIN:\n raise Response(410)\n\n return team\n\n\ndef encode_for_querystring(s):\n \"\"\"Given a unicode, return a unicode that's safe for transport across a querystring.\n \"\"\"\n if not isinstance(s, unicode):\n raise TypeError('unicode required')\n return urlsafe_b64encode(s.encode('utf8')).replace(b'=', b'~').decode('ascii')\n\n\ndef decode_from_querystring(s, **kw):\n \"\"\"Given a unicode computed by encode_for_querystring, return the inverse.\n\n We raise Response(400) if the input value can't be decoded (i.e., it's not\n ASCII, not padded properly, or not decodable as UTF-8 once Base64-decoded).\n\n \"\"\"\n if not isinstance(s, unicode):\n raise TypeError('unicode required')\n try:\n return urlsafe_b64decode(s.encode('ascii').replace(b'~', b'=')).decode('utf8')\n except:\n if 'default' in kw:\n # Enable callers to handle errors without using try/except.\n return kw['default']\n raise Response(400, \"invalid input\")\n\n\ndef update_cta(website):\n nusers = website.db.one(\"\"\"\n SELECT nusers FROM paydays\n ORDER BY ts_end DESC LIMIT 1\n \"\"\", default=0)\n nreceiving_from = website.db.one(\"\"\"\n SELECT nreceiving_from\n FROM teams\n WHERE slug = 'Gratipay'\n \"\"\", default=0)\n website.support_current = cur = int(round(nreceiving_from / nusers * 100)) if nusers else 0\n if cur < 10: goal = 20\n elif cur < 15: goal = 30\n elif cur < 25: goal = 40\n elif cur < 35: goal = 50\n elif cur < 45: goal = 60\n elif cur < 55: goal = 70\n elif cur < 65: goal = 80\n elif cur > 70: goal = None\n website.support_goal = goal\n\n\ndef _execute(this, sql, params=[]):\n print(sql.strip(), params)\n super(SimpleCursorBase, this).execute(sql, params)\n\ndef log_cursor(f):\n \"Prints sql and params to stdout. 
Works globaly so watch for threaded use.\"\n def wrapper(*a, **kw):\n try:\n SimpleCursorBase.execute = _execute\n ret = f(*a, **kw)\n finally:\n del SimpleCursorBase.execute\n return ret\n return wrapper\n\n\ndef format_money(money):\n format = '%.2f' if money < 1000 else '%.0f'\n return format % money\n\n\ndef excerpt_intro(text, length=175, append=u'\u2026'):\n if not text:\n return ''\n if len(text) > length:\n return text[:length] + append\n return text\n\n\ndef is_card_expiring(expiration_year, expiration_month):\n now = datetime.utcnow()\n expiring_date = datetime(expiration_year, expiration_month, 1)\n delta = expiring_date - now\n return delta < EXPIRING_DELTA\n\n\ndef set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):\n cookies[key] = value\n cookie = cookies[key]\n if expires:\n if isinstance(expires, timedelta):\n expires += utcnow()\n if isinstance(expires, datetime):\n expires = to_rfc822(expires).encode('ascii')\n cookie[b'expires'] = expires\n if httponly:\n cookie[b'httponly'] = True\n if path:\n cookie[b'path'] = path\n if gratipay.use_secure_cookies:\n cookie[b'secure'] = True\n\n\ndef erase_cookie(cookies, key, **kw):\n set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)\n\n\ndef filter_profile_nav(user, participant, pages):\n out = []\n for foo, bar, show_them, show_others in pages:\n if (user.participant == participant and show_them) \\\n or (user.participant != participant and show_others) \\\n or user.ADMIN:\n out.append((foo, bar, show_them, show_others))\n return out\n\n\ndef to_javascript(obj):\n \"\"\"For when you want to inject an object into a <script> tag.\n \"\"\"\n return json.dumps(obj).replace('</', '<\\\\/')\n\n\nclass LazyResponse(Response):\n\n def __init__(self, code, lazy_body, **kw):\n Response.__init__(self, code, '', **kw)\n self.lazy_body = lazy_body\n\n def render_body(self, state):\n f = self.lazy_body\n self.body = f(*resolve_dependencies(f, state).as_args)\n", "path": "gratipay/utils/__init__.py"}]}
3,057
133
gh_patches_debug_6612
rasdani/github-patches
git_diff
zestedesavoir__zds-site-5187
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Mauvais format (taille d'image) avec la TwitterCard Le problème : https://twitter.com/A312_zds/status/1027577197987606528 L'image est flou car 60x60 :( HTML : <meta property="twitter:image" content="http://zestedesavoir.com/media/galleries/5313/e35c638c-b58c-4ed7-851b-03f0c21cef2c.png.60x60_q95_crop.jpg"> Le format de la TwitterCard devrait être au minimum : 96x96 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zds/settings/abstract_base/zds.py` Content: ``` 1 from os.path import join 2 from pathlib import Path 3 4 from django.utils.translation import gettext_lazy as _ 5 6 from .config import config 7 from .base_dir import BASE_DIR 8 9 10 zds_config = config.get('zds', {}) 11 12 13 GEOIP_PATH = join(BASE_DIR, 'geodata') 14 15 16 ES_ENABLED = True 17 18 ES_CONNECTIONS = { 19 'default': { 20 'hosts': ['localhost:9200'], 21 } 22 } 23 24 ES_SEARCH_INDEX = { 25 'name': 'zds_search', 26 'shards': 3, 27 'replicas': 0, 28 } 29 30 31 # Anonymous [Dis]Likes. Authors of [dis]likes before those pk will never be shown 32 VOTES_ID_LIMIT = zds_config.get('VOTES_ID_LIMIT', 0) 33 34 35 THUMBNAIL_ALIASES = { 36 '': { 37 'avatar': {'size': (60, 60), 'crop': True}, 38 'avatar_mini': {'size': (24, 24), 'crop': True}, 39 'tutorial_illu': {'size': (60, 60), 'crop': True}, 40 'article_illu': {'size': (60, 60), 'crop': True}, 41 'content_thumb': {'size': (96, 96), 'crop': True}, 42 'help_illu': {'size': (48, 48), 'crop': True}, 43 'help_mini_illu': {'size': (26, 26), 'crop': True}, 44 'gallery': {'size': (120, 120), 'crop': True}, 45 'featured': {'size': (228, 228), 'crop': True}, 46 'gallery_illu': {'size': (480, 270), 'crop': True}, 47 'content': {'size': (960, 960), 'crop': False}, 48 }, 49 } 50 51 52 ZDS_APP = { 53 'site': { 54 'name': 'ZesteDeSavoir', 55 'literal_name': 'Zeste de Savoir', 56 'slogan': 'Zeste de Savoir, la connaissance pour tous et sans pépins', 57 'abbr': 'zds', 58 'url': 'https://zestedesavoir.com', 59 'dns': 'zestedesavoir.com', 60 'email_contact': '[email protected]', 61 'email_noreply': '[email protected]', 62 'forum_feedback_users': '/forums/communaute/bug-suggestions/', 63 'contribute_link': 'https://docs.zestedesavoir.com/contributing.html', 64 'short_description': '', 65 'long_description': 'Zeste de Savoir est un site de partage de connaissances ' 66 'sur lequel vous trouverez des tutoriels de tous niveaux, ' 67 'des articles et des forums d’entraide animés par et pour ' 68 'la communauté.', 69 'googleAnalyticsID': zds_config.get('google_analytics_id'), 70 'googleTagManagerID': zds_config.get('google_tag_manager_id'), 71 'association': { 72 'name': 'Zeste de Savoir', 73 'fee': zds_config.get('association_fee', '20 €'), 74 'email': '[email protected]', 75 'email_ca': '[email protected]', 76 'forum_ca_pk': 25 77 }, 78 'repository': { 79 'url': 'https://github.com/zestedesavoir/zds-site', 80 'bugtracker': 'https://github.com/zestedesavoir/zds-site/issues', 81 'api': 'https://api.github.com/repos/zestedesavoir/zds-site', 82 'tags': [ 83 'C-Back', 'C-Front', 'C-API', 'C-Documentation', 'C-Infra', 'S-Bug', 'S-Régression', 84 'S-Évolution' 85 ] 86 }, 87 'licenses': { 88 'logo': { 89 'code': 'CC-BY', 90 'title': 'Creative Commons License', 91 'description': 'Licence Creative Commons Attribution - Pas d’Utilisation Commerciale - ' 92 'Partage dans les Mêmes Conditions 4.0 
International.', 93 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png', 94 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/', 95 'author': 'MaxRoyo' 96 }, 97 'cookies': { 98 'code': 'CC-BY', 99 'title': 'Licence Creative Commons', 100 'description': 'licence Creative Commons Attribution 4.0 International', 101 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png', 102 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/' 103 }, 104 'source': { 105 'code': 'GPL v3', 106 'url_license': 'http://www.gnu.org/licenses/gpl-3.0.html', 107 'provider_name': 'Progdupeupl', 108 'provider_url': 'http://pdp.microjoe.org', 109 }, 110 'licence_info_title': 'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic' 111 'ences-sur-zeste-de-savoir/', 112 'licence_info_link': 'Le droit d\'auteur, Creative Commons et les licences sur Zeste de Savoir' 113 }, 114 'hosting': { 115 'name': 'GANDI SAS', 116 'address': '63-65 boulevard Massena - 75013 Paris - France' 117 }, 118 'social': { 119 'facebook': 'https://www.facebook.com/ZesteDeSavoir', 120 'twitter': 'https://twitter.com/ZesteDeSavoir', 121 'googleplus': 'https://plus.google.com/u/0/107033688356682807298' 122 }, 123 'cnil': '1771020', 124 }, 125 'member': { 126 'bot_account': zds_config.get('member_bot_username', 'admin'), 127 'anonymous_account': zds_config.get('member_anonymous_username', 'anonymous'), 128 'external_account': zds_config.get('member_external_username', 'external'), 129 'bot_group': 'bot', 130 'dev_group': 'devs', 131 'members_per_page': 100, 132 'providers_per_page': 100, 133 'hats_per_page': 50, 134 'users_in_hats_list': 5, 135 'requested_hats_per_page': 100, 136 'update_last_visit_interval': 600, # seconds 137 'old_smileys_allowed': False, 138 'old_smileys_cookie_key': 'use_old_smileys', 139 }, 140 'hats': { 141 'moderation': 'Staff', 142 'validation': 'Staff', 143 'hats_management': 'Staff', 144 }, 145 'gallery': { 146 'image_max_size': 1024 * 1024, 147 'gallery_per_page': 21, 148 'images_per_page': 21, 149 }, 150 'tutorial': { 151 'home_number': 4, 152 }, 153 'article': { 154 'home_number': 3 155 }, 156 'opinions': { 157 'home_number': 5 158 }, 159 'content': { 160 'repo_private_path': join(BASE_DIR, 'contents-private'), 161 'repo_public_path': join(BASE_DIR, 'contents-public'), 162 'extra_contents_dirname': 'extra_contents', 163 # can also be 'extra_content_generation_policy': 'WATCHDOG' 164 # or 'extra_content_generation_policy': 'NOTHING' 165 'extra_content_generation_policy': 'SYNC', 166 'extra_content_watchdog_dir': join(BASE_DIR, 'watchdog-build'), 167 'max_tree_depth': 3, 168 'default_licence_pk': 7, 169 'content_per_page': 42, 170 'max_last_publications_level_1': 6, 171 'max_last_publications_level_2': 12, 172 'max_last_publications_level_3': 12, 173 'notes_per_page': 25, 174 'helps_per_page': 20, 175 'commits_per_page': 20, 176 'feed_length': 5, 177 'user_page_number': 5, 178 'default_image': join(BASE_DIR, 'fixtures', 'noir_black.png'), 179 'import_image_prefix': 'archive', 180 'build_pdf_when_published': True, 181 'maximum_slug_size': 150, 182 'characters_per_minute': 1500, 183 'editorial_line_link': 184 'https://zestedesavoir.com/articles/222/la-ligne-editoriale-officielle-de-zeste-de-savoir/', 185 'epub_stylesheets': { 186 'toc': Path('toc.css'), 187 'full': Path(BASE_DIR) / 'dist' / 'css' / 'zmd.css', 188 'katex': Path(BASE_DIR) / 'dist' / 'css' / 'katex.min.css' 189 }, 190 'latex_template_repo': 'NOT_EXISTING_DIR' 191 }, 192 
'forum': { 193 'posts_per_page': 21, 194 'topics_per_page': 21, 195 'spam_limit_seconds': 60 * 15, 196 'spam_limit_participant': 2, 197 'beta_forum_id': zds_config.get('publications_being_written_forum_id', 1), 198 'max_post_length': 1000000, 199 'top_tag_max': 5, 200 'home_number': 5, 201 'old_post_limit_days': 90, 202 # Exclude tags from top tags list. Tags listed here should not be relevant for most of users. 203 # Be warned exclude too much tags can restrict performance 204 'top_tag_exclu': ['bug', 'suggestion', 'tutoriel', 'beta', 'article'] 205 }, 206 'topic': { 207 'home_number': 5, 208 }, 209 'comment': { 210 'max_pings': 15, 211 # allow to mention (and notify) members in messages 212 'enable_pings': True, 213 }, 214 'featured_resource': { 215 'featured_per_page': 100, 216 'home_number': 5, 217 }, 218 'notification': { 219 'per_page': 50, 220 }, 221 'paginator': { 222 'folding_limit': 4 223 }, 224 'search': { 225 'mark_keywords': ['javafx', 'haskell', 'groovy', 'powershell', 'latex', 'linux', 'windows'], 226 'results_per_page': 20, 227 'search_groups': { 228 'content': ( 229 _('Contenus publiés'), ['publishedcontent', 'chapter'] 230 ), 231 'topic': ( 232 _('Sujets du forum'), ['topic'] 233 ), 234 'post': ( 235 _('Messages du forum'), ['post'] 236 ), 237 }, 238 'boosts': { 239 'publishedcontent': { 240 'global': 3.0, 241 'if_article': 1.0, 242 'if_tutorial': 1.0, 243 'if_medium_or_big_tutorial': 1.5, 244 'if_opinion': 0.66, 245 'if_opinion_not_picked': 0.5 246 }, 247 'topic': { 248 'global': 2.0, 249 'if_solved': 1.1, 250 'if_sticky': 1.2, 251 'if_locked': 0.1, 252 }, 253 'chapter': { 254 'global': 1.5, 255 }, 256 'post': { 257 'global': 1.0, 258 'if_first': 1.2, 259 'if_useful': 1.5, 260 'ld_ratio_above_1': 1.05, 261 'ld_ratio_below_1': 0.95, 262 } 263 } 264 }, 265 'visual_changes': [], 266 'display_search_bar': True, 267 'zmd': { 268 'server': 'http://127.0.0.1:27272', 269 'disable_pings': False 270 }, 271 'stats_ga_viewid': 'ga:86962671' 272 } 273 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zds/settings/abstract_base/zds.py b/zds/settings/abstract_base/zds.py --- a/zds/settings/abstract_base/zds.py +++ b/zds/settings/abstract_base/zds.py @@ -45,6 +45,9 @@ 'featured': {'size': (228, 228), 'crop': True}, 'gallery_illu': {'size': (480, 270), 'crop': True}, 'content': {'size': (960, 960), 'crop': False}, + 'social_network': {'size': (144, 144), 'crop': True}, + # ^^^ ^^^ -> minimum dimensions of 144x144 + # https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary }, }
{"golden_diff": "diff --git a/zds/settings/abstract_base/zds.py b/zds/settings/abstract_base/zds.py\n--- a/zds/settings/abstract_base/zds.py\n+++ b/zds/settings/abstract_base/zds.py\n@@ -45,6 +45,9 @@\n 'featured': {'size': (228, 228), 'crop': True},\n 'gallery_illu': {'size': (480, 270), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n+ 'social_network': {'size': (144, 144), 'crop': True},\n+ # ^^^ ^^^ -> minimum dimensions of 144x144\n+ # https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary\n },\n }\n", "issue": "Mauvais format (taille d'image) avec la TwitterCard\nLe probl\u00e8me : \r\n\r\nhttps://twitter.com/A312_zds/status/1027577197987606528\r\n\r\nL'image est flou car 60x60 :(\r\n\r\n\r\nHTML :\r\n\r\n <meta property=\"twitter:image\" content=\"http://zestedesavoir.com/media/galleries/5313/e35c638c-b58c-4ed7-851b-03f0c21cef2c.png.60x60_q95_crop.jpg\">\r\n\r\nLe format de la TwitterCard devrait \u00eatre au minimum : 96x96 \n", "before_files": [{"content": "from os.path import join\nfrom pathlib import Path\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .config import config\nfrom .base_dir import BASE_DIR\n\n\nzds_config = config.get('zds', {})\n\n\nGEOIP_PATH = join(BASE_DIR, 'geodata')\n\n\nES_ENABLED = True\n\nES_CONNECTIONS = {\n 'default': {\n 'hosts': ['localhost:9200'],\n }\n}\n\nES_SEARCH_INDEX = {\n 'name': 'zds_search',\n 'shards': 3,\n 'replicas': 0,\n}\n\n\n# Anonymous [Dis]Likes. Authors of [dis]likes before those pk will never be shown\nVOTES_ID_LIMIT = zds_config.get('VOTES_ID_LIMIT', 0)\n\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'content_thumb': {'size': (96, 96), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'featured': {'size': (228, 228), 'crop': True},\n 'gallery_illu': {'size': (480, 270), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n },\n}\n\n\nZDS_APP = {\n 'site': {\n 'name': 'ZesteDeSavoir',\n 'literal_name': 'Zeste de Savoir',\n 'slogan': 'Zeste de Savoir, la connaissance pour tous et sans p\u00e9pins',\n 'abbr': 'zds',\n 'url': 'https://zestedesavoir.com',\n 'dns': 'zestedesavoir.com',\n 'email_contact': '[email protected]',\n 'email_noreply': '[email protected]',\n 'forum_feedback_users': '/forums/communaute/bug-suggestions/',\n 'contribute_link': 'https://docs.zestedesavoir.com/contributing.html',\n 'short_description': '',\n 'long_description': 'Zeste de Savoir est un site de partage de connaissances '\n 'sur lequel vous trouverez des tutoriels de tous niveaux, '\n 'des articles et des forums d\u2019entraide anim\u00e9s par et pour '\n 'la communaut\u00e9.',\n 'googleAnalyticsID': zds_config.get('google_analytics_id'),\n 'googleTagManagerID': zds_config.get('google_tag_manager_id'),\n 'association': {\n 'name': 'Zeste de Savoir',\n 'fee': zds_config.get('association_fee', '20\u00a0\u20ac'),\n 'email': '[email protected]',\n 'email_ca': '[email protected]',\n 'forum_ca_pk': 25\n },\n 'repository': {\n 'url': 'https://github.com/zestedesavoir/zds-site',\n 'bugtracker': 'https://github.com/zestedesavoir/zds-site/issues',\n 'api': 'https://api.github.com/repos/zestedesavoir/zds-site',\n 'tags': [\n 'C-Back', 'C-Front', 'C-API', 'C-Documentation', 'C-Infra', 
'S-Bug', 'S-R\u00e9gression',\n 'S-\u00c9volution'\n ]\n },\n 'licenses': {\n 'logo': {\n 'code': 'CC-BY',\n 'title': 'Creative Commons License',\n 'description': 'Licence Creative Commons Attribution - Pas d\u2019Utilisation Commerciale - '\n 'Partage dans les M\u00eames Conditions 4.0 International.',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/',\n 'author': 'MaxRoyo'\n },\n 'cookies': {\n 'code': 'CC-BY',\n 'title': 'Licence Creative Commons',\n 'description': 'licence Creative Commons Attribution 4.0 International',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/'\n },\n 'source': {\n 'code': 'GPL v3',\n 'url_license': 'http://www.gnu.org/licenses/gpl-3.0.html',\n 'provider_name': 'Progdupeupl',\n 'provider_url': 'http://pdp.microjoe.org',\n },\n 'licence_info_title': 'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n 'ences-sur-zeste-de-savoir/',\n 'licence_info_link': 'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': 'GANDI SAS',\n 'address': '63-65 boulevard Massena - 75013 Paris - France'\n },\n 'social': {\n 'facebook': 'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': 'https://twitter.com/ZesteDeSavoir',\n 'googleplus': 'https://plus.google.com/u/0/107033688356682807298'\n },\n 'cnil': '1771020',\n },\n 'member': {\n 'bot_account': zds_config.get('member_bot_username', 'admin'),\n 'anonymous_account': zds_config.get('member_anonymous_username', 'anonymous'),\n 'external_account': zds_config.get('member_external_username', 'external'),\n 'bot_group': 'bot',\n 'dev_group': 'devs',\n 'members_per_page': 100,\n 'providers_per_page': 100,\n 'hats_per_page': 50,\n 'users_in_hats_list': 5,\n 'requested_hats_per_page': 100,\n 'update_last_visit_interval': 600, # seconds\n 'old_smileys_allowed': False,\n 'old_smileys_cookie_key': 'use_old_smileys',\n },\n 'hats': {\n 'moderation': 'Staff',\n 'validation': 'Staff',\n 'hats_management': 'Staff',\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n 'gallery_per_page': 21,\n 'images_per_page': 21,\n },\n 'tutorial': {\n 'home_number': 4,\n },\n 'article': {\n 'home_number': 3\n },\n 'opinions': {\n 'home_number': 5\n },\n 'content': {\n 'repo_private_path': join(BASE_DIR, 'contents-private'),\n 'repo_public_path': join(BASE_DIR, 'contents-public'),\n 'extra_contents_dirname': 'extra_contents',\n # can also be 'extra_content_generation_policy': 'WATCHDOG'\n # or 'extra_content_generation_policy': 'NOTHING'\n 'extra_content_generation_policy': 'SYNC',\n 'extra_content_watchdog_dir': join(BASE_DIR, 'watchdog-build'),\n 'max_tree_depth': 3,\n 'default_licence_pk': 7,\n 'content_per_page': 42,\n 'max_last_publications_level_1': 6,\n 'max_last_publications_level_2': 12,\n 'max_last_publications_level_3': 12,\n 'notes_per_page': 25,\n 'helps_per_page': 20,\n 'commits_per_page': 20,\n 'feed_length': 5,\n 'user_page_number': 5,\n 'default_image': join(BASE_DIR, 'fixtures', 'noir_black.png'),\n 'import_image_prefix': 'archive',\n 'build_pdf_when_published': True,\n 'maximum_slug_size': 150,\n 'characters_per_minute': 1500,\n 'editorial_line_link':\n 'https://zestedesavoir.com/articles/222/la-ligne-editoriale-officielle-de-zeste-de-savoir/',\n 'epub_stylesheets': {\n 'toc': Path('toc.css'),\n 'full': Path(BASE_DIR) / 'dist' / 'css' / 'zmd.css',\n 'katex': Path(BASE_DIR) 
/ 'dist' / 'css' / 'katex.min.css'\n },\n 'latex_template_repo': 'NOT_EXISTING_DIR'\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'beta_forum_id': zds_config.get('publications_being_written_forum_id', 1),\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n 'old_post_limit_days': 90,\n # Exclude tags from top tags list. Tags listed here should not be relevant for most of users.\n # Be warned exclude too much tags can restrict performance\n 'top_tag_exclu': ['bug', 'suggestion', 'tutoriel', 'beta', 'article']\n },\n 'topic': {\n 'home_number': 5,\n },\n 'comment': {\n 'max_pings': 15,\n # allow to mention (and notify) members in messages\n 'enable_pings': True,\n },\n 'featured_resource': {\n 'featured_per_page': 100,\n 'home_number': 5,\n },\n 'notification': {\n 'per_page': 50,\n },\n 'paginator': {\n 'folding_limit': 4\n },\n 'search': {\n 'mark_keywords': ['javafx', 'haskell', 'groovy', 'powershell', 'latex', 'linux', 'windows'],\n 'results_per_page': 20,\n 'search_groups': {\n 'content': (\n _('Contenus publi\u00e9s'), ['publishedcontent', 'chapter']\n ),\n 'topic': (\n _('Sujets du forum'), ['topic']\n ),\n 'post': (\n _('Messages du forum'), ['post']\n ),\n },\n 'boosts': {\n 'publishedcontent': {\n 'global': 3.0,\n 'if_article': 1.0,\n 'if_tutorial': 1.0,\n 'if_medium_or_big_tutorial': 1.5,\n 'if_opinion': 0.66,\n 'if_opinion_not_picked': 0.5\n },\n 'topic': {\n 'global': 2.0,\n 'if_solved': 1.1,\n 'if_sticky': 1.2,\n 'if_locked': 0.1,\n },\n 'chapter': {\n 'global': 1.5,\n },\n 'post': {\n 'global': 1.0,\n 'if_first': 1.2,\n 'if_useful': 1.5,\n 'ld_ratio_above_1': 1.05,\n 'ld_ratio_below_1': 0.95,\n }\n }\n },\n 'visual_changes': [],\n 'display_search_bar': True,\n 'zmd': {\n 'server': 'http://127.0.0.1:27272',\n 'disable_pings': False\n },\n 'stats_ga_viewid': 'ga:86962671'\n}\n", "path": "zds/settings/abstract_base/zds.py"}], "after_files": [{"content": "from os.path import join\nfrom pathlib import Path\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .config import config\nfrom .base_dir import BASE_DIR\n\n\nzds_config = config.get('zds', {})\n\n\nGEOIP_PATH = join(BASE_DIR, 'geodata')\n\n\nES_ENABLED = True\n\nES_CONNECTIONS = {\n 'default': {\n 'hosts': ['localhost:9200'],\n }\n}\n\nES_SEARCH_INDEX = {\n 'name': 'zds_search',\n 'shards': 3,\n 'replicas': 0,\n}\n\n\n# Anonymous [Dis]Likes. 
Authors of [dis]likes before those pk will never be shown\nVOTES_ID_LIMIT = zds_config.get('VOTES_ID_LIMIT', 0)\n\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'content_thumb': {'size': (96, 96), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'featured': {'size': (228, 228), 'crop': True},\n 'gallery_illu': {'size': (480, 270), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n 'social_network': {'size': (144, 144), 'crop': True},\n # ^^^ ^^^ -> minimum dimensions of 144x144\n # https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary\n },\n}\n\n\nZDS_APP = {\n 'site': {\n 'name': 'ZesteDeSavoir',\n 'literal_name': 'Zeste de Savoir',\n 'slogan': 'Zeste de Savoir, la connaissance pour tous et sans p\u00e9pins',\n 'abbr': 'zds',\n 'url': 'https://zestedesavoir.com',\n 'dns': 'zestedesavoir.com',\n 'email_contact': '[email protected]',\n 'email_noreply': '[email protected]',\n 'forum_feedback_users': '/forums/communaute/bug-suggestions/',\n 'contribute_link': 'https://docs.zestedesavoir.com/contributing.html',\n 'short_description': '',\n 'long_description': 'Zeste de Savoir est un site de partage de connaissances '\n 'sur lequel vous trouverez des tutoriels de tous niveaux, '\n 'des articles et des forums d\u2019entraide anim\u00e9s par et pour '\n 'la communaut\u00e9.',\n 'googleAnalyticsID': zds_config.get('google_analytics_id'),\n 'googleTagManagerID': zds_config.get('google_tag_manager_id'),\n 'association': {\n 'name': 'Zeste de Savoir',\n 'fee': zds_config.get('association_fee', '20\u00a0\u20ac'),\n 'email': '[email protected]',\n 'email_ca': '[email protected]',\n 'forum_ca_pk': 25\n },\n 'repository': {\n 'url': 'https://github.com/zestedesavoir/zds-site',\n 'bugtracker': 'https://github.com/zestedesavoir/zds-site/issues',\n 'api': 'https://api.github.com/repos/zestedesavoir/zds-site',\n 'tags': [\n 'C-Back', 'C-Front', 'C-API', 'C-Documentation', 'C-Infra', 'S-Bug', 'S-R\u00e9gression',\n 'S-\u00c9volution'\n ]\n },\n 'licenses': {\n 'logo': {\n 'code': 'CC-BY',\n 'title': 'Creative Commons License',\n 'description': 'Licence Creative Commons Attribution - Pas d\u2019Utilisation Commerciale - '\n 'Partage dans les M\u00eames Conditions 4.0 International.',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/',\n 'author': 'MaxRoyo'\n },\n 'cookies': {\n 'code': 'CC-BY',\n 'title': 'Licence Creative Commons',\n 'description': 'licence Creative Commons Attribution 4.0 International',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/'\n },\n 'source': {\n 'code': 'GPL v3',\n 'url_license': 'http://www.gnu.org/licenses/gpl-3.0.html',\n 'provider_name': 'Progdupeupl',\n 'provider_url': 'http://pdp.microjoe.org',\n },\n 'licence_info_title': 'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n 'ences-sur-zeste-de-savoir/',\n 'licence_info_link': 'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': 'GANDI SAS',\n 'address': '63-65 boulevard Massena - 75013 Paris - France'\n },\n 
'social': {\n 'facebook': 'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': 'https://twitter.com/ZesteDeSavoir',\n 'googleplus': 'https://plus.google.com/u/0/107033688356682807298'\n },\n 'cnil': '1771020',\n },\n 'member': {\n 'bot_account': zds_config.get('member_bot_username', 'admin'),\n 'anonymous_account': zds_config.get('member_anonymous_username', 'anonymous'),\n 'external_account': zds_config.get('member_external_username', 'external'),\n 'bot_group': 'bot',\n 'dev_group': 'devs',\n 'members_per_page': 100,\n 'providers_per_page': 100,\n 'hats_per_page': 50,\n 'users_in_hats_list': 5,\n 'requested_hats_per_page': 100,\n 'update_last_visit_interval': 600, # seconds\n 'old_smileys_allowed': False,\n 'old_smileys_cookie_key': 'use_old_smileys',\n },\n 'hats': {\n 'moderation': 'Staff',\n 'validation': 'Staff',\n 'hats_management': 'Staff',\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n 'gallery_per_page': 21,\n 'images_per_page': 21,\n },\n 'tutorial': {\n 'home_number': 4,\n },\n 'article': {\n 'home_number': 3\n },\n 'opinions': {\n 'home_number': 5\n },\n 'content': {\n 'repo_private_path': join(BASE_DIR, 'contents-private'),\n 'repo_public_path': join(BASE_DIR, 'contents-public'),\n 'extra_contents_dirname': 'extra_contents',\n # can also be 'extra_content_generation_policy': 'WATCHDOG'\n # or 'extra_content_generation_policy': 'NOTHING'\n 'extra_content_generation_policy': 'SYNC',\n 'extra_content_watchdog_dir': join(BASE_DIR, 'watchdog-build'),\n 'max_tree_depth': 3,\n 'default_licence_pk': 7,\n 'content_per_page': 42,\n 'max_last_publications_level_1': 6,\n 'max_last_publications_level_2': 12,\n 'max_last_publications_level_3': 12,\n 'notes_per_page': 25,\n 'helps_per_page': 20,\n 'commits_per_page': 20,\n 'feed_length': 5,\n 'user_page_number': 5,\n 'default_image': join(BASE_DIR, 'fixtures', 'noir_black.png'),\n 'import_image_prefix': 'archive',\n 'build_pdf_when_published': True,\n 'maximum_slug_size': 150,\n 'characters_per_minute': 1500,\n 'editorial_line_link':\n 'https://zestedesavoir.com/articles/222/la-ligne-editoriale-officielle-de-zeste-de-savoir/',\n 'epub_stylesheets': {\n 'toc': Path('toc.css'),\n 'full': Path(BASE_DIR) / 'dist' / 'css' / 'zmd.css',\n 'katex': Path(BASE_DIR) / 'dist' / 'css' / 'katex.min.css'\n },\n 'latex_template_repo': 'NOT_EXISTING_DIR'\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'beta_forum_id': zds_config.get('publications_being_written_forum_id', 1),\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n 'old_post_limit_days': 90,\n # Exclude tags from top tags list. 
Tags listed here should not be relevant for most of users.\n # Be warned exclude too much tags can restrict performance\n 'top_tag_exclu': ['bug', 'suggestion', 'tutoriel', 'beta', 'article']\n },\n 'topic': {\n 'home_number': 5,\n },\n 'comment': {\n 'max_pings': 15,\n # allow to mention (and notify) members in messages\n 'enable_pings': True,\n },\n 'featured_resource': {\n 'featured_per_page': 100,\n 'home_number': 5,\n },\n 'notification': {\n 'per_page': 50,\n },\n 'paginator': {\n 'folding_limit': 4\n },\n 'search': {\n 'mark_keywords': ['javafx', 'haskell', 'groovy', 'powershell', 'latex', 'linux', 'windows'],\n 'results_per_page': 20,\n 'search_groups': {\n 'content': (\n _('Contenus publi\u00e9s'), ['publishedcontent', 'chapter']\n ),\n 'topic': (\n _('Sujets du forum'), ['topic']\n ),\n 'post': (\n _('Messages du forum'), ['post']\n ),\n },\n 'boosts': {\n 'publishedcontent': {\n 'global': 3.0,\n 'if_article': 1.0,\n 'if_tutorial': 1.0,\n 'if_medium_or_big_tutorial': 1.5,\n 'if_opinion': 0.66,\n 'if_opinion_not_picked': 0.5\n },\n 'topic': {\n 'global': 2.0,\n 'if_solved': 1.1,\n 'if_sticky': 1.2,\n 'if_locked': 0.1,\n },\n 'chapter': {\n 'global': 1.5,\n },\n 'post': {\n 'global': 1.0,\n 'if_first': 1.2,\n 'if_useful': 1.5,\n 'ld_ratio_above_1': 1.05,\n 'ld_ratio_below_1': 0.95,\n }\n }\n },\n 'visual_changes': [],\n 'display_search_bar': True,\n 'zmd': {\n 'server': 'http://127.0.0.1:27272',\n 'disable_pings': False\n },\n 'stats_ga_viewid': 'ga:86962671'\n}\n", "path": "zds/settings/abstract_base/zds.py"}]}
3,900
200
gh_patches_debug_28911
rasdani/github-patches
git_diff
hylang__hy-2425
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `hy.M` sugar for import on demand It's annoying to have to import to a gensym when you want to use a function from another module in a macro expansion. Suppose you could say `(hy.M.math.sqrt 2)` to call `math.sqrt` without actually binding `math` or `math.sqrt` to some name in the local scope. In addition to making macros neater, this could be convenient as a general shorthand for using something from a module, particularly a module with a long name, without having to add a separate `import` form. To get a module whose name itself has dots, one could use `/` instead of the dots, so `(hy.M.foo/bar/baz.bing)` would work like `(import foo.bar.baz) (foo.bar.baz.bing)`. Here's a simple implementation: ```python import hy class M: def __call__(self, module_name): import importlib return importlib.import_module(module_name) def __getattr__(self, s): return self('.'.join(hy.unmangle(s).split('/'))) hy.M = M() hy.eval(hy.read_many(''' (print hy.M.sklearn/neighbors.KNeighborsRegressor) ''')) ``` The nice thing about this feature is that it can be added without changing Hy's syntax or touching the set of core macros. `__call__` is provided so you can also say `(hy.M "foo.bar.baz")` to get the module `foo.bar.baz`, which may be more convenient when the module name isn't known until runtime. This sort of feature could probably be provided for requiring macros, too. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hy/__init__.py` Content: ``` 1 try: 2 from hy.version import __version__ 3 except ImportError: 4 __version__ = "unknown" 5 6 7 def _initialize_env_var(env_var, default_val): 8 import os 9 10 return bool(os.environ.get(env_var, default_val)) 11 12 13 import hy.importer # NOQA 14 15 hy.importer._inject_builtins() 16 # we import for side-effects. 17 18 # Import some names on demand so that the dependent modules don't have 19 # to be loaded if they're not needed. 20 21 _jit_imports = dict( 22 read="hy.reader", 23 read_many="hy.reader", 24 mangle="hy.reader", 25 unmangle="hy.reader", 26 eval=["hy.compiler", "hy_eval"], 27 repr=["hy.core.hy_repr", "hy_repr"], 28 repr_register=["hy.core.hy_repr", "hy_repr_register"], 29 gensym="hy.core.util", 30 macroexpand="hy.core.util", 31 macroexpand_1="hy.core.util", 32 disassemble="hy.core.util", 33 as_model="hy.models", 34 REPL="hy.repl", 35 ) 36 37 38 def __getattr__(k): 39 if k == "pyops": 40 global pyops 41 import hy.pyops 42 43 pyops = hy.pyops 44 return pyops 45 46 if k not in _jit_imports: 47 raise AttributeError(f"module {__name__!r} has no attribute {k!r}") 48 v = _jit_imports[k] 49 module, original_name = v if isinstance(v, list) else (v, k) 50 import importlib 51 52 globals()[k] = getattr(importlib.import_module(module), original_name) 53 return globals()[k] 54 ``` Path: `docs/conf.py` Content: ``` 1 # This file is execfile()d with the current directory set to its containing dir. 2 3 import html 4 import os 5 import re 6 import sys 7 import time 8 9 sys.path.insert(0, os.path.abspath("..")) 10 11 extensions = [ 12 "sphinx.ext.napoleon", 13 "sphinx.ext.intersphinx", 14 "sphinx.ext.autodoc", 15 "sphinx.ext.viewcode", 16 "sphinxcontrib.hydomain", 17 ] 18 19 from get_version import __version__ as hy_version 20 21 # Read the Docs might dirty its checkout, so strip the dirty flag. 
22 hy_version = re.sub(r"[+.]dirty\Z", "", hy_version) 23 24 templates_path = ["_templates"] 25 source_suffix = ".rst" 26 27 master_doc = "index" 28 29 # General information about the project. 30 project = "hy" 31 copyright = "%s the authors" % time.strftime("%Y") 32 33 # The version info for the project you're documenting, acts as replacement for 34 # |version| and |release|, also used in various other places throughout the 35 # built documents. 36 # 37 # The short X.Y version. 38 version = ".".join(hy_version.split(".")[:-1]) 39 # The full version, including alpha/beta/rc tags. 40 release = hy_version 41 hy_descriptive_version = html.escape(hy_version) 42 if "+" in hy_version: 43 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" 44 45 exclude_patterns = ["_build", "coreteam.rst"] 46 add_module_names = True 47 48 pygments_style = "sphinx" 49 50 import sphinx_rtd_theme 51 52 html_theme = "sphinx_rtd_theme" 53 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 54 55 # Add any paths that contain custom static files (such as style sheets) here, 56 # relative to this directory. They are copied after the builtin static files, 57 # so a file named "default.css" will overwrite the builtin "default.css". 58 html_static_path = ["_static"] 59 60 html_use_smartypants = False 61 html_show_sphinx = False 62 63 html_context = dict( 64 hy_descriptive_version=hy_descriptive_version) 65 66 highlight_language = "clojure" 67 68 intersphinx_mapping = dict( 69 py=("https://docs.python.org/3/", None), 70 py3_10=("https://docs.python.org/3.10/", None), 71 hyrule=("https://hyrule.readthedocs.io/en/master/", None), 72 ) 73 # ** Generate Cheatsheet 74 import json 75 from itertools import zip_longest 76 from pathlib import Path 77 78 79 def refize(spec): 80 role = ":hy:func:" 81 if isinstance(spec, dict): 82 _name = spec["name"] 83 uri = spec["uri"] 84 if spec.get("internal"): 85 role = ":ref:" 86 else: 87 uri = spec 88 _name = str.split(uri, ".")[-1] 89 return "{}`{} <{}>`".format(role, _name, uri) 90 91 92 def format_refs(refs, indent): 93 args = [iter(map(refize, refs))] 94 ref_groups = zip_longest(*args, fillvalue="") 95 return str.join( 96 " \\\n" + " " * (indent + 3), 97 [str.join(" ", ref_group) for ref_group in ref_groups], 98 ) 99 100 101 def format_row(category, divider_loc): 102 return "{title: <{width}} | {methods}".format( 103 width=divider_loc, 104 title=category["name"], 105 methods=format_refs(category["methods"], divider_loc), 106 ) 107 108 109 def format_table(table_spec): 110 table_name = table_spec["name"] 111 categories = table_spec["categories"] 112 longest_cat_name = max(len(category["name"]) for category in categories) 113 table = [ 114 table_name, 115 "-" * len(table_name), 116 "", 117 "=" * longest_cat_name + " " + "=" * 25, 118 *(format_row(category, longest_cat_name) for category in categories), 119 "=" * longest_cat_name + " " + "=" * 25, 120 "", 121 ] 122 return "\n".join(table) 123 124 125 # Modifications to the cheatsheet should be added in `cheatsheet.json` 126 cheatsheet_spec = json.loads(Path("./docs/cheatsheet.json").read_text()) 127 cheatsheet = [ 128 "..", 129 " DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``", 130 " If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``", 131 "", 132 ".. 
_cheatsheet:", 133 "", 134 "Cheatsheet", 135 "==========", 136 "", 137 *map(format_table, cheatsheet_spec), 138 ] 139 Path("./docs/cheatsheet.rst").write_text("\n".join(cheatsheet)) 140 141 142 # ** Sphinx App Setup 143 144 145 def setup(app): 146 app.add_css_file("overrides.css") 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -70,6 +70,10 @@ py3_10=("https://docs.python.org/3.10/", None), hyrule=("https://hyrule.readthedocs.io/en/master/", None), ) + +import hy +hy.M = type(hy.M) # A trick to enable `hy:autoclass:: hy.M` + # ** Generate Cheatsheet import json from itertools import zip_longest diff --git a/hy/__init__.py b/hy/__init__.py --- a/hy/__init__.py +++ b/hy/__init__.py @@ -15,6 +15,23 @@ hy.importer._inject_builtins() # we import for side-effects. + +class M: + """``hy.M`` is an object that provides syntactic sugar for imports. It allows syntax like ``(hy.M.math.sqrt 2)`` to mean ``(import math) (math.sqrt 2)``, except without bringing ``math`` or ``math.sqrt`` into scope. This is useful in macros to avoid namespace pollution. To refer to a module with dots in its name, use slashes instead: ``hy.M.os/path.basename`` gets the function ``basename`` from the module ``os.path``. + + You can also call ``hy.M`` like a function, as in ``(hy.M "math")``, which is useful when the module name isn't known until run-time. This interface just calls :py:func:`importlib.import_module`, avoiding (1) mangling due to attribute lookup, and (2) the translation of ``/`` to ``.`` in the module name. The advantage of ``(hy.M modname)`` over ``importlib.import_module(modname)`` is merely that it avoids bringing ``importlib`` itself into scope.""" + def __call__(self, module_name): + import importlib + return importlib.import_module(module_name) + def __getattr__(self, s): + import re + return self(hy.mangle(re.sub( + r'/(-*)', + lambda m: '.' + '_' * len(m.group(1)), + hy.unmangle(s)))) +M = M() + + # Import some names on demand so that the dependent modules don't have # to be loaded if they're not needed.
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -70,6 +70,10 @@\n py3_10=(\"https://docs.python.org/3.10/\", None),\n hyrule=(\"https://hyrule.readthedocs.io/en/master/\", None),\n )\n+\n+import hy\n+hy.M = type(hy.M) # A trick to enable `hy:autoclass:: hy.M`\n+\n # ** Generate Cheatsheet\n import json\n from itertools import zip_longest\ndiff --git a/hy/__init__.py b/hy/__init__.py\n--- a/hy/__init__.py\n+++ b/hy/__init__.py\n@@ -15,6 +15,23 @@\n hy.importer._inject_builtins()\n # we import for side-effects.\n \n+\n+class M:\n+ \"\"\"``hy.M`` is an object that provides syntactic sugar for imports. It allows syntax like ``(hy.M.math.sqrt 2)`` to mean ``(import math) (math.sqrt 2)``, except without bringing ``math`` or ``math.sqrt`` into scope. This is useful in macros to avoid namespace pollution. To refer to a module with dots in its name, use slashes instead: ``hy.M.os/path.basename`` gets the function ``basename`` from the module ``os.path``.\n+\n+ You can also call ``hy.M`` like a function, as in ``(hy.M \"math\")``, which is useful when the module name isn't known until run-time. This interface just calls :py:func:`importlib.import_module`, avoiding (1) mangling due to attribute lookup, and (2) the translation of ``/`` to ``.`` in the module name. The advantage of ``(hy.M modname)`` over ``importlib.import_module(modname)`` is merely that it avoids bringing ``importlib`` itself into scope.\"\"\"\n+ def __call__(self, module_name):\n+ import importlib\n+ return importlib.import_module(module_name)\n+ def __getattr__(self, s):\n+ import re\n+ return self(hy.mangle(re.sub(\n+ r'/(-*)',\n+ lambda m: '.' + '_' * len(m.group(1)),\n+ hy.unmangle(s))))\n+M = M()\n+\n+\n # Import some names on demand so that the dependent modules don't have\n # to be loaded if they're not needed.\n", "issue": "`hy.M` sugar for import on demand\nIt's annoying to have to import to a gensym when you want to use a function from another module in a macro expansion. Suppose you could say `(hy.M.math.sqrt 2)` to call `math.sqrt` without actually binding `math` or `math.sqrt` to some name in the local scope. 
In addition to making macros neater, this could be convenient as a general shorthand for using something from a module, particularly a module with a long name, without having to add a separate `import` form.\r\n\r\nTo get a module whose name itself has dots, one could use `/` instead of the dots, so `(hy.M.foo/bar/baz.bing)` would work like `(import foo.bar.baz) (foo.bar.baz.bing)`.\r\n\r\nHere's a simple implementation:\r\n\r\n```python\r\nimport hy\r\n\r\nclass M:\r\n def __call__(self, module_name):\r\n import importlib\r\n return importlib.import_module(module_name)\r\n def __getattr__(self, s):\r\n return self('.'.join(hy.unmangle(s).split('/')))\r\n\r\nhy.M = M()\r\nhy.eval(hy.read_many('''\r\n(print hy.M.sklearn/neighbors.KNeighborsRegressor)\r\n'''))\r\n```\r\n\r\nThe nice thing about this feature is that it can be added without changing Hy's syntax or touching the set of core macros.\r\n\r\n`__call__` is provided so you can also say `(hy.M \"foo.bar.baz\")` to get the module `foo.bar.baz`, which may be more convenient when the module name isn't known until runtime.\r\n\r\nThis sort of feature could probably be provided for requiring macros, too.\n", "before_files": [{"content": "try:\n from hy.version import __version__\nexcept ImportError:\n __version__ = \"unknown\"\n\n\ndef _initialize_env_var(env_var, default_val):\n import os\n\n return bool(os.environ.get(env_var, default_val))\n\n\nimport hy.importer # NOQA\n\nhy.importer._inject_builtins()\n# we import for side-effects.\n\n# Import some names on demand so that the dependent modules don't have\n# to be loaded if they're not needed.\n\n_jit_imports = dict(\n read=\"hy.reader\",\n read_many=\"hy.reader\",\n mangle=\"hy.reader\",\n unmangle=\"hy.reader\",\n eval=[\"hy.compiler\", \"hy_eval\"],\n repr=[\"hy.core.hy_repr\", \"hy_repr\"],\n repr_register=[\"hy.core.hy_repr\", \"hy_repr_register\"],\n gensym=\"hy.core.util\",\n macroexpand=\"hy.core.util\",\n macroexpand_1=\"hy.core.util\",\n disassemble=\"hy.core.util\",\n as_model=\"hy.models\",\n REPL=\"hy.repl\",\n)\n\n\ndef __getattr__(k):\n if k == \"pyops\":\n global pyops\n import hy.pyops\n\n pyops = hy.pyops\n return pyops\n\n if k not in _jit_imports:\n raise AttributeError(f\"module {__name__!r} has no attribute {k!r}\")\n v = _jit_imports[k]\n module, original_name = v if isinstance(v, list) else (v, k)\n import importlib\n\n globals()[k] = getattr(importlib.import_module(module), original_name)\n return globals()[k]\n", "path": "hy/__init__.py"}, {"content": "# This file is execfile()d with the current directory set to its containing dir.\n\nimport html\nimport os\nimport re\nimport sys\nimport time\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinxcontrib.hydomain\",\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r\"[+.]dirty\\Z\", \"\", hy_version)\n\ntemplates_path = [\"_templates\"]\nsource_suffix = \".rst\"\n\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"hy\"\ncopyright = \"%s the authors\" % time.strftime(\"%Y\")\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, 
including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = [\"_build\", \"coreteam.rst\"]\nadd_module_names = True\n\npygments_style = \"sphinx\"\n\nimport sphinx_rtd_theme\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version=hy_descriptive_version)\n\nhighlight_language = \"clojure\"\n\nintersphinx_mapping = dict(\n py=(\"https://docs.python.org/3/\", None),\n py3_10=(\"https://docs.python.org/3.10/\", None),\n hyrule=(\"https://hyrule.readthedocs.io/en/master/\", None),\n)\n# ** Generate Cheatsheet\nimport json\nfrom itertools import zip_longest\nfrom pathlib import Path\n\n\ndef refize(spec):\n role = \":hy:func:\"\n if isinstance(spec, dict):\n _name = spec[\"name\"]\n uri = spec[\"uri\"]\n if spec.get(\"internal\"):\n role = \":ref:\"\n else:\n uri = spec\n _name = str.split(uri, \".\")[-1]\n return \"{}`{} <{}>`\".format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n \" \\\\\\n\" + \" \" * (indent + 3),\n [str.join(\" \", ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return \"{title: <{width}} | {methods}\".format(\n width=divider_loc,\n title=category[\"name\"],\n methods=format_refs(category[\"methods\"], divider_loc),\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec[\"name\"]\n categories = table_spec[\"categories\"]\n longest_cat_name = max(len(category[\"name\"]) for category in categories)\n table = [\n table_name,\n \"-\" * len(table_name),\n \"\",\n \"=\" * longest_cat_name + \" \" + \"=\" * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n \"=\" * longest_cat_name + \" \" + \"=\" * 25,\n \"\",\n ]\n return \"\\n\".join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path(\"./docs/cheatsheet.json\").read_text())\ncheatsheet = [\n \"..\",\n \" DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``\",\n \" If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``\",\n \"\",\n \".. _cheatsheet:\",\n \"\",\n \"Cheatsheet\",\n \"==========\",\n \"\",\n *map(format_table, cheatsheet_spec),\n]\nPath(\"./docs/cheatsheet.rst\").write_text(\"\\n\".join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file(\"overrides.css\")\n", "path": "docs/conf.py"}], "after_files": [{"content": "try:\n from hy.version import __version__\nexcept ImportError:\n __version__ = \"unknown\"\n\n\ndef _initialize_env_var(env_var, default_val):\n import os\n\n return bool(os.environ.get(env_var, default_val))\n\n\nimport hy.importer # NOQA\n\nhy.importer._inject_builtins()\n# we import for side-effects.\n\n\nclass M:\n \"\"\"``hy.M`` is an object that provides syntactic sugar for imports. 
It allows syntax like ``(hy.M.math.sqrt 2)`` to mean ``(import math) (math.sqrt 2)``, except without bringing ``math`` or ``math.sqrt`` into scope. This is useful in macros to avoid namespace pollution. To refer to a module with dots in its name, use slashes instead: ``hy.M.os/path.basename`` gets the function ``basename`` from the module ``os.path``.\n\n You can also call ``hy.M`` like a function, as in ``(hy.M \"math\")``, which is useful when the module name isn't known until run-time. This interface just calls :py:func:`importlib.import_module`, avoiding (1) mangling due to attribute lookup, and (2) the translation of ``/`` to ``.`` in the module name. The advantage of ``(hy.M modname)`` over ``importlib.import_module(modname)`` is merely that it avoids bringing ``importlib`` itself into scope.\"\"\"\n def __call__(self, module_name):\n import importlib\n return importlib.import_module(module_name)\n def __getattr__(self, s):\n import re\n return self(hy.mangle(re.sub(\n r'/(-*)',\n lambda m: '.' + '_' * len(m.group(1)),\n hy.unmangle(s))))\nM = M()\n\n\n# Import some names on demand so that the dependent modules don't have\n# to be loaded if they're not needed.\n\n_jit_imports = dict(\n read=\"hy.reader\",\n read_many=\"hy.reader\",\n mangle=\"hy.reader\",\n unmangle=\"hy.reader\",\n eval=[\"hy.compiler\", \"hy_eval\"],\n repr=[\"hy.core.hy_repr\", \"hy_repr\"],\n repr_register=[\"hy.core.hy_repr\", \"hy_repr_register\"],\n gensym=\"hy.core.util\",\n macroexpand=\"hy.core.util\",\n macroexpand_1=\"hy.core.util\",\n disassemble=\"hy.core.util\",\n as_model=\"hy.models\",\n REPL=\"hy.repl\",\n)\n\n\ndef __getattr__(k):\n if k == \"pyops\":\n global pyops\n import hy.pyops\n\n pyops = hy.pyops\n return pyops\n\n if k not in _jit_imports:\n raise AttributeError(f\"module {__name__!r} has no attribute {k!r}\")\n v = _jit_imports[k]\n module, original_name = v if isinstance(v, list) else (v, k)\n import importlib\n\n globals()[k] = getattr(importlib.import_module(module), original_name)\n return globals()[k]\n", "path": "hy/__init__.py"}, {"content": "# This file is execfile()d with the current directory set to its containing dir.\n\nimport html\nimport os\nimport re\nimport sys\nimport time\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinxcontrib.hydomain\",\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r\"[+.]dirty\\Z\", \"\", hy_version)\n\ntemplates_path = [\"_templates\"]\nsource_suffix = \".rst\"\n\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"hy\"\ncopyright = \"%s the authors\" % time.strftime(\"%Y\")\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = [\"_build\", \"coreteam.rst\"]\nadd_module_names = True\n\npygments_style = \"sphinx\"\n\nimport sphinx_rtd_theme\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# 
Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version=hy_descriptive_version)\n\nhighlight_language = \"clojure\"\n\nintersphinx_mapping = dict(\n py=(\"https://docs.python.org/3/\", None),\n py3_10=(\"https://docs.python.org/3.10/\", None),\n hyrule=(\"https://hyrule.readthedocs.io/en/master/\", None),\n)\n\nimport hy\nhy.M = type(hy.M) # A trick to enable `hy:autoclass:: hy.M`\n\n# ** Generate Cheatsheet\nimport json\nfrom itertools import zip_longest\nfrom pathlib import Path\n\n\ndef refize(spec):\n role = \":hy:func:\"\n if isinstance(spec, dict):\n _name = spec[\"name\"]\n uri = spec[\"uri\"]\n if spec.get(\"internal\"):\n role = \":ref:\"\n else:\n uri = spec\n _name = str.split(uri, \".\")[-1]\n return \"{}`{} <{}>`\".format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n \" \\\\\\n\" + \" \" * (indent + 3),\n [str.join(\" \", ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return \"{title: <{width}} | {methods}\".format(\n width=divider_loc,\n title=category[\"name\"],\n methods=format_refs(category[\"methods\"], divider_loc),\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec[\"name\"]\n categories = table_spec[\"categories\"]\n longest_cat_name = max(len(category[\"name\"]) for category in categories)\n table = [\n table_name,\n \"-\" * len(table_name),\n \"\",\n \"=\" * longest_cat_name + \" \" + \"=\" * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n \"=\" * longest_cat_name + \" \" + \"=\" * 25,\n \"\",\n ]\n return \"\\n\".join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path(\"./docs/cheatsheet.json\").read_text())\ncheatsheet = [\n \"..\",\n \" DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``\",\n \" If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``\",\n \"\",\n \".. _cheatsheet:\",\n \"\",\n \"Cheatsheet\",\n \"==========\",\n \"\",\n *map(format_table, cheatsheet_spec),\n]\nPath(\"./docs/cheatsheet.rst\").write_text(\"\\n\".join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file(\"overrides.css\")\n", "path": "docs/conf.py"}]}
2,440
530
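A minimal standalone sketch of the slash-to-dot module-name translation used in the hylang__hy-2425 golden diff above. It assumes only the Python standard library: Hy's `mangle`/`unmangle` steps are omitted, and the helper name `module_from_slashed_name` is illustrative rather than part of the patch.

```python
import importlib
import re


def module_from_slashed_name(name: str):
    """Translate 'os/path' -> 'os.path' and import the resulting module."""
    # Same substitution as the golden diff: '/' (optionally followed by
    # hyphens) becomes '.' plus one underscore per hyphen. Without Hy's
    # unmangling step the hyphen case cannot arise here, so this reduces
    # to a plain slash-to-dot rewrite.
    module_name = re.sub(r"/(-*)", lambda m: "." + "_" * len(m.group(1)), name)
    return importlib.import_module(module_name)


if __name__ == "__main__":
    ospath = module_from_slashed_name("os/path")
    print(ospath.basename("/tmp/example.txt"))  # prints: example.txt
```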
gh_patches_debug_30173
rasdani/github-patches
git_diff
pyro-ppl__pyro-2271
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MaskedMixture distribution doesn't work properly when Delta distribution is used as a component ### Issue Description MaskedMixture fails when Delta distribution is used as a component distribution. In particular, sampling from MaskedMixture outputs wrong values and also alters the Delta distribution used as a component. ### Code Snippet ```py import sys import torch import pyro import pyro.distributions as dist print(sys.version) print(pyro.__version__) print(torch.__version__) delta = dist.Delta(torch.tensor([0.])) gamma = dist.Gamma(torch.ones(2)*100., torch.ones(1)) m = torch.tensor([0, 1]).bool() print("\nDelta dist before sampling:", delta) masked_mixture = dist.MaskedMixture(m, delta, gamma) print("\nSample masked mixture:", masked_mixture.sample()) print("\nDelta dist after sampling:", delta) ``` returns: ``` 3.7.6 (default, Jan 8 2020, 19:59:22) [GCC 7.3.0] 1.2.0 1.4.0 Delta dist before sampling: Delta(v: tensor([0.]), log_density: tensor([0.])) Sample masked mixture: tensor([103.3137, 103.3137]) Delta dist after sampling: Delta(v: tensor([103.3137]), log_density: tensor([0.])) ``` possible solution (at least it fixes the example above) is to use torch.where in pyro/distributions/mixture.py file like below: ```diff def sample(self, sample_shape=torch.Size()): mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask - result = self.component0.sample(sample_shape) - result[mask] = self.component1.sample(sample_shape)[mask] + result = torch.where(mask, + self.component1.sample(sample_shape), + self.component0.sample(sample_shape)) return result ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyro/distributions/mixture.py` Content: ``` 1 # Copyright (c) 2017-2019 Uber Technologies, Inc. 2 # SPDX-License-Identifier: Apache-2.0 3 4 import torch 5 from torch.distributions import constraints 6 from torch.distributions.utils import lazy_property 7 8 from pyro.distributions.torch_distribution import TorchDistribution 9 from pyro.distributions.util import broadcast_shape 10 11 12 class MaskedConstraint(constraints.Constraint): 13 """ 14 Combines two constraints interleaved elementwise by a mask. 15 16 :param torch.Tensor mask: boolean mask tensor (of dtype ``torch.bool``) 17 :param torch.constraints.Constraint constraint0: constraint that holds 18 wherever ``mask == 0`` 19 :param torch.constraints.Constraint constraint1: constraint that holds 20 wherever ``mask == 1`` 21 """ 22 def __init__(self, mask, constraint0, constraint1): 23 self.mask = mask 24 self.constraint0 = constraint0 25 self.constraint1 = constraint1 26 27 def check(self, value): 28 result = self.constraint0.check(value) 29 mask = self.mask.expand(result.shape) if result.shape != self.mask.shape else self.mask 30 result[mask] = self.constraint1.check(value)[mask] 31 return result 32 33 34 class MaskedMixture(TorchDistribution): 35 """ 36 A masked deterministic mixture of two distributions. 37 38 This is useful when the mask is sampled from another distribution, 39 possibly correlated across the batch. Often the mask can be 40 marginalized out via enumeration. 
41 42 Example:: 43 44 change_point = pyro.sample("change_point", 45 dist.Categorical(torch.ones(len(data) + 1)), 46 infer={'enumerate': 'parallel'}) 47 mask = torch.arange(len(data), dtype=torch.long) >= changepoint 48 with pyro.plate("data", len(data)): 49 pyro.sample("obs", MaskedMixture(mask, dist1, dist2), obs=data) 50 51 :param torch.Tensor mask: A byte tensor toggling between ``component0`` 52 and ``component1``. 53 :param pyro.distributions.TorchDistribution component0: a distribution 54 for batch elements ``mask == 0``. 55 :param pyro.distributions.TorchDistribution component1: a distribution 56 for batch elements ``mask == 1``. 57 """ 58 arg_constraints = {} # nothing can be constrained 59 60 def __init__(self, mask, component0, component1, validate_args=None): 61 if not torch.is_tensor(mask) or mask.dtype != torch.bool: 62 raise ValueError('Expected mask to be a BoolTensor but got {}'.format(type(mask))) 63 if component0.event_shape != component1.event_shape: 64 raise ValueError('components event_shape disagree: {} vs {}' 65 .format(component0.event_shape, component1.event_shape)) 66 batch_shape = broadcast_shape(mask.shape, component0.batch_shape, component1.batch_shape) 67 if mask.shape != batch_shape: 68 mask = mask.expand(batch_shape) 69 if component0.batch_shape != batch_shape: 70 component0 = component0.expand(batch_shape) 71 if component1.batch_shape != batch_shape: 72 component1 = component1.expand(batch_shape) 73 74 self.mask = mask 75 self.component0 = component0 76 self.component1 = component1 77 super(MaskedMixture, self).__init__(batch_shape, component0.event_shape, validate_args) 78 79 # We need to disable _validate_sample on each component since samples are only valid on the 80 # component from which they are drawn. Instead we perform validation using a MaskedConstraint. 
81 self.component0._validate_args = False 82 self.component1._validate_args = False 83 84 @property 85 def has_rsample(self): 86 return self.component0.has_rsample and self.component1.has_rsample 87 88 @constraints.dependent_property 89 def support(self): 90 if self.component0.support is self.component1.support: 91 return self.component0.support 92 return MaskedConstraint(self.mask, self.component0.support, self.component1.support) 93 94 def expand(self, batch_shape): 95 try: 96 return super(MaskedMixture, self).expand(batch_shape) 97 except NotImplementedError: 98 mask = self.mask.expand(batch_shape) 99 component0 = self.component0.expand(batch_shape) 100 component1 = self.component1.expand(batch_shape) 101 return type(self)(mask, component0, component1) 102 103 def sample(self, sample_shape=torch.Size()): 104 mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask 105 result = self.component0.sample(sample_shape) 106 result[mask] = self.component1.sample(sample_shape)[mask] 107 return result 108 109 def rsample(self, sample_shape=torch.Size()): 110 mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask 111 result = self.component0.rsample(sample_shape) 112 result[mask] = self.component1.rsample(sample_shape)[mask] 113 return result 114 115 def log_prob(self, value): 116 value_shape = broadcast_shape(value.shape, self.batch_shape + self.event_shape) 117 if value.shape != value_shape: 118 value = value.expand(value_shape) 119 if self._validate_args: 120 self._validate_sample(value) 121 mask_shape = value_shape[:len(value_shape) - len(self.event_shape)] 122 mask = self.mask 123 if mask.shape != mask_shape: 124 mask = mask.expand(mask_shape) 125 result = self.component0.log_prob(value) 126 result[mask] = self.component1.log_prob(value)[mask] 127 return result 128 129 @lazy_property 130 def mean(self): 131 result = self.component0.mean.clone() 132 result[self.mask] = self.component1.mean[self.mask] 133 return result 134 135 @lazy_property 136 def variance(self): 137 result = self.component0.variance.clone() 138 result[self.mask] = self.component1.variance[self.mask] 139 return result 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyro/distributions/mixture.py b/pyro/distributions/mixture.py --- a/pyro/distributions/mixture.py +++ b/pyro/distributions/mixture.py @@ -101,15 +101,19 @@ return type(self)(mask, component0, component1) def sample(self, sample_shape=torch.Size()): - mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask - result = self.component0.sample(sample_shape) - result[mask] = self.component1.sample(sample_shape)[mask] + mask = self.mask.reshape(self.mask.shape + (1,) * self.event_dim) + mask = mask.expand(sample_shape + self.shape()) + result = torch.where(mask, + self.component1.sample(sample_shape), + self.component0.sample(sample_shape)) return result def rsample(self, sample_shape=torch.Size()): - mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask - result = self.component0.rsample(sample_shape) - result[mask] = self.component1.rsample(sample_shape)[mask] + mask = self.mask.reshape(self.mask.shape + (1,) * self.event_dim) + mask = mask.expand(sample_shape + self.shape()) + result = torch.where(mask, + self.component1.rsample(sample_shape), + self.component0.rsample(sample_shape)) return result def log_prob(self, value): @@ -122,8 +126,9 @@ mask = self.mask if mask.shape != mask_shape: mask = mask.expand(mask_shape) - result = self.component0.log_prob(value) - result[mask] = self.component1.log_prob(value)[mask] + result = torch.where(mask, + self.component1.log_prob(value), + self.component0.log_prob(value)) return result @lazy_property
{"golden_diff": "diff --git a/pyro/distributions/mixture.py b/pyro/distributions/mixture.py\n--- a/pyro/distributions/mixture.py\n+++ b/pyro/distributions/mixture.py\n@@ -101,15 +101,19 @@\n return type(self)(mask, component0, component1)\n \n def sample(self, sample_shape=torch.Size()):\n- mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask\n- result = self.component0.sample(sample_shape)\n- result[mask] = self.component1.sample(sample_shape)[mask]\n+ mask = self.mask.reshape(self.mask.shape + (1,) * self.event_dim)\n+ mask = mask.expand(sample_shape + self.shape())\n+ result = torch.where(mask,\n+ self.component1.sample(sample_shape),\n+ self.component0.sample(sample_shape))\n return result\n \n def rsample(self, sample_shape=torch.Size()):\n- mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask\n- result = self.component0.rsample(sample_shape)\n- result[mask] = self.component1.rsample(sample_shape)[mask]\n+ mask = self.mask.reshape(self.mask.shape + (1,) * self.event_dim)\n+ mask = mask.expand(sample_shape + self.shape())\n+ result = torch.where(mask,\n+ self.component1.rsample(sample_shape),\n+ self.component0.rsample(sample_shape))\n return result\n \n def log_prob(self, value):\n@@ -122,8 +126,9 @@\n mask = self.mask\n if mask.shape != mask_shape:\n mask = mask.expand(mask_shape)\n- result = self.component0.log_prob(value)\n- result[mask] = self.component1.log_prob(value)[mask]\n+ result = torch.where(mask,\n+ self.component1.log_prob(value),\n+ self.component0.log_prob(value))\n return result\n \n @lazy_property\n", "issue": "MaskedMixture distribution doesn't work properly when Delta distribution is used as a component\n### Issue Description\r\nMaskedMixture fails when Delta distribution is used as a component distribution. 
In particular, sampling from MaskedMixture outputs wrong values and also alters the Delta distribution used as a component.\r\n\r\n### Code Snippet\r\n```py\r\nimport sys\r\nimport torch\r\nimport pyro\r\nimport pyro.distributions as dist\r\n\r\nprint(sys.version)\r\nprint(pyro.__version__)\r\nprint(torch.__version__)\r\n\r\ndelta = dist.Delta(torch.tensor([0.]))\r\ngamma = dist.Gamma(torch.ones(2)*100., torch.ones(1))\r\nm = torch.tensor([0, 1]).bool()\r\n\r\nprint(\"\\nDelta dist before sampling:\", delta)\r\n\r\nmasked_mixture = dist.MaskedMixture(m, delta, gamma)\r\nprint(\"\\nSample masked mixture:\", masked_mixture.sample())\r\n\r\nprint(\"\\nDelta dist after sampling:\", delta)\r\n```\r\nreturns:\r\n```\r\n3.7.6 (default, Jan 8 2020, 19:59:22)\r\n[GCC 7.3.0]\r\n1.2.0\r\n1.4.0\r\n\r\nDelta dist before sampling: Delta(v: tensor([0.]), log_density: tensor([0.]))\r\n\r\nSample masked mixture: tensor([103.3137, 103.3137])\r\n\r\nDelta dist after sampling: Delta(v: tensor([103.3137]), log_density: tensor([0.]))\r\n```\r\npossible solution (at least it fixes the example above) is to use torch.where in pyro/distributions/mixture.py file like below:\r\n\r\n```diff\r\n def sample(self, sample_shape=torch.Size()):\r\n mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask\r\n- result = self.component0.sample(sample_shape)\r\n- result[mask] = self.component1.sample(sample_shape)[mask]\r\n+ result = torch.where(mask,\r\n+ self.component1.sample(sample_shape),\r\n+ self.component0.sample(sample_shape))\r\n return result\r\n```\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import lazy_property\n\nfrom pyro.distributions.torch_distribution import TorchDistribution\nfrom pyro.distributions.util import broadcast_shape\n\n\nclass MaskedConstraint(constraints.Constraint):\n \"\"\"\n Combines two constraints interleaved elementwise by a mask.\n\n :param torch.Tensor mask: boolean mask tensor (of dtype ``torch.bool``)\n :param torch.constraints.Constraint constraint0: constraint that holds\n wherever ``mask == 0``\n :param torch.constraints.Constraint constraint1: constraint that holds\n wherever ``mask == 1``\n \"\"\"\n def __init__(self, mask, constraint0, constraint1):\n self.mask = mask\n self.constraint0 = constraint0\n self.constraint1 = constraint1\n\n def check(self, value):\n result = self.constraint0.check(value)\n mask = self.mask.expand(result.shape) if result.shape != self.mask.shape else self.mask\n result[mask] = self.constraint1.check(value)[mask]\n return result\n\n\nclass MaskedMixture(TorchDistribution):\n \"\"\"\n A masked deterministic mixture of two distributions.\n\n This is useful when the mask is sampled from another distribution,\n possibly correlated across the batch. 
Often the mask can be\n marginalized out via enumeration.\n\n Example::\n\n change_point = pyro.sample(\"change_point\",\n dist.Categorical(torch.ones(len(data) + 1)),\n infer={'enumerate': 'parallel'})\n mask = torch.arange(len(data), dtype=torch.long) >= changepoint\n with pyro.plate(\"data\", len(data)):\n pyro.sample(\"obs\", MaskedMixture(mask, dist1, dist2), obs=data)\n\n :param torch.Tensor mask: A byte tensor toggling between ``component0``\n and ``component1``.\n :param pyro.distributions.TorchDistribution component0: a distribution\n for batch elements ``mask == 0``.\n :param pyro.distributions.TorchDistribution component1: a distribution\n for batch elements ``mask == 1``.\n \"\"\"\n arg_constraints = {} # nothing can be constrained\n\n def __init__(self, mask, component0, component1, validate_args=None):\n if not torch.is_tensor(mask) or mask.dtype != torch.bool:\n raise ValueError('Expected mask to be a BoolTensor but got {}'.format(type(mask)))\n if component0.event_shape != component1.event_shape:\n raise ValueError('components event_shape disagree: {} vs {}'\n .format(component0.event_shape, component1.event_shape))\n batch_shape = broadcast_shape(mask.shape, component0.batch_shape, component1.batch_shape)\n if mask.shape != batch_shape:\n mask = mask.expand(batch_shape)\n if component0.batch_shape != batch_shape:\n component0 = component0.expand(batch_shape)\n if component1.batch_shape != batch_shape:\n component1 = component1.expand(batch_shape)\n\n self.mask = mask\n self.component0 = component0\n self.component1 = component1\n super(MaskedMixture, self).__init__(batch_shape, component0.event_shape, validate_args)\n\n # We need to disable _validate_sample on each component since samples are only valid on the\n # component from which they are drawn. 
Instead we perform validation using a MaskedConstraint.\n self.component0._validate_args = False\n self.component1._validate_args = False\n\n @property\n def has_rsample(self):\n return self.component0.has_rsample and self.component1.has_rsample\n\n @constraints.dependent_property\n def support(self):\n if self.component0.support is self.component1.support:\n return self.component0.support\n return MaskedConstraint(self.mask, self.component0.support, self.component1.support)\n\n def expand(self, batch_shape):\n try:\n return super(MaskedMixture, self).expand(batch_shape)\n except NotImplementedError:\n mask = self.mask.expand(batch_shape)\n component0 = self.component0.expand(batch_shape)\n component1 = self.component1.expand(batch_shape)\n return type(self)(mask, component0, component1)\n\n def sample(self, sample_shape=torch.Size()):\n mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask\n result = self.component0.sample(sample_shape)\n result[mask] = self.component1.sample(sample_shape)[mask]\n return result\n\n def rsample(self, sample_shape=torch.Size()):\n mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask\n result = self.component0.rsample(sample_shape)\n result[mask] = self.component1.rsample(sample_shape)[mask]\n return result\n\n def log_prob(self, value):\n value_shape = broadcast_shape(value.shape, self.batch_shape + self.event_shape)\n if value.shape != value_shape:\n value = value.expand(value_shape)\n if self._validate_args:\n self._validate_sample(value)\n mask_shape = value_shape[:len(value_shape) - len(self.event_shape)]\n mask = self.mask\n if mask.shape != mask_shape:\n mask = mask.expand(mask_shape)\n result = self.component0.log_prob(value)\n result[mask] = self.component1.log_prob(value)[mask]\n return result\n\n @lazy_property\n def mean(self):\n result = self.component0.mean.clone()\n result[self.mask] = self.component1.mean[self.mask]\n return result\n\n @lazy_property\n def variance(self):\n result = self.component0.variance.clone()\n result[self.mask] = self.component1.variance[self.mask]\n return result\n", "path": "pyro/distributions/mixture.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import lazy_property\n\nfrom pyro.distributions.torch_distribution import TorchDistribution\nfrom pyro.distributions.util import broadcast_shape\n\n\nclass MaskedConstraint(constraints.Constraint):\n \"\"\"\n Combines two constraints interleaved elementwise by a mask.\n\n :param torch.Tensor mask: boolean mask tensor (of dtype ``torch.bool``)\n :param torch.constraints.Constraint constraint0: constraint that holds\n wherever ``mask == 0``\n :param torch.constraints.Constraint constraint1: constraint that holds\n wherever ``mask == 1``\n \"\"\"\n def __init__(self, mask, constraint0, constraint1):\n self.mask = mask\n self.constraint0 = constraint0\n self.constraint1 = constraint1\n\n def check(self, value):\n result = self.constraint0.check(value)\n mask = self.mask.expand(result.shape) if result.shape != self.mask.shape else self.mask\n result[mask] = self.constraint1.check(value)[mask]\n return result\n\n\nclass MaskedMixture(TorchDistribution):\n \"\"\"\n A masked deterministic mixture of two distributions.\n\n This is useful when the mask is sampled from another distribution,\n possibly correlated across the batch. 
Often the mask can be\n marginalized out via enumeration.\n\n Example::\n\n change_point = pyro.sample(\"change_point\",\n dist.Categorical(torch.ones(len(data) + 1)),\n infer={'enumerate': 'parallel'})\n mask = torch.arange(len(data), dtype=torch.long) >= changepoint\n with pyro.plate(\"data\", len(data)):\n pyro.sample(\"obs\", MaskedMixture(mask, dist1, dist2), obs=data)\n\n :param torch.Tensor mask: A byte tensor toggling between ``component0``\n and ``component1``.\n :param pyro.distributions.TorchDistribution component0: a distribution\n for batch elements ``mask == 0``.\n :param pyro.distributions.TorchDistribution component1: a distribution\n for batch elements ``mask == 1``.\n \"\"\"\n arg_constraints = {} # nothing can be constrained\n\n def __init__(self, mask, component0, component1, validate_args=None):\n if not torch.is_tensor(mask) or mask.dtype != torch.bool:\n raise ValueError('Expected mask to be a BoolTensor but got {}'.format(type(mask)))\n if component0.event_shape != component1.event_shape:\n raise ValueError('components event_shape disagree: {} vs {}'\n .format(component0.event_shape, component1.event_shape))\n batch_shape = broadcast_shape(mask.shape, component0.batch_shape, component1.batch_shape)\n if mask.shape != batch_shape:\n mask = mask.expand(batch_shape)\n if component0.batch_shape != batch_shape:\n component0 = component0.expand(batch_shape)\n if component1.batch_shape != batch_shape:\n component1 = component1.expand(batch_shape)\n\n self.mask = mask\n self.component0 = component0\n self.component1 = component1\n super(MaskedMixture, self).__init__(batch_shape, component0.event_shape, validate_args)\n\n # We need to disable _validate_sample on each component since samples are only valid on the\n # component from which they are drawn. 
Instead we perform validation using a MaskedConstraint.\n self.component0._validate_args = False\n self.component1._validate_args = False\n\n @property\n def has_rsample(self):\n return self.component0.has_rsample and self.component1.has_rsample\n\n @constraints.dependent_property\n def support(self):\n if self.component0.support is self.component1.support:\n return self.component0.support\n return MaskedConstraint(self.mask, self.component0.support, self.component1.support)\n\n def expand(self, batch_shape):\n try:\n return super(MaskedMixture, self).expand(batch_shape)\n except NotImplementedError:\n mask = self.mask.expand(batch_shape)\n component0 = self.component0.expand(batch_shape)\n component1 = self.component1.expand(batch_shape)\n return type(self)(mask, component0, component1)\n\n def sample(self, sample_shape=torch.Size()):\n mask = self.mask.reshape(self.mask.shape + (1,) * self.event_dim)\n mask = mask.expand(sample_shape + self.shape())\n result = torch.where(mask,\n self.component1.sample(sample_shape),\n self.component0.sample(sample_shape))\n return result\n\n def rsample(self, sample_shape=torch.Size()):\n mask = self.mask.reshape(self.mask.shape + (1,) * self.event_dim)\n mask = mask.expand(sample_shape + self.shape())\n result = torch.where(mask,\n self.component1.rsample(sample_shape),\n self.component0.rsample(sample_shape))\n return result\n\n def log_prob(self, value):\n value_shape = broadcast_shape(value.shape, self.batch_shape + self.event_shape)\n if value.shape != value_shape:\n value = value.expand(value_shape)\n if self._validate_args:\n self._validate_sample(value)\n mask_shape = value_shape[:len(value_shape) - len(self.event_shape)]\n mask = self.mask\n if mask.shape != mask_shape:\n mask = mask.expand(mask_shape)\n result = torch.where(mask,\n self.component1.log_prob(value),\n self.component0.log_prob(value))\n return result\n\n @lazy_property\n def mean(self):\n result = self.component0.mean.clone()\n result[self.mask] = self.component1.mean[self.mask]\n return result\n\n @lazy_property\n def variance(self):\n result = self.component0.variance.clone()\n result[self.mask] = self.component1.variance[self.mask]\n return result\n", "path": "pyro/distributions/mixture.py"}]}
2,250
416
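A minimal standalone sketch of the fix pattern from the pyro-ppl__pyro-2271 golden diff above: selecting between two components' samples with `torch.where` instead of in-place masked assignment, so neither component distribution is mutated. The concrete distributions and shapes are arbitrary stand-ins (plain `torch.distributions` is used instead of Pyro, and a very narrow Normal stands in for the Delta component).

```python
import torch
import torch.distributions as dist

# Two "components": a near-deterministic Normal standing in for Delta(0),
# and a Gamma concentrated around 100.
component0 = dist.Normal(torch.zeros(2), torch.full((2,), 1e-6))
component1 = dist.Gamma(torch.ones(2) * 100.0, torch.ones(2))
mask = torch.tensor([False, True])

# Draw from both components and pick elementwise. No in-place writes, so
# the components' parameters are left untouched.
sample = torch.where(mask, component1.sample(), component0.sample())
print(sample)  # element 0 is ~0, element 1 is a Gamma(100, 1) draw
```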
gh_patches_debug_15485
rasdani/github-patches
git_diff
keras-team__autokeras-1145
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- IO API, multi-modal classification, predict method problem ### Bug Description IO API, multi-modal classification, predict method problem ### Bug Reproduction https://github.com/datamllab/automl-in-action-notebooks/blob/master/3.4.2-Functional-API-Multi-Input.ipynb ### Setup Details Include the details about the versions of: - OS type and version: - Python: - autokeras: 1.0.2 - keras-tuner: - scikit-learn: - numpy: - pandas: - tensorflow: 2.1.0 ### Additional context <!--- If applicable, add any other context about the problem. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `autokeras/keras_layers.py` Content: ``` 1 import inspect 2 3 import tensorflow as tf 4 from tensorflow.keras.layers.experimental import preprocessing 5 from tensorflow.python.keras.layers.preprocessing import index_lookup 6 from tensorflow.python.util import nest 7 8 CombinerPreprocessingLayer = inspect.getmro(preprocessing.Normalization)[1] 9 Combiner = inspect.getmro(preprocessing.Normalization()._combiner.__class__)[1] 10 11 INT = 'int' 12 NONE = 'none' 13 ONE_HOT = 'one-hot' 14 15 16 class MultiColumnCategoricalEncoding(preprocessing.PreprocessingLayer): 17 """Encode the categorical features to numerical features. 18 19 # Arguments 20 encoding: A list of strings, which has the same number of elements as the 21 columns in the structured data. Each of the strings specifies the 22 encoding method used for the corresponding column. Use 'int' for 23 categorical columns and 'none' for numerical columns. 24 """ 25 26 # TODO: Support one-hot encoding. 27 # TODO: Support frequency encoding. 28 29 def __init__(self, encoding, **kwargs): 30 super().__init__(**kwargs) 31 self.encoding = encoding 32 self.encoding_layers = [] 33 for encoding in self.encoding: 34 if encoding == NONE: 35 self.encoding_layers.append(None) 36 elif encoding == INT: 37 self.encoding_layers.append(index_lookup.IndexLookup()) 38 elif encoding == ONE_HOT: 39 self.encoding_layers.append(None) 40 41 def build(self, input_shape): 42 for encoding_layer in self.encoding_layers: 43 if encoding_layer is not None: 44 encoding_layer.build(tf.TensorShape([1])) 45 46 def call(self, inputs): 47 input_nodes = nest.flatten(inputs)[0] 48 split_inputs = tf.split(input_nodes, [1] * len(self.encoding), axis=-1) 49 output_nodes = [] 50 for input_node, encoding_layer in zip(split_inputs, self.encoding_layers): 51 if encoding_layer is None: 52 output_nodes.append(tf.strings.to_number(input_node, tf.float32)) 53 else: 54 output_nodes.append(tf.cast(encoding_layer(input_node), tf.float32)) 55 return tf.keras.layers.Concatenate()(output_nodes) 56 57 def adapt(self, data): 58 for index, encoding_layer in enumerate(self.encoding_layers): 59 if encoding_layer is None: 60 continue 61 data_column = data.map(lambda x: tf.slice(x, [0, index], [-1, 1])) 62 encoding_layer.adapt(data_column) 63 64 def get_config(self): 65 config = { 66 'encoding': self.encoding, 67 } 68 base_config = super().get_config() 69 return dict(list(base_config.items()) + list(config.items())) 70 71 72 CUSTOM_OBJECTS = { 73 'MultiColumnCategoricalEncoding': MultiColumnCategoricalEncoding, 74 } 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/autokeras/keras_layers.py b/autokeras/keras_layers.py --- a/autokeras/keras_layers.py +++ b/autokeras/keras_layers.py @@ -49,7 +49,12 @@ output_nodes = [] for input_node, encoding_layer in zip(split_inputs, self.encoding_layers): if encoding_layer is None: - output_nodes.append(tf.strings.to_number(input_node, tf.float32)) + number = tf.strings.to_number(input_node, tf.float32) + # Replace NaN with 0. + imputed = tf.where(tf.math.is_nan(number), + tf.zeros_like(number), + number) + output_nodes.append(imputed) else: output_nodes.append(tf.cast(encoding_layer(input_node), tf.float32)) return tf.keras.layers.Concatenate()(output_nodes)
{"golden_diff": "diff --git a/autokeras/keras_layers.py b/autokeras/keras_layers.py\n--- a/autokeras/keras_layers.py\n+++ b/autokeras/keras_layers.py\n@@ -49,7 +49,12 @@\n output_nodes = []\n for input_node, encoding_layer in zip(split_inputs, self.encoding_layers):\n if encoding_layer is None:\n- output_nodes.append(tf.strings.to_number(input_node, tf.float32))\n+ number = tf.strings.to_number(input_node, tf.float32)\n+ # Replace NaN with 0.\n+ imputed = tf.where(tf.math.is_nan(number),\n+ tf.zeros_like(number),\n+ number)\n+ output_nodes.append(imputed)\n else:\n output_nodes.append(tf.cast(encoding_layer(input_node), tf.float32))\n return tf.keras.layers.Concatenate()(output_nodes)\n", "issue": "IO API, multi-modal classification, predict method problem\n### Bug Description\r\nIO API, multi-modal classification, predict method problem\r\n\r\n\r\n### Bug Reproduction\r\n\r\nhttps://github.com/datamllab/automl-in-action-notebooks/blob/master/3.4.2-Functional-API-Multi-Input.ipynb\r\n\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version:\r\n - Python: \r\n - autokeras: 1.0.2\r\n - keras-tuner:\r\n - scikit-learn:\r\n - numpy:\r\n - pandas:\r\n - tensorflow: 2.1.0\r\n\r\n### Additional context\r\n<!---\r\nIf applicable, add any other context about the problem.\r\n-->\r\n\n", "before_files": [{"content": "import inspect\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow.python.keras.layers.preprocessing import index_lookup\nfrom tensorflow.python.util import nest\n\nCombinerPreprocessingLayer = inspect.getmro(preprocessing.Normalization)[1]\nCombiner = inspect.getmro(preprocessing.Normalization()._combiner.__class__)[1]\n\nINT = 'int'\nNONE = 'none'\nONE_HOT = 'one-hot'\n\n\nclass MultiColumnCategoricalEncoding(preprocessing.PreprocessingLayer):\n \"\"\"Encode the categorical features to numerical features.\n\n # Arguments\n encoding: A list of strings, which has the same number of elements as the\n columns in the structured data. Each of the strings specifies the\n encoding method used for the corresponding column. 
Use 'int' for\n categorical columns and 'none' for numerical columns.\n \"\"\"\n\n # TODO: Support one-hot encoding.\n # TODO: Support frequency encoding.\n\n def __init__(self, encoding, **kwargs):\n super().__init__(**kwargs)\n self.encoding = encoding\n self.encoding_layers = []\n for encoding in self.encoding:\n if encoding == NONE:\n self.encoding_layers.append(None)\n elif encoding == INT:\n self.encoding_layers.append(index_lookup.IndexLookup())\n elif encoding == ONE_HOT:\n self.encoding_layers.append(None)\n\n def build(self, input_shape):\n for encoding_layer in self.encoding_layers:\n if encoding_layer is not None:\n encoding_layer.build(tf.TensorShape([1]))\n\n def call(self, inputs):\n input_nodes = nest.flatten(inputs)[0]\n split_inputs = tf.split(input_nodes, [1] * len(self.encoding), axis=-1)\n output_nodes = []\n for input_node, encoding_layer in zip(split_inputs, self.encoding_layers):\n if encoding_layer is None:\n output_nodes.append(tf.strings.to_number(input_node, tf.float32))\n else:\n output_nodes.append(tf.cast(encoding_layer(input_node), tf.float32))\n return tf.keras.layers.Concatenate()(output_nodes)\n\n def adapt(self, data):\n for index, encoding_layer in enumerate(self.encoding_layers):\n if encoding_layer is None:\n continue\n data_column = data.map(lambda x: tf.slice(x, [0, index], [-1, 1]))\n encoding_layer.adapt(data_column)\n\n def get_config(self):\n config = {\n 'encoding': self.encoding,\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nCUSTOM_OBJECTS = {\n 'MultiColumnCategoricalEncoding': MultiColumnCategoricalEncoding,\n}\n", "path": "autokeras/keras_layers.py"}], "after_files": [{"content": "import inspect\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow.python.keras.layers.preprocessing import index_lookup\nfrom tensorflow.python.util import nest\n\nCombinerPreprocessingLayer = inspect.getmro(preprocessing.Normalization)[1]\nCombiner = inspect.getmro(preprocessing.Normalization()._combiner.__class__)[1]\n\nINT = 'int'\nNONE = 'none'\nONE_HOT = 'one-hot'\n\n\nclass MultiColumnCategoricalEncoding(preprocessing.PreprocessingLayer):\n \"\"\"Encode the categorical features to numerical features.\n\n # Arguments\n encoding: A list of strings, which has the same number of elements as the\n columns in the structured data. Each of the strings specifies the\n encoding method used for the corresponding column. 
Use 'int' for\n categorical columns and 'none' for numerical columns.\n \"\"\"\n\n # TODO: Support one-hot encoding.\n # TODO: Support frequency encoding.\n\n def __init__(self, encoding, **kwargs):\n super().__init__(**kwargs)\n self.encoding = encoding\n self.encoding_layers = []\n for encoding in self.encoding:\n if encoding == NONE:\n self.encoding_layers.append(None)\n elif encoding == INT:\n self.encoding_layers.append(index_lookup.IndexLookup())\n elif encoding == ONE_HOT:\n self.encoding_layers.append(None)\n\n def build(self, input_shape):\n for encoding_layer in self.encoding_layers:\n if encoding_layer is not None:\n encoding_layer.build(tf.TensorShape([1]))\n\n def call(self, inputs):\n input_nodes = nest.flatten(inputs)[0]\n split_inputs = tf.split(input_nodes, [1] * len(self.encoding), axis=-1)\n output_nodes = []\n for input_node, encoding_layer in zip(split_inputs, self.encoding_layers):\n if encoding_layer is None:\n number = tf.strings.to_number(input_node, tf.float32)\n # Replace NaN with 0.\n imputed = tf.where(tf.math.is_nan(number),\n tf.zeros_like(number),\n number)\n output_nodes.append(imputed)\n else:\n output_nodes.append(tf.cast(encoding_layer(input_node), tf.float32))\n return tf.keras.layers.Concatenate()(output_nodes)\n\n def adapt(self, data):\n for index, encoding_layer in enumerate(self.encoding_layers):\n if encoding_layer is None:\n continue\n data_column = data.map(lambda x: tf.slice(x, [0, index], [-1, 1]))\n encoding_layer.adapt(data_column)\n\n def get_config(self):\n config = {\n 'encoding': self.encoding,\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nCUSTOM_OBJECTS = {\n 'MultiColumnCategoricalEncoding': MultiColumnCategoricalEncoding,\n}\n", "path": "autokeras/keras_layers.py"}]}
1,107
193
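For readers skimming the record above: the essence of its golden diff is a NaN-imputation idiom (`tf.where` over `tf.math.is_nan`). A minimal standalone sketch of that idiom — not taken from the record itself; the toy tensor and the TensorFlow 2.x eager-mode assumption are purely illustrative:

```python
import tensorflow as tf

# Toy tensor standing in for a numerical column parsed from strings,
# where a missing value has become NaN.
x = tf.constant([[1.0, float("nan"), 3.0]])

# Same pattern as the patch: keep finite values, replace NaN entries with 0
# so downstream layers receive valid numbers.
imputed = tf.where(tf.math.is_nan(x), tf.zeros_like(x), x)

print(imputed.numpy())  # [[1. 0. 3.]]
```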
gh_patches_debug_9724
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-6648
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Duplicate answers in DNS queries #### Problem Description Two duplicate records are returned for each unique A/AAAA record in a DNS query when using DNS mode. #### Steps to reproduce the behavior: ##### Without mitmproxy 1. Run `dig +short google.com` 2. Correct output: `142.250.193.206` ##### With mitmproxy 1. Start mitmproxy `mitmproxy --mode dns@53535` 2. Run `dig @127.0.0.1 -p 53535 +short google.com` 3. Output with duplicates: ``` 142.250.193.206 142.250.193.206 142.250.193.206 ``` #### System Information ``` Mitmproxy: 11.0.0.dev (+19, commit d638213) Python: 3.12.1 OpenSSL: OpenSSL 3.1.4 24 Oct 2023 Platform: Linux-6.6.14-200.fc39.x86_64-x86_64-with-glibc2.38 ``` #### Additional Notes This is happening because the `dns_resolver` addon calls `getaddrinfo` here: https://github.com/mitmproxy/mitmproxy/blob/1a02ebb89f6765d827f2fe0086dfe5960eb6e093/mitmproxy/addons/dns_resolver.py#L29 Which is returning one tuple each for UDP, TCP and a raw socket. We could just do the following since I assume all requests are currently using UDP: ```python addrinfos = await loop.getaddrinfo(host=question.name, port=0, family=family, type=socket.SOCK_DGRAM) ``` What do you think? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/addons/dns_resolver.py` Content: ``` 1 import asyncio 2 import ipaddress 3 import socket 4 from collections.abc import Callable 5 from collections.abc import Iterable 6 7 from mitmproxy import dns 8 from mitmproxy.proxy import mode_specs 9 10 IP4_PTR_SUFFIX = ".in-addr.arpa" 11 IP6_PTR_SUFFIX = ".ip6.arpa" 12 13 14 class ResolveError(Exception): 15 """Exception thrown by different resolve methods.""" 16 17 def __init__(self, response_code: int) -> None: 18 assert response_code != dns.response_codes.NOERROR 19 self.response_code = response_code 20 21 22 async def resolve_question_by_name( 23 question: dns.Question, 24 loop: asyncio.AbstractEventLoop, 25 family: socket.AddressFamily, 26 ip: Callable[[str], ipaddress.IPv4Address | ipaddress.IPv6Address], 27 ) -> Iterable[dns.ResourceRecord]: 28 try: 29 addrinfos = await loop.getaddrinfo(host=question.name, port=0, family=family) 30 except socket.gaierror as e: 31 if e.errno == socket.EAI_NONAME: 32 raise ResolveError(dns.response_codes.NXDOMAIN) 33 else: 34 # NOTE might fail on Windows for IPv6 queries: 35 # https://stackoverflow.com/questions/66755681/getaddrinfo-c-on-windows-not-handling-ipv6-correctly-returning-error-code-1 36 raise ResolveError(dns.response_codes.SERVFAIL) # pragma: no cover 37 return map( 38 lambda addrinfo: dns.ResourceRecord( 39 name=question.name, 40 type=question.type, 41 class_=question.class_, 42 ttl=dns.ResourceRecord.DEFAULT_TTL, 43 data=ip(addrinfo[4][0]).packed, 44 ), 45 addrinfos, 46 ) 47 48 49 async def resolve_question_by_addr( 50 question: dns.Question, 51 loop: asyncio.AbstractEventLoop, 52 suffix: str, 53 sockaddr: Callable[[list[str]], tuple[str, int] | tuple[str, int, int, int]], 54 ) -> Iterable[dns.ResourceRecord]: 55 try: 56 addr = sockaddr(question.name[: -len(suffix)].split(".")[::-1]) 57 except ValueError: 58 raise ResolveError(dns.response_codes.FORMERR) 59 try: 60 name, _ = await loop.getnameinfo(addr, flags=socket.NI_NAMEREQD) 61 except socket.gaierror as e: 62 raise ResolveError( 63 dns.response_codes.NXDOMAIN 64 if e.errno == socket.EAI_NONAME 65 else 
dns.response_codes.SERVFAIL 66 ) 67 return [ 68 dns.ResourceRecord( 69 name=question.name, 70 type=question.type, 71 class_=question.class_, 72 ttl=dns.ResourceRecord.DEFAULT_TTL, 73 data=dns.domain_names.pack(name), 74 ) 75 ] 76 77 78 async def resolve_question( 79 question: dns.Question, loop: asyncio.AbstractEventLoop 80 ) -> Iterable[dns.ResourceRecord]: 81 """Resolve the question into resource record(s), throwing ResolveError if an error condition occurs.""" 82 83 if question.class_ != dns.classes.IN: 84 raise ResolveError(dns.response_codes.NOTIMP) 85 if question.type == dns.types.A: 86 return await resolve_question_by_name( 87 question, loop, socket.AddressFamily.AF_INET, ipaddress.IPv4Address 88 ) 89 elif question.type == dns.types.AAAA: 90 return await resolve_question_by_name( 91 question, loop, socket.AddressFamily.AF_INET6, ipaddress.IPv6Address 92 ) 93 elif question.type == dns.types.PTR: 94 name_lower = question.name.lower() 95 if name_lower.endswith(IP4_PTR_SUFFIX): 96 return await resolve_question_by_addr( 97 question=question, 98 loop=loop, 99 suffix=IP4_PTR_SUFFIX, 100 sockaddr=lambda x: (str(ipaddress.IPv4Address(".".join(x))), 0), 101 ) 102 elif name_lower.endswith(IP6_PTR_SUFFIX): 103 return await resolve_question_by_addr( 104 question=question, 105 loop=loop, 106 suffix=IP6_PTR_SUFFIX, 107 sockaddr=lambda x: ( 108 str(ipaddress.IPv6Address(bytes.fromhex("".join(x)))), 109 0, 110 0, 111 0, 112 ), 113 ) 114 else: 115 raise ResolveError(dns.response_codes.FORMERR) 116 else: 117 raise ResolveError(dns.response_codes.NOTIMP) 118 119 120 async def resolve_message( 121 message: dns.Message, loop: asyncio.AbstractEventLoop 122 ) -> dns.Message: 123 try: 124 if not message.query: 125 raise ResolveError( 126 dns.response_codes.REFUSED 127 ) # we cannot resolve an answer 128 if message.op_code != dns.op_codes.QUERY: 129 raise ResolveError( 130 dns.response_codes.NOTIMP 131 ) # inverse queries and others are not supported 132 rrs: list[dns.ResourceRecord] = [] 133 for question in message.questions: 134 rrs.extend(await resolve_question(question, loop)) 135 except ResolveError as e: 136 return message.fail(e.response_code) 137 else: 138 return message.succeed(rrs) 139 140 141 class DnsResolver: 142 async def dns_request(self, flow: dns.DNSFlow) -> None: 143 should_resolve = ( 144 ( 145 isinstance(flow.client_conn.proxy_mode, mode_specs.DnsMode) 146 or ( 147 isinstance(flow.client_conn.proxy_mode, mode_specs.WireGuardMode) 148 and flow.server_conn.address == ("10.0.0.53", 53) 149 ) 150 ) 151 and flow.live 152 and not flow.response 153 and not flow.error 154 ) 155 if should_resolve: 156 # TODO: We need to handle overly long responses here. 157 flow.response = await resolve_message( 158 flow.request, asyncio.get_running_loop() 159 ) 160 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/addons/dns_resolver.py b/mitmproxy/addons/dns_resolver.py --- a/mitmproxy/addons/dns_resolver.py +++ b/mitmproxy/addons/dns_resolver.py @@ -26,7 +26,9 @@ ip: Callable[[str], ipaddress.IPv4Address | ipaddress.IPv6Address], ) -> Iterable[dns.ResourceRecord]: try: - addrinfos = await loop.getaddrinfo(host=question.name, port=0, family=family) + addrinfos = await loop.getaddrinfo( + host=question.name, port=0, family=family, type=socket.SOCK_STREAM + ) except socket.gaierror as e: if e.errno == socket.EAI_NONAME: raise ResolveError(dns.response_codes.NXDOMAIN)
{"golden_diff": "diff --git a/mitmproxy/addons/dns_resolver.py b/mitmproxy/addons/dns_resolver.py\n--- a/mitmproxy/addons/dns_resolver.py\n+++ b/mitmproxy/addons/dns_resolver.py\n@@ -26,7 +26,9 @@\n ip: Callable[[str], ipaddress.IPv4Address | ipaddress.IPv6Address],\n ) -> Iterable[dns.ResourceRecord]:\n try:\n- addrinfos = await loop.getaddrinfo(host=question.name, port=0, family=family)\n+ addrinfos = await loop.getaddrinfo(\n+ host=question.name, port=0, family=family, type=socket.SOCK_STREAM\n+ )\n except socket.gaierror as e:\n if e.errno == socket.EAI_NONAME:\n raise ResolveError(dns.response_codes.NXDOMAIN)\n", "issue": "Duplicate answers in DNS queries\n#### Problem Description\r\n\r\nTwo duplicate records are returned for each unique A/AAAA record in a DNS query when using DNS mode.\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\n##### Without mitmproxy\r\n\r\n1. Run `dig +short google.com`\r\n2. Correct output: `142.250.193.206`\r\n\r\n##### With mitmproxy\r\n\r\n1. Start mitmproxy `mitmproxy --mode dns@53535`\r\n2. Run `dig @127.0.0.1 -p 53535 +short google.com`\r\n3. Output with duplicates:\r\n ```\r\n 142.250.193.206\r\n 142.250.193.206\r\n 142.250.193.206\r\n ```\r\n\r\n#### System Information\r\n\r\n```\r\nMitmproxy: 11.0.0.dev (+19, commit d638213)\r\nPython: 3.12.1\r\nOpenSSL: OpenSSL 3.1.4 24 Oct 2023\r\nPlatform: Linux-6.6.14-200.fc39.x86_64-x86_64-with-glibc2.38\r\n```\r\n\r\n\r\n#### Additional Notes\r\n\r\nThis is happening because the `dns_resolver` addon calls `getaddrinfo` here:\r\n\r\nhttps://github.com/mitmproxy/mitmproxy/blob/1a02ebb89f6765d827f2fe0086dfe5960eb6e093/mitmproxy/addons/dns_resolver.py#L29\r\n\r\nWhich is returning one tuple each for UDP, TCP and a raw socket.\r\n\r\nWe could just do the following since I assume all requests are currently using UDP:\r\n```python\r\naddrinfos = await loop.getaddrinfo(host=question.name, port=0, family=family, type=socket.SOCK_DGRAM)\r\n```\r\n\r\nWhat do you think?\n", "before_files": [{"content": "import asyncio\nimport ipaddress\nimport socket\nfrom collections.abc import Callable\nfrom collections.abc import Iterable\n\nfrom mitmproxy import dns\nfrom mitmproxy.proxy import mode_specs\n\nIP4_PTR_SUFFIX = \".in-addr.arpa\"\nIP6_PTR_SUFFIX = \".ip6.arpa\"\n\n\nclass ResolveError(Exception):\n \"\"\"Exception thrown by different resolve methods.\"\"\"\n\n def __init__(self, response_code: int) -> None:\n assert response_code != dns.response_codes.NOERROR\n self.response_code = response_code\n\n\nasync def resolve_question_by_name(\n question: dns.Question,\n loop: asyncio.AbstractEventLoop,\n family: socket.AddressFamily,\n ip: Callable[[str], ipaddress.IPv4Address | ipaddress.IPv6Address],\n) -> Iterable[dns.ResourceRecord]:\n try:\n addrinfos = await loop.getaddrinfo(host=question.name, port=0, family=family)\n except socket.gaierror as e:\n if e.errno == socket.EAI_NONAME:\n raise ResolveError(dns.response_codes.NXDOMAIN)\n else:\n # NOTE might fail on Windows for IPv6 queries:\n # https://stackoverflow.com/questions/66755681/getaddrinfo-c-on-windows-not-handling-ipv6-correctly-returning-error-code-1\n raise ResolveError(dns.response_codes.SERVFAIL) # pragma: no cover\n return map(\n lambda addrinfo: dns.ResourceRecord(\n name=question.name,\n type=question.type,\n class_=question.class_,\n ttl=dns.ResourceRecord.DEFAULT_TTL,\n data=ip(addrinfo[4][0]).packed,\n ),\n addrinfos,\n )\n\n\nasync def resolve_question_by_addr(\n question: dns.Question,\n loop: asyncio.AbstractEventLoop,\n suffix: str,\n sockaddr: 
Callable[[list[str]], tuple[str, int] | tuple[str, int, int, int]],\n) -> Iterable[dns.ResourceRecord]:\n try:\n addr = sockaddr(question.name[: -len(suffix)].split(\".\")[::-1])\n except ValueError:\n raise ResolveError(dns.response_codes.FORMERR)\n try:\n name, _ = await loop.getnameinfo(addr, flags=socket.NI_NAMEREQD)\n except socket.gaierror as e:\n raise ResolveError(\n dns.response_codes.NXDOMAIN\n if e.errno == socket.EAI_NONAME\n else dns.response_codes.SERVFAIL\n )\n return [\n dns.ResourceRecord(\n name=question.name,\n type=question.type,\n class_=question.class_,\n ttl=dns.ResourceRecord.DEFAULT_TTL,\n data=dns.domain_names.pack(name),\n )\n ]\n\n\nasync def resolve_question(\n question: dns.Question, loop: asyncio.AbstractEventLoop\n) -> Iterable[dns.ResourceRecord]:\n \"\"\"Resolve the question into resource record(s), throwing ResolveError if an error condition occurs.\"\"\"\n\n if question.class_ != dns.classes.IN:\n raise ResolveError(dns.response_codes.NOTIMP)\n if question.type == dns.types.A:\n return await resolve_question_by_name(\n question, loop, socket.AddressFamily.AF_INET, ipaddress.IPv4Address\n )\n elif question.type == dns.types.AAAA:\n return await resolve_question_by_name(\n question, loop, socket.AddressFamily.AF_INET6, ipaddress.IPv6Address\n )\n elif question.type == dns.types.PTR:\n name_lower = question.name.lower()\n if name_lower.endswith(IP4_PTR_SUFFIX):\n return await resolve_question_by_addr(\n question=question,\n loop=loop,\n suffix=IP4_PTR_SUFFIX,\n sockaddr=lambda x: (str(ipaddress.IPv4Address(\".\".join(x))), 0),\n )\n elif name_lower.endswith(IP6_PTR_SUFFIX):\n return await resolve_question_by_addr(\n question=question,\n loop=loop,\n suffix=IP6_PTR_SUFFIX,\n sockaddr=lambda x: (\n str(ipaddress.IPv6Address(bytes.fromhex(\"\".join(x)))),\n 0,\n 0,\n 0,\n ),\n )\n else:\n raise ResolveError(dns.response_codes.FORMERR)\n else:\n raise ResolveError(dns.response_codes.NOTIMP)\n\n\nasync def resolve_message(\n message: dns.Message, loop: asyncio.AbstractEventLoop\n) -> dns.Message:\n try:\n if not message.query:\n raise ResolveError(\n dns.response_codes.REFUSED\n ) # we cannot resolve an answer\n if message.op_code != dns.op_codes.QUERY:\n raise ResolveError(\n dns.response_codes.NOTIMP\n ) # inverse queries and others are not supported\n rrs: list[dns.ResourceRecord] = []\n for question in message.questions:\n rrs.extend(await resolve_question(question, loop))\n except ResolveError as e:\n return message.fail(e.response_code)\n else:\n return message.succeed(rrs)\n\n\nclass DnsResolver:\n async def dns_request(self, flow: dns.DNSFlow) -> None:\n should_resolve = (\n (\n isinstance(flow.client_conn.proxy_mode, mode_specs.DnsMode)\n or (\n isinstance(flow.client_conn.proxy_mode, mode_specs.WireGuardMode)\n and flow.server_conn.address == (\"10.0.0.53\", 53)\n )\n )\n and flow.live\n and not flow.response\n and not flow.error\n )\n if should_resolve:\n # TODO: We need to handle overly long responses here.\n flow.response = await resolve_message(\n flow.request, asyncio.get_running_loop()\n )\n", "path": "mitmproxy/addons/dns_resolver.py"}], "after_files": [{"content": "import asyncio\nimport ipaddress\nimport socket\nfrom collections.abc import Callable\nfrom collections.abc import Iterable\n\nfrom mitmproxy import dns\nfrom mitmproxy.proxy import mode_specs\n\nIP4_PTR_SUFFIX = \".in-addr.arpa\"\nIP6_PTR_SUFFIX = \".ip6.arpa\"\n\n\nclass ResolveError(Exception):\n \"\"\"Exception thrown by different resolve methods.\"\"\"\n\n def __init__(self, 
response_code: int) -> None:\n assert response_code != dns.response_codes.NOERROR\n self.response_code = response_code\n\n\nasync def resolve_question_by_name(\n question: dns.Question,\n loop: asyncio.AbstractEventLoop,\n family: socket.AddressFamily,\n ip: Callable[[str], ipaddress.IPv4Address | ipaddress.IPv6Address],\n) -> Iterable[dns.ResourceRecord]:\n try:\n addrinfos = await loop.getaddrinfo(\n host=question.name, port=0, family=family, type=socket.SOCK_STREAM\n )\n except socket.gaierror as e:\n if e.errno == socket.EAI_NONAME:\n raise ResolveError(dns.response_codes.NXDOMAIN)\n else:\n # NOTE might fail on Windows for IPv6 queries:\n # https://stackoverflow.com/questions/66755681/getaddrinfo-c-on-windows-not-handling-ipv6-correctly-returning-error-code-1\n raise ResolveError(dns.response_codes.SERVFAIL) # pragma: no cover\n return map(\n lambda addrinfo: dns.ResourceRecord(\n name=question.name,\n type=question.type,\n class_=question.class_,\n ttl=dns.ResourceRecord.DEFAULT_TTL,\n data=ip(addrinfo[4][0]).packed,\n ),\n addrinfos,\n )\n\n\nasync def resolve_question_by_addr(\n question: dns.Question,\n loop: asyncio.AbstractEventLoop,\n suffix: str,\n sockaddr: Callable[[list[str]], tuple[str, int] | tuple[str, int, int, int]],\n) -> Iterable[dns.ResourceRecord]:\n try:\n addr = sockaddr(question.name[: -len(suffix)].split(\".\")[::-1])\n except ValueError:\n raise ResolveError(dns.response_codes.FORMERR)\n try:\n name, _ = await loop.getnameinfo(addr, flags=socket.NI_NAMEREQD)\n except socket.gaierror as e:\n raise ResolveError(\n dns.response_codes.NXDOMAIN\n if e.errno == socket.EAI_NONAME\n else dns.response_codes.SERVFAIL\n )\n return [\n dns.ResourceRecord(\n name=question.name,\n type=question.type,\n class_=question.class_,\n ttl=dns.ResourceRecord.DEFAULT_TTL,\n data=dns.domain_names.pack(name),\n )\n ]\n\n\nasync def resolve_question(\n question: dns.Question, loop: asyncio.AbstractEventLoop\n) -> Iterable[dns.ResourceRecord]:\n \"\"\"Resolve the question into resource record(s), throwing ResolveError if an error condition occurs.\"\"\"\n\n if question.class_ != dns.classes.IN:\n raise ResolveError(dns.response_codes.NOTIMP)\n if question.type == dns.types.A:\n return await resolve_question_by_name(\n question, loop, socket.AddressFamily.AF_INET, ipaddress.IPv4Address\n )\n elif question.type == dns.types.AAAA:\n return await resolve_question_by_name(\n question, loop, socket.AddressFamily.AF_INET6, ipaddress.IPv6Address\n )\n elif question.type == dns.types.PTR:\n name_lower = question.name.lower()\n if name_lower.endswith(IP4_PTR_SUFFIX):\n return await resolve_question_by_addr(\n question=question,\n loop=loop,\n suffix=IP4_PTR_SUFFIX,\n sockaddr=lambda x: (str(ipaddress.IPv4Address(\".\".join(x))), 0),\n )\n elif name_lower.endswith(IP6_PTR_SUFFIX):\n return await resolve_question_by_addr(\n question=question,\n loop=loop,\n suffix=IP6_PTR_SUFFIX,\n sockaddr=lambda x: (\n str(ipaddress.IPv6Address(bytes.fromhex(\"\".join(x)))),\n 0,\n 0,\n 0,\n ),\n )\n else:\n raise ResolveError(dns.response_codes.FORMERR)\n else:\n raise ResolveError(dns.response_codes.NOTIMP)\n\n\nasync def resolve_message(\n message: dns.Message, loop: asyncio.AbstractEventLoop\n) -> dns.Message:\n try:\n if not message.query:\n raise ResolveError(\n dns.response_codes.REFUSED\n ) # we cannot resolve an answer\n if message.op_code != dns.op_codes.QUERY:\n raise ResolveError(\n dns.response_codes.NOTIMP\n ) # inverse queries and others are not supported\n rrs: list[dns.ResourceRecord] = []\n for 
question in message.questions:\n rrs.extend(await resolve_question(question, loop))\n except ResolveError as e:\n return message.fail(e.response_code)\n else:\n return message.succeed(rrs)\n\n\nclass DnsResolver:\n async def dns_request(self, flow: dns.DNSFlow) -> None:\n should_resolve = (\n (\n isinstance(flow.client_conn.proxy_mode, mode_specs.DnsMode)\n or (\n isinstance(flow.client_conn.proxy_mode, mode_specs.WireGuardMode)\n and flow.server_conn.address == (\"10.0.0.53\", 53)\n )\n )\n and flow.live\n and not flow.response\n and not flow.error\n )\n if should_resolve:\n # TODO: We need to handle overly long responses here.\n flow.response = await resolve_message(\n flow.request, asyncio.get_running_loop()\n )\n", "path": "mitmproxy/addons/dns_resolver.py"}]}
2,288
183
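A side note on the mechanism the record above describes: without a socket `type` filter, `getaddrinfo` returns one address-info tuple per socket type, which is exactly what produced the duplicated DNS answers. A minimal sketch of that behaviour — the hostname is arbitrary, the call needs a working resolver, and the exact entry counts are platform-dependent:

```python
import socket

# One A record typically yields three entries here: the SOCK_STREAM,
# SOCK_DGRAM and SOCK_RAW variants of the same address.
unfiltered = socket.getaddrinfo("example.com", 0, family=socket.AF_INET)

# Restricting the socket type collapses the duplicates; this is the idea
# behind the type= argument in the patch (SOCK_STREAM in the merged fix,
# SOCK_DGRAM in the issue's suggestion).
filtered = socket.getaddrinfo(
    "example.com", 0, family=socket.AF_INET, type=socket.SOCK_STREAM
)

print(len(unfiltered), len(filtered))  # e.g. 3 and 1
```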
gh_patches_debug_32391
rasdani/github-patches
git_diff
DataDog__dd-agent-2189
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Mesos master port in mesos_slave.py is hardcoded https://github.com/DataDog/dd-agent/blob/master/checks.d/mesos_slave.py#L132 Effecting mesos_slave configuration doesn't work if the master port is other than 5050. Probably should be added in configuration. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checks.d/mesos_slave.py` Content: ``` 1 """Mesos Slave check 2 3 Collects metrics from mesos slave node. 4 """ 5 # 3rd party 6 import requests 7 8 # project 9 from checks import AgentCheck, CheckException 10 11 12 class MesosSlave(AgentCheck): 13 GAUGE = AgentCheck.gauge 14 MONOTONIC_COUNT = AgentCheck.monotonic_count 15 SERVICE_CHECK_NAME = "mesos_slave.can_connect" 16 service_check_needed = True 17 18 TASK_STATUS = { 19 'TASK_STARTING' : AgentCheck.OK, 20 'TASK_RUNNING' : AgentCheck.OK, 21 'TASK_FINISHED' : AgentCheck.OK, 22 'TASK_FAILED' : AgentCheck.CRITICAL, 23 'TASK_KILLED' : AgentCheck.WARNING, 24 'TASK_LOST' : AgentCheck.CRITICAL, 25 'TASK_STAGING' : AgentCheck.OK, 26 'TASK_ERROR' : AgentCheck.CRITICAL, 27 } 28 29 TASK_METRICS = { 30 'cpus' : ('mesos.state.task.cpu', GAUGE), 31 'mem' : ('mesos.state.task.mem', GAUGE), 32 'disk' : ('mesos.state.task.disk', GAUGE), 33 } 34 35 SLAVE_TASKS_METRICS = { 36 'slave/tasks_failed' : ('mesos.slave.tasks_failed', MONOTONIC_COUNT), 37 'slave/tasks_finished' : ('mesos.slave.tasks_finished', MONOTONIC_COUNT), 38 'slave/tasks_killed' : ('mesos.slave.tasks_killed', MONOTONIC_COUNT), 39 'slave/tasks_lost' : ('mesos.slave.tasks_lost', MONOTONIC_COUNT), 40 'slave/tasks_running' : ('mesos.slave.tasks_running', GAUGE), 41 'slave/tasks_staging' : ('mesos.slave.tasks_staging', GAUGE), 42 'slave/tasks_starting' : ('mesos.slave.tasks_starting', GAUGE), 43 } 44 45 SYSTEM_METRICS = { 46 'system/cpus_total' : ('mesos.stats.system.cpus_total', GAUGE), 47 'system/load_15min' : ('mesos.stats.system.load_15min', GAUGE), 48 'system/load_1min' : ('mesos.stats.system.load_1min', GAUGE), 49 'system/load_5min' : ('mesos.stats.system.load_5min', GAUGE), 50 'system/mem_free_bytes' : ('mesos.stats.system.mem_free_bytes', GAUGE), 51 'system/mem_total_bytes' : ('mesos.stats.system.mem_total_bytes', GAUGE), 52 'slave/registered' : ('mesos.stats.registered', GAUGE), 53 'slave/uptime_secs' : ('mesos.stats.uptime_secs', GAUGE), 54 } 55 56 SLAVE_RESOURCE_METRICS = { 57 'slave/cpus_percent' : ('mesos.slave.cpus_percent', GAUGE), 58 'slave/cpus_total' : ('mesos.slave.cpus_total', GAUGE), 59 'slave/cpus_used' : ('mesos.slave.cpus_used', GAUGE), 60 'slave/disk_percent' : ('mesos.slave.disk_percent', GAUGE), 61 'slave/disk_total' : ('mesos.slave.disk_total', GAUGE), 62 'slave/disk_used' : ('mesos.slave.disk_used', GAUGE), 63 'slave/mem_percent' : ('mesos.slave.mem_percent', GAUGE), 64 'slave/mem_total' : ('mesos.slave.mem_total', GAUGE), 65 'slave/mem_used' : ('mesos.slave.mem_used', GAUGE), 66 } 67 68 SLAVE_EXECUTORS_METRICS = { 69 'slave/executors_registering' : ('mesos.slave.executors_registering', GAUGE), 70 'slave/executors_running' : ('mesos.slave.executors_running', GAUGE), 71 'slave/executors_terminated' : ('mesos.slave.executors_terminated', GAUGE), 72 'slave/executors_terminating' : ('mesos.slave.executors_terminating', GAUGE), 73 } 74 75 STATS_METRICS = { 76 'slave/frameworks_active' : ('mesos.slave.frameworks_active', GAUGE), 77 'slave/invalid_framework_messages' : 
('mesos.slave.invalid_framework_messages', GAUGE), 78 'slave/invalid_status_updates' : ('mesos.slave.invalid_status_updates', GAUGE), 79 'slave/recovery_errors' : ('mesos.slave.recovery_errors', GAUGE), 80 'slave/valid_framework_messages' : ('mesos.slave.valid_framework_messages', GAUGE), 81 'slave/valid_status_updates' : ('mesos.slave.valid_status_updates', GAUGE), 82 } 83 84 def __init__(self, name, init_config, agentConfig, instances=None): 85 AgentCheck.__init__(self, name, init_config, agentConfig, instances) 86 self.cluster_name = None 87 88 def _get_json(self, url, timeout): 89 tags = ["url:%s" % url] 90 msg = None 91 status = None 92 try: 93 r = requests.get(url, timeout=timeout) 94 if r.status_code != 200: 95 status = AgentCheck.CRITICAL 96 msg = "Got %s when hitting %s" % (r.status_code, url) 97 else: 98 status = AgentCheck.OK 99 msg = "Mesos master instance detected at %s " % url 100 except requests.exceptions.Timeout as e: 101 # If there's a timeout 102 msg = "%s seconds timeout when hitting %s" % (timeout, url) 103 status = AgentCheck.CRITICAL 104 except Exception as e: 105 msg = str(e) 106 status = AgentCheck.CRITICAL 107 finally: 108 if self.service_check_needed: 109 self.service_check(self.SERVICE_CHECK_NAME, status, tags=tags, message=msg) 110 self.service_check_needed = False 111 if status is AgentCheck.CRITICAL: 112 raise CheckException("Cannot connect to mesos, please check your configuration.") 113 114 return r.json() 115 116 def _get_state(self, url, timeout): 117 return self._get_json(url + '/state.json', timeout) 118 119 def _get_stats(self, url, timeout): 120 if self.version >= [0, 22, 0]: 121 endpoint = '/metrics/snapshot' 122 else: 123 endpoint = '/stats.json' 124 return self._get_json(url + endpoint, timeout) 125 126 def _get_constant_attributes(self, url, timeout): 127 state_metrics = None 128 if self.cluster_name is None: 129 state_metrics = self._get_state(url, timeout) 130 if state_metrics is not None: 131 self.version = map(int, state_metrics['version'].split('.')) 132 master_state = self._get_state('http://' + state_metrics['master_hostname'] + ':5050', timeout) 133 if master_state is not None: 134 self.cluster_name = master_state.get('cluster') 135 136 return state_metrics 137 138 def check(self, instance): 139 if 'url' not in instance: 140 raise Exception('Mesos instance missing "url" value.') 141 142 url = instance['url'] 143 instance_tags = instance.get('tags', []) 144 tasks = instance.get('tasks', []) 145 default_timeout = self.init_config.get('default_timeout', 5) 146 timeout = float(instance.get('timeout', default_timeout)) 147 148 state_metrics = self._get_constant_attributes(url, timeout) 149 tags = None 150 151 if state_metrics is None: 152 state_metrics = self._get_state(url, timeout) 153 if state_metrics: 154 tags = [ 155 'mesos_pid:{0}'.format(state_metrics['pid']), 156 'mesos_node:slave', 157 ] 158 if self.cluster_name: 159 tags.append('mesos_cluster:{0}'.format(self.cluster_name)) 160 161 tags += instance_tags 162 163 for task in tasks: 164 for framework in state_metrics['frameworks']: 165 for executor in framework['executors']: 166 for t in executor['tasks']: 167 if task.lower() in t['name'].lower() and t['slave_id'] == state_metrics['id']: 168 task_tags = ['task_name:' + t['name']] + tags 169 self.service_check(t['name'] + '.ok', self.TASK_STATUS[t['state']], tags=task_tags) 170 for key_name, (metric_name, metric_func) in self.TASK_METRICS.iteritems(): 171 metric_func(self, metric_name, t['resources'][key_name], tags=task_tags) 172 173 
stats_metrics = self._get_stats(url, timeout) 174 if stats_metrics: 175 tags = tags if tags else instance_tags 176 metrics = [self.SLAVE_TASKS_METRICS, self.SYSTEM_METRICS, self.SLAVE_RESOURCE_METRICS, 177 self.SLAVE_EXECUTORS_METRICS, self.STATS_METRICS] 178 for m in metrics: 179 for key_name, (metric_name, metric_func) in m.iteritems(): 180 metric_func(self, metric_name, stats_metrics[key_name], tags=tags) 181 182 self.service_check_needed = True 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checks.d/mesos_slave.py b/checks.d/mesos_slave.py --- a/checks.d/mesos_slave.py +++ b/checks.d/mesos_slave.py @@ -8,6 +8,7 @@ # project from checks import AgentCheck, CheckException +DEFAULT_MASTER_PORT = 5050 class MesosSlave(AgentCheck): GAUGE = AgentCheck.gauge @@ -123,13 +124,16 @@ endpoint = '/stats.json' return self._get_json(url + endpoint, timeout) - def _get_constant_attributes(self, url, timeout): + def _get_constant_attributes(self, url, timeout, master_port): state_metrics = None if self.cluster_name is None: state_metrics = self._get_state(url, timeout) if state_metrics is not None: self.version = map(int, state_metrics['version'].split('.')) - master_state = self._get_state('http://' + state_metrics['master_hostname'] + ':5050', timeout) + master_state = self._get_state( + 'http://{0}:{1}'.format(state_metrics['master_hostname'], master_port), + timeout + ) if master_state is not None: self.cluster_name = master_state.get('cluster') @@ -144,8 +148,9 @@ tasks = instance.get('tasks', []) default_timeout = self.init_config.get('default_timeout', 5) timeout = float(instance.get('timeout', default_timeout)) + master_port = instance.get("master_port", DEFAULT_MASTER_PORT) - state_metrics = self._get_constant_attributes(url, timeout) + state_metrics = self._get_constant_attributes(url, timeout, master_port) tags = None if state_metrics is None:
{"golden_diff": "diff --git a/checks.d/mesos_slave.py b/checks.d/mesos_slave.py\n--- a/checks.d/mesos_slave.py\n+++ b/checks.d/mesos_slave.py\n@@ -8,6 +8,7 @@\n # project\n from checks import AgentCheck, CheckException\n \n+DEFAULT_MASTER_PORT = 5050\n \n class MesosSlave(AgentCheck):\n GAUGE = AgentCheck.gauge\n@@ -123,13 +124,16 @@\n endpoint = '/stats.json'\n return self._get_json(url + endpoint, timeout)\n \n- def _get_constant_attributes(self, url, timeout):\n+ def _get_constant_attributes(self, url, timeout, master_port):\n state_metrics = None\n if self.cluster_name is None:\n state_metrics = self._get_state(url, timeout)\n if state_metrics is not None:\n self.version = map(int, state_metrics['version'].split('.'))\n- master_state = self._get_state('http://' + state_metrics['master_hostname'] + ':5050', timeout)\n+ master_state = self._get_state(\n+ 'http://{0}:{1}'.format(state_metrics['master_hostname'], master_port),\n+ timeout\n+ )\n if master_state is not None:\n self.cluster_name = master_state.get('cluster')\n \n@@ -144,8 +148,9 @@\n tasks = instance.get('tasks', [])\n default_timeout = self.init_config.get('default_timeout', 5)\n timeout = float(instance.get('timeout', default_timeout))\n+ master_port = instance.get(\"master_port\", DEFAULT_MASTER_PORT)\n \n- state_metrics = self._get_constant_attributes(url, timeout)\n+ state_metrics = self._get_constant_attributes(url, timeout, master_port)\n tags = None\n \n if state_metrics is None:\n", "issue": "Mesos master port in mesos_slave.py is hardcoded\nhttps://github.com/DataDog/dd-agent/blob/master/checks.d/mesos_slave.py#L132\n\nEffecting mesos_slave configuration doesn't work if the master port is other than 5050.\nProbably should be added in configuration.\n\n", "before_files": [{"content": "\"\"\"Mesos Slave check\n\nCollects metrics from mesos slave node.\n\"\"\"\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck, CheckException\n\n\nclass MesosSlave(AgentCheck):\n GAUGE = AgentCheck.gauge\n MONOTONIC_COUNT = AgentCheck.monotonic_count\n SERVICE_CHECK_NAME = \"mesos_slave.can_connect\"\n service_check_needed = True\n\n TASK_STATUS = {\n 'TASK_STARTING' : AgentCheck.OK,\n 'TASK_RUNNING' : AgentCheck.OK,\n 'TASK_FINISHED' : AgentCheck.OK,\n 'TASK_FAILED' : AgentCheck.CRITICAL,\n 'TASK_KILLED' : AgentCheck.WARNING,\n 'TASK_LOST' : AgentCheck.CRITICAL,\n 'TASK_STAGING' : AgentCheck.OK,\n 'TASK_ERROR' : AgentCheck.CRITICAL,\n }\n\n TASK_METRICS = {\n 'cpus' : ('mesos.state.task.cpu', GAUGE),\n 'mem' : ('mesos.state.task.mem', GAUGE),\n 'disk' : ('mesos.state.task.disk', GAUGE),\n }\n\n SLAVE_TASKS_METRICS = {\n 'slave/tasks_failed' : ('mesos.slave.tasks_failed', MONOTONIC_COUNT),\n 'slave/tasks_finished' : ('mesos.slave.tasks_finished', MONOTONIC_COUNT),\n 'slave/tasks_killed' : ('mesos.slave.tasks_killed', MONOTONIC_COUNT),\n 'slave/tasks_lost' : ('mesos.slave.tasks_lost', MONOTONIC_COUNT),\n 'slave/tasks_running' : ('mesos.slave.tasks_running', GAUGE),\n 'slave/tasks_staging' : ('mesos.slave.tasks_staging', GAUGE),\n 'slave/tasks_starting' : ('mesos.slave.tasks_starting', GAUGE),\n }\n\n SYSTEM_METRICS = {\n 'system/cpus_total' : ('mesos.stats.system.cpus_total', GAUGE),\n 'system/load_15min' : ('mesos.stats.system.load_15min', GAUGE),\n 'system/load_1min' : ('mesos.stats.system.load_1min', GAUGE),\n 'system/load_5min' : ('mesos.stats.system.load_5min', GAUGE),\n 'system/mem_free_bytes' : ('mesos.stats.system.mem_free_bytes', GAUGE),\n 'system/mem_total_bytes' : ('mesos.stats.system.mem_total_bytes', 
GAUGE),\n 'slave/registered' : ('mesos.stats.registered', GAUGE),\n 'slave/uptime_secs' : ('mesos.stats.uptime_secs', GAUGE),\n }\n\n SLAVE_RESOURCE_METRICS = {\n 'slave/cpus_percent' : ('mesos.slave.cpus_percent', GAUGE),\n 'slave/cpus_total' : ('mesos.slave.cpus_total', GAUGE),\n 'slave/cpus_used' : ('mesos.slave.cpus_used', GAUGE),\n 'slave/disk_percent' : ('mesos.slave.disk_percent', GAUGE),\n 'slave/disk_total' : ('mesos.slave.disk_total', GAUGE),\n 'slave/disk_used' : ('mesos.slave.disk_used', GAUGE),\n 'slave/mem_percent' : ('mesos.slave.mem_percent', GAUGE),\n 'slave/mem_total' : ('mesos.slave.mem_total', GAUGE),\n 'slave/mem_used' : ('mesos.slave.mem_used', GAUGE),\n }\n\n SLAVE_EXECUTORS_METRICS = {\n 'slave/executors_registering' : ('mesos.slave.executors_registering', GAUGE),\n 'slave/executors_running' : ('mesos.slave.executors_running', GAUGE),\n 'slave/executors_terminated' : ('mesos.slave.executors_terminated', GAUGE),\n 'slave/executors_terminating' : ('mesos.slave.executors_terminating', GAUGE),\n }\n\n STATS_METRICS = {\n 'slave/frameworks_active' : ('mesos.slave.frameworks_active', GAUGE),\n 'slave/invalid_framework_messages' : ('mesos.slave.invalid_framework_messages', GAUGE),\n 'slave/invalid_status_updates' : ('mesos.slave.invalid_status_updates', GAUGE),\n 'slave/recovery_errors' : ('mesos.slave.recovery_errors', GAUGE),\n 'slave/valid_framework_messages' : ('mesos.slave.valid_framework_messages', GAUGE),\n 'slave/valid_status_updates' : ('mesos.slave.valid_status_updates', GAUGE),\n }\n\n def __init__(self, name, init_config, agentConfig, instances=None):\n AgentCheck.__init__(self, name, init_config, agentConfig, instances)\n self.cluster_name = None\n\n def _get_json(self, url, timeout):\n tags = [\"url:%s\" % url]\n msg = None\n status = None\n try:\n r = requests.get(url, timeout=timeout)\n if r.status_code != 200:\n status = AgentCheck.CRITICAL\n msg = \"Got %s when hitting %s\" % (r.status_code, url)\n else:\n status = AgentCheck.OK\n msg = \"Mesos master instance detected at %s \" % url\n except requests.exceptions.Timeout as e:\n # If there's a timeout\n msg = \"%s seconds timeout when hitting %s\" % (timeout, url)\n status = AgentCheck.CRITICAL\n except Exception as e:\n msg = str(e)\n status = AgentCheck.CRITICAL\n finally:\n if self.service_check_needed:\n self.service_check(self.SERVICE_CHECK_NAME, status, tags=tags, message=msg)\n self.service_check_needed = False\n if status is AgentCheck.CRITICAL:\n raise CheckException(\"Cannot connect to mesos, please check your configuration.\")\n\n return r.json()\n\n def _get_state(self, url, timeout):\n return self._get_json(url + '/state.json', timeout)\n\n def _get_stats(self, url, timeout):\n if self.version >= [0, 22, 0]:\n endpoint = '/metrics/snapshot'\n else:\n endpoint = '/stats.json'\n return self._get_json(url + endpoint, timeout)\n\n def _get_constant_attributes(self, url, timeout):\n state_metrics = None\n if self.cluster_name is None:\n state_metrics = self._get_state(url, timeout)\n if state_metrics is not None:\n self.version = map(int, state_metrics['version'].split('.'))\n master_state = self._get_state('http://' + state_metrics['master_hostname'] + ':5050', timeout)\n if master_state is not None:\n self.cluster_name = master_state.get('cluster')\n\n return state_metrics\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('Mesos instance missing \"url\" value.')\n\n url = instance['url']\n instance_tags = instance.get('tags', [])\n tasks = instance.get('tasks', [])\n 
default_timeout = self.init_config.get('default_timeout', 5)\n timeout = float(instance.get('timeout', default_timeout))\n\n state_metrics = self._get_constant_attributes(url, timeout)\n tags = None\n\n if state_metrics is None:\n state_metrics = self._get_state(url, timeout)\n if state_metrics:\n tags = [\n 'mesos_pid:{0}'.format(state_metrics['pid']),\n 'mesos_node:slave',\n ]\n if self.cluster_name:\n tags.append('mesos_cluster:{0}'.format(self.cluster_name))\n\n tags += instance_tags\n\n for task in tasks:\n for framework in state_metrics['frameworks']:\n for executor in framework['executors']:\n for t in executor['tasks']:\n if task.lower() in t['name'].lower() and t['slave_id'] == state_metrics['id']:\n task_tags = ['task_name:' + t['name']] + tags\n self.service_check(t['name'] + '.ok', self.TASK_STATUS[t['state']], tags=task_tags)\n for key_name, (metric_name, metric_func) in self.TASK_METRICS.iteritems():\n metric_func(self, metric_name, t['resources'][key_name], tags=task_tags)\n\n stats_metrics = self._get_stats(url, timeout)\n if stats_metrics:\n tags = tags if tags else instance_tags\n metrics = [self.SLAVE_TASKS_METRICS, self.SYSTEM_METRICS, self.SLAVE_RESOURCE_METRICS,\n self.SLAVE_EXECUTORS_METRICS, self.STATS_METRICS]\n for m in metrics:\n for key_name, (metric_name, metric_func) in m.iteritems():\n metric_func(self, metric_name, stats_metrics[key_name], tags=tags)\n\n self.service_check_needed = True\n", "path": "checks.d/mesos_slave.py"}], "after_files": [{"content": "\"\"\"Mesos Slave check\n\nCollects metrics from mesos slave node.\n\"\"\"\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck, CheckException\n\nDEFAULT_MASTER_PORT = 5050\n\nclass MesosSlave(AgentCheck):\n GAUGE = AgentCheck.gauge\n MONOTONIC_COUNT = AgentCheck.monotonic_count\n SERVICE_CHECK_NAME = \"mesos_slave.can_connect\"\n service_check_needed = True\n\n TASK_STATUS = {\n 'TASK_STARTING' : AgentCheck.OK,\n 'TASK_RUNNING' : AgentCheck.OK,\n 'TASK_FINISHED' : AgentCheck.OK,\n 'TASK_FAILED' : AgentCheck.CRITICAL,\n 'TASK_KILLED' : AgentCheck.WARNING,\n 'TASK_LOST' : AgentCheck.CRITICAL,\n 'TASK_STAGING' : AgentCheck.OK,\n 'TASK_ERROR' : AgentCheck.CRITICAL,\n }\n\n TASK_METRICS = {\n 'cpus' : ('mesos.state.task.cpu', GAUGE),\n 'mem' : ('mesos.state.task.mem', GAUGE),\n 'disk' : ('mesos.state.task.disk', GAUGE),\n }\n\n SLAVE_TASKS_METRICS = {\n 'slave/tasks_failed' : ('mesos.slave.tasks_failed', MONOTONIC_COUNT),\n 'slave/tasks_finished' : ('mesos.slave.tasks_finished', MONOTONIC_COUNT),\n 'slave/tasks_killed' : ('mesos.slave.tasks_killed', MONOTONIC_COUNT),\n 'slave/tasks_lost' : ('mesos.slave.tasks_lost', MONOTONIC_COUNT),\n 'slave/tasks_running' : ('mesos.slave.tasks_running', GAUGE),\n 'slave/tasks_staging' : ('mesos.slave.tasks_staging', GAUGE),\n 'slave/tasks_starting' : ('mesos.slave.tasks_starting', GAUGE),\n }\n\n SYSTEM_METRICS = {\n 'system/cpus_total' : ('mesos.stats.system.cpus_total', GAUGE),\n 'system/load_15min' : ('mesos.stats.system.load_15min', GAUGE),\n 'system/load_1min' : ('mesos.stats.system.load_1min', GAUGE),\n 'system/load_5min' : ('mesos.stats.system.load_5min', GAUGE),\n 'system/mem_free_bytes' : ('mesos.stats.system.mem_free_bytes', GAUGE),\n 'system/mem_total_bytes' : ('mesos.stats.system.mem_total_bytes', GAUGE),\n 'slave/registered' : ('mesos.stats.registered', GAUGE),\n 'slave/uptime_secs' : ('mesos.stats.uptime_secs', GAUGE),\n }\n\n SLAVE_RESOURCE_METRICS = {\n 'slave/cpus_percent' : ('mesos.slave.cpus_percent', GAUGE),\n 'slave/cpus_total' : 
('mesos.slave.cpus_total', GAUGE),\n 'slave/cpus_used' : ('mesos.slave.cpus_used', GAUGE),\n 'slave/disk_percent' : ('mesos.slave.disk_percent', GAUGE),\n 'slave/disk_total' : ('mesos.slave.disk_total', GAUGE),\n 'slave/disk_used' : ('mesos.slave.disk_used', GAUGE),\n 'slave/mem_percent' : ('mesos.slave.mem_percent', GAUGE),\n 'slave/mem_total' : ('mesos.slave.mem_total', GAUGE),\n 'slave/mem_used' : ('mesos.slave.mem_used', GAUGE),\n }\n\n SLAVE_EXECUTORS_METRICS = {\n 'slave/executors_registering' : ('mesos.slave.executors_registering', GAUGE),\n 'slave/executors_running' : ('mesos.slave.executors_running', GAUGE),\n 'slave/executors_terminated' : ('mesos.slave.executors_terminated', GAUGE),\n 'slave/executors_terminating' : ('mesos.slave.executors_terminating', GAUGE),\n }\n\n STATS_METRICS = {\n 'slave/frameworks_active' : ('mesos.slave.frameworks_active', GAUGE),\n 'slave/invalid_framework_messages' : ('mesos.slave.invalid_framework_messages', GAUGE),\n 'slave/invalid_status_updates' : ('mesos.slave.invalid_status_updates', GAUGE),\n 'slave/recovery_errors' : ('mesos.slave.recovery_errors', GAUGE),\n 'slave/valid_framework_messages' : ('mesos.slave.valid_framework_messages', GAUGE),\n 'slave/valid_status_updates' : ('mesos.slave.valid_status_updates', GAUGE),\n }\n\n def __init__(self, name, init_config, agentConfig, instances=None):\n AgentCheck.__init__(self, name, init_config, agentConfig, instances)\n self.cluster_name = None\n\n def _get_json(self, url, timeout):\n tags = [\"url:%s\" % url]\n msg = None\n status = None\n try:\n r = requests.get(url, timeout=timeout)\n if r.status_code != 200:\n status = AgentCheck.CRITICAL\n msg = \"Got %s when hitting %s\" % (r.status_code, url)\n else:\n status = AgentCheck.OK\n msg = \"Mesos master instance detected at %s \" % url\n except requests.exceptions.Timeout as e:\n # If there's a timeout\n msg = \"%s seconds timeout when hitting %s\" % (timeout, url)\n status = AgentCheck.CRITICAL\n except Exception as e:\n msg = str(e)\n status = AgentCheck.CRITICAL\n finally:\n if self.service_check_needed:\n self.service_check(self.SERVICE_CHECK_NAME, status, tags=tags, message=msg)\n self.service_check_needed = False\n if status is AgentCheck.CRITICAL:\n raise CheckException(\"Cannot connect to mesos, please check your configuration.\")\n\n return r.json()\n\n def _get_state(self, url, timeout):\n return self._get_json(url + '/state.json', timeout)\n\n def _get_stats(self, url, timeout):\n if self.version >= [0, 22, 0]:\n endpoint = '/metrics/snapshot'\n else:\n endpoint = '/stats.json'\n return self._get_json(url + endpoint, timeout)\n\n def _get_constant_attributes(self, url, timeout, master_port):\n state_metrics = None\n if self.cluster_name is None:\n state_metrics = self._get_state(url, timeout)\n if state_metrics is not None:\n self.version = map(int, state_metrics['version'].split('.'))\n master_state = self._get_state(\n 'http://{0}:{1}'.format(state_metrics['master_hostname'], master_port),\n timeout\n )\n if master_state is not None:\n self.cluster_name = master_state.get('cluster')\n\n return state_metrics\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('Mesos instance missing \"url\" value.')\n\n url = instance['url']\n instance_tags = instance.get('tags', [])\n tasks = instance.get('tasks', [])\n default_timeout = self.init_config.get('default_timeout', 5)\n timeout = float(instance.get('timeout', default_timeout))\n master_port = instance.get(\"master_port\", DEFAULT_MASTER_PORT)\n\n state_metrics = 
self._get_constant_attributes(url, timeout, master_port)\n tags = None\n\n if state_metrics is None:\n state_metrics = self._get_state(url, timeout)\n if state_metrics:\n tags = [\n 'mesos_pid:{0}'.format(state_metrics['pid']),\n 'mesos_node:slave',\n ]\n if self.cluster_name:\n tags.append('mesos_cluster:{0}'.format(self.cluster_name))\n\n tags += instance_tags\n\n for task in tasks:\n for framework in state_metrics['frameworks']:\n for executor in framework['executors']:\n for t in executor['tasks']:\n if task.lower() in t['name'].lower() and t['slave_id'] == state_metrics['id']:\n task_tags = ['task_name:' + t['name']] + tags\n self.service_check(t['name'] + '.ok', self.TASK_STATUS[t['state']], tags=task_tags)\n for key_name, (metric_name, metric_func) in self.TASK_METRICS.iteritems():\n metric_func(self, metric_name, t['resources'][key_name], tags=task_tags)\n\n stats_metrics = self._get_stats(url, timeout)\n if stats_metrics:\n tags = tags if tags else instance_tags\n metrics = [self.SLAVE_TASKS_METRICS, self.SYSTEM_METRICS, self.SLAVE_RESOURCE_METRICS,\n self.SLAVE_EXECUTORS_METRICS, self.STATS_METRICS]\n for m in metrics:\n for key_name, (metric_name, metric_func) in m.iteritems():\n metric_func(self, metric_name, stats_metrics[key_name], tags=tags)\n\n self.service_check_needed = True\n", "path": "checks.d/mesos_slave.py"}]}
2,729
401
gh_patches_debug_21069
rasdani/github-patches
git_diff
fossasia__open-event-server-9044
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot save the badge field ``` HINT: You will need to rewrite or cast the expression. The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/data/app/app/api/helpers/db.py", line 27, in save_to_db db.session.commit() File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/scoping.py", line 163, in do return getattr(self.registry(), name)(*args, **kwargs) File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1046, in commit self.transaction.commit() File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 504, in commit self._prepare_impl() File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 483, in _prepare_impl self.session.flush() File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2540, in flush self._flush(objects) File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2682, in _flush transaction.rollback(_capture_exception=True) File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__ compat.raise_( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 182, in raise_ raise exception File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2642, in _flush flush_context.execute() File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute rec.execute(self) File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py", line 586, in execute persistence.save_obj( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py", line 239, in save_obj _emit_insert_statements( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py", line 1135, in _emit_insert_statements result = cached_connections[connection].execute( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1011, in execute return meth(self, multiparams, params) File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection return connection._execute_clauseelement(self, multiparams, params) File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1124, in _execute_clauseelement ret = self._execute_context( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1316, in _execute_context self._handle_dbapi_exception( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1510, in _handle_dbapi_exception util.raise_( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 182, in raise_ raise exception File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1276, in _execute_context self.dialect.do_execute( File "/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 608, in do_execute cursor.execute(statement, parameters) sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DatatypeMismatch) column "font_weight" is of type integer but expression is of type json[] LINE 1: ...e', 'Last Name', 'Sample Text', 14, 'Arial', CAST(ARRAY['{"n... ``` --- END ISSUE --- Below are some code segments, each from a relevant file. 
One or more of these files may contain bugs. --- BEGIN FILES --- Path: `migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py` Content: ``` 1 """empty message 2 3 Revision ID: 8b5bc48e1d4c 4 Revises: 21c79d253f21 5 Create Date: 2023-08-01 14:10:12.187180 6 7 """ 8 9 from alembic import op 10 import sqlalchemy as sa 11 from sqlalchemy.dialects import postgresql 12 13 # revision identifiers, used by Alembic. 14 revision = '8b5bc48e1d4c' 15 down_revision = '21c79d253f21' 16 17 18 def upgrade(): 19 # ### commands auto generated by Alembic - please adjust! ### 20 op.alter_column('badge_field_forms', 'font_weight', 21 existing_type=sa.TEXT(), 22 type_=postgresql.ARRAY(sa.JSON()), 23 postgresql_using='font_weight::json[]', 24 existing_nullable=True) 25 # ### end Alembic commands ### 26 27 28 def downgrade(): 29 # ### commands auto generated by Alembic - please adjust! ### 30 op.alter_column('badge_field_forms', 'font_weight', 31 existing_type=postgresql.ARRAY(sa.JSON()), 32 type_=sa.TEXT(), 33 existing_nullable=True) 34 # ### end Alembic commands ### 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py b/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py --- a/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py +++ b/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py @@ -17,18 +17,15 @@ def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.alter_column('badge_field_forms', 'font_weight', - existing_type=sa.TEXT(), - type_=postgresql.ARRAY(sa.JSON()), - postgresql_using='font_weight::json[]', - existing_nullable=True) + op.drop_column('badge_field_forms', 'font_weight') + op.add_column('badge_field_forms', sa.Column('font_weight', + postgresql.ARRAY(sa.JSON()), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.alter_column('badge_field_forms', 'font_weight', - existing_type=postgresql.ARRAY(sa.JSON()), - type_=sa.TEXT(), - existing_nullable=True) + op.drop_column('badge_field_forms', 'font_weight') + op.add_column('badge_field_forms', sa.Column('font_weight', + sa.Integer(), nullable=True)) # ### end Alembic commands ###
{"golden_diff": "diff --git a/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py b/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py\n--- a/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py\n+++ b/migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py\n@@ -17,18 +17,15 @@\n \n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n- op.alter_column('badge_field_forms', 'font_weight',\n- existing_type=sa.TEXT(),\n- type_=postgresql.ARRAY(sa.JSON()),\n- postgresql_using='font_weight::json[]',\n- existing_nullable=True)\n+ op.drop_column('badge_field_forms', 'font_weight')\n+ op.add_column('badge_field_forms', sa.Column('font_weight',\n+ postgresql.ARRAY(sa.JSON()), nullable=True))\n # ### end Alembic commands ###\n \n \n def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n- op.alter_column('badge_field_forms', 'font_weight',\n- existing_type=postgresql.ARRAY(sa.JSON()),\n- type_=sa.TEXT(),\n- existing_nullable=True)\n+ op.drop_column('badge_field_forms', 'font_weight')\n+ op.add_column('badge_field_forms', sa.Column('font_weight',\n+ sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n", "issue": "Cannot save the badge field\n```\r\nHINT: You will need to rewrite or cast the expression.\r\n\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/data/app/app/api/helpers/db.py\", line 27, in save_to_db\r\n db.session.commit()\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/scoping.py\", line 163, in do\r\n return getattr(self.registry(), name)(*args, **kwargs)\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py\", line 1046, in commit\r\n self.transaction.commit()\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py\", line 504, in commit\r\n self._prepare_impl()\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py\", line 483, in _prepare_impl\r\n self.session.flush()\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py\", line 2540, in flush\r\n self._flush(objects)\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py\", line 2682, in _flush\r\n transaction.rollback(_capture_exception=True)\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py\", line 68, in __exit__\r\n compat.raise_(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/util/compat.py\", line 182, in raise_\r\n raise exception\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py\", line 2642, in _flush\r\n flush_context.execute()\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py\", line 422, in execute\r\n rec.execute(self)\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py\", line 586, in execute\r\n persistence.save_obj(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py\", line 239, in save_obj\r\n _emit_insert_statements(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py\", line 1135, in _emit_insert_statements\r\n result = cached_connections[connection].execute(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 1011, in execute\r\n return meth(self, multiparams, params)\r\n File 
\"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/sql/elements.py\", line 298, in _execute_on_connection\r\n return connection._execute_clauseelement(self, multiparams, params)\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 1124, in _execute_clauseelement\r\n ret = self._execute_context(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 1316, in _execute_context\r\n self._handle_dbapi_exception(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 1510, in _handle_dbapi_exception\r\n util.raise_(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/util/compat.py\", line 182, in raise_\r\n raise exception\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 1276, in _execute_context\r\n self.dialect.do_execute(\r\n File \"/opt/pysetup/.venv/lib/python3.8/site-packages/sqlalchemy/engine/default.py\", line 608, in do_execute\r\n cursor.execute(statement, parameters)\r\nsqlalchemy.exc.ProgrammingError: (psycopg2.errors.DatatypeMismatch) column \"font_weight\" is of type integer but expression is of type json[]\r\nLINE 1: ...e', 'Last Name', 'Sample Text', 14, 'Arial', CAST(ARRAY['{\"n...\r\n```\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: 8b5bc48e1d4c\nRevises: 21c79d253f21\nCreate Date: 2023-08-01 14:10:12.187180\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '8b5bc48e1d4c'\ndown_revision = '21c79d253f21'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('badge_field_forms', 'font_weight',\n existing_type=sa.TEXT(),\n type_=postgresql.ARRAY(sa.JSON()),\n postgresql_using='font_weight::json[]',\n existing_nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('badge_field_forms', 'font_weight',\n existing_type=postgresql.ARRAY(sa.JSON()),\n type_=sa.TEXT(),\n existing_nullable=True)\n # ### end Alembic commands ###\n", "path": "migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py"}], "after_files": [{"content": "\"\"\"empty message\n\nRevision ID: 8b5bc48e1d4c\nRevises: 21c79d253f21\nCreate Date: 2023-08-01 14:10:12.187180\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '8b5bc48e1d4c'\ndown_revision = '21c79d253f21'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('badge_field_forms', 'font_weight')\n op.add_column('badge_field_forms', sa.Column('font_weight',\n postgresql.ARRAY(sa.JSON()), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('badge_field_forms', 'font_weight')\n op.add_column('badge_field_forms', sa.Column('font_weight',\n sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n", "path": "migrations/versions/rev-2023-08-01-14:10:12-8b5bc48e1d4c_.py"}]}
1619
412
gh_patches_debug_15969
rasdani/github-patches
git_diff
conda__conda-10090
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'conda create --dry-run --yes' deletes existing environment. ## Current Behavior If `--dry-run` and `--yes` are _both_ passed to `conda create`, and the named environment already exists, the named environment is deleted. ### Steps to Reproduce ``` # create the env conda create -n tmp -y conda env list | grep tmp # --dry-run without --yes leaves env in place conda create -n tmp --dry-run conda env list | grep tmp # --dry-run with --yes deletes the env conda create -n tmp --dry-run --yes conda env list | grep tmp ``` ## Expected Behavior 'conda create --dry-run' should not delete an existing environment. ## Environment Information <details open><summary><code>`conda info`</code></summary><p> ``` active environment : garpy.conda-dev active env location : /Users/aschweit/miniconda3/envs/garpy.conda-dev shell level : 2 user config file : /Users/aschweit/.condarc populated config files : /Users/aschweit/.condarc conda version : 4.8.2 conda-build version : 3.19.2 python version : 3.7.6.final.0 virtual packages : __osx=10.14.6 base environment : /Users/aschweit/miniconda3 (writable) channel URLs : https://repo.anaconda.com/pkgs/main/osx-64 https://repo.anaconda.com/pkgs/main/noarch https://repo.anaconda.com/pkgs/r/osx-64 https://repo.anaconda.com/pkgs/r/noarch package cache : /Users/aschweit/miniconda3/pkgs /Users/aschweit/.conda/pkgs envs directories : /Users/aschweit/miniconda3/envs /Users/aschweit/.conda/envs platform : osx-64 user-agent : conda/4.8.2 requests/2.23.0 CPython/3.7.6 Darwin/18.7.0 OSX/10.14.6 UID:GID : 501:20 netrc file : None offline mode : False ``` </p></details> <details open><summary><code>`conda config --show-sources`</code></summary><p> ``` ==> /Users/aschweit/.condarc <== auto_update_conda: False auto_stack: 1 ``` </p></details> <details><summary><code>`conda list --show-channel-urls`</code></summary><p> ``` # packages in environment at /Users/aschweit/miniconda3: # # Name Version Build Channel appdirs 1.4.3 py_1 conda-forge arrow 0.15.6 py37hc8dfbb8_1 conda-forge attrs 19.3.0 py_0 conda-forge beautifulsoup4 4.9.0 py37hc8dfbb8_0 conda-forge binaryornot 0.4.4 py_1 conda-forge black 19.10b0 py37_0 conda-forge bleach 3.1.4 py_0 defaults brotlipy 0.7.0 py37h9bfed18_1000 conda-forge bzip2 1.0.8 h0b31af3_2 conda-forge ca-certificates 2020.4.5.1 hecc5488_0 conda-forge certifi 2020.4.5.1 py37hc8dfbb8_0 conda-forge cffi 1.14.0 py37h356ff06_0 conda-forge chardet 3.0.4 py37hc8dfbb8_1006 conda-forge click 7.1.2 pyh9f0ad1d_0 conda-forge cmarkgfm 0.4.2 py37h1de35cc_0 defaults conda 4.8.2 py37_0 defaults conda-build 3.19.2 py37hc8dfbb8_2 conda-forge conda-package-handling 1.6.0 py37h9bfed18_2 conda-forge cookiecutter 1.7.2 pyh9f0ad1d_0 conda-forge cryptography 2.9.2 py37he655712_0 conda-forge curl 7.69.1 ha441bb4_0 defaults decorator 4.4.2 py_0 defaults docutils 0.16 py37_0 defaults expat 2.2.9 h4a8c4bd_2 conda-forge filelock 3.0.10 py_0 conda-forge future 0.18.2 py37_0 defaults gettext 0.19.8.1 h46ab8bc_1002 conda-forge git 2.26.2 pl526hcc376a2_0 conda-forge glob2 0.7 py_0 conda-forge icu 67.1 h4a8c4bd_0 conda-forge idna 2.9 py_1 conda-forge importlib-metadata 1.6.0 py37hc8dfbb8_0 conda-forge importlib_metadata 1.6.0 0 conda-forge jinja2 2.11.2 pyh9f0ad1d_0 conda-forge jinja2-time 0.2.0 py_2 conda-forge krb5 1.17.1 hddcf347_0 defaults libarchive 3.3.3 h02796b4_1008 conda-forge libcurl 7.69.1 h051b688_0 defaults libcxx 10.0.0 h1af66ff_2 conda-forge libedit 3.1.20181209 
hb402a30_0 defaults libffi 3.2.1 h4a8c4bd_1007 conda-forge libiconv 1.15 h0b31af3_1006 conda-forge liblief 0.9.0 h3e78482_1 conda-forge libssh2 1.9.0 ha12b0ac_1 defaults libxml2 2.9.10 hc06c4ae_1 conda-forge lz4-c 1.9.2 h4a8c4bd_1 conda-forge lzo 2.10 h1de35cc_1000 conda-forge markupsafe 1.1.1 py37h9bfed18_1 conda-forge mypy_extensions 0.4.3 py37hc8dfbb8_1 conda-forge ncurses 6.1 h0a44026_1002 conda-forge networkx 2.4 py_0 defaults openssl 1.1.1g h0b31af3_0 conda-forge packaging 20.1 py_0 conda-forge pathspec 0.8.0 pyh9f0ad1d_0 conda-forge pcre 8.44 h4a8c4bd_0 conda-forge perl 5.26.2 haec8ef5_1006 conda-forge pip 20.1 pyh9f0ad1d_0 conda-forge pkginfo 1.5.0.1 py_0 conda-forge pluggy 0.13.1 py37hc8dfbb8_1 conda-forge poyo 0.5.0 py_0 conda-forge psutil 5.7.0 py37h9bfed18_1 conda-forge py 1.8.1 py_0 conda-forge py-lief 0.9.0 py37h0ceac7d_1 conda-forge pycosat 0.6.3 py37h9bfed18_1004 conda-forge pycparser 2.20 py_0 conda-forge pygments 2.6.1 py_0 defaults pyopenssl 19.1.0 py_1 conda-forge pyparsing 2.4.7 pyh9f0ad1d_0 conda-forge pysocks 1.7.1 py37hc8dfbb8_1 conda-forge python 3.7.6 h359304d_2 defaults python-dateutil 2.8.1 py_0 conda-forge python-libarchive-c 2.9 py37_0 conda-forge python-slugify 4.0.0 pyh9f0ad1d_1 conda-forge python.app 2 py37_10 defaults python_abi 3.7 1_cp37m conda-forge pytz 2020.1 pyh9f0ad1d_0 conda-forge pyyaml 5.3.1 py37h9bfed18_0 conda-forge readline 7.0 hcfe32e1_1001 conda-forge readme_renderer 24.0 py37_0 defaults regex 2020.5.14 py37h9bfed18_0 conda-forge requests 2.23.0 pyh8c360ce_2 conda-forge requests-toolbelt 0.9.1 py_0 defaults ripgrep 12.1.0 h0b31af3_0 conda-forge ruamel_yaml 0.15.80 py37h9bfed18_1001 conda-forge setuptools 46.3.0 py37hc8dfbb8_0 conda-forge six 1.14.0 py_1 conda-forge soupsieve 1.9.4 py37hc8dfbb8_1 conda-forge sqlite 3.31.1 ha441bb4_0 defaults text-unidecode 1.3 py_0 conda-forge tk 8.6.10 hbbe82c9_0 conda-forge toml 0.10.0 py_0 conda-forge tox 3.15.0 py37hc8dfbb8_0 conda-forge tox-conda 0.2.1 py_0 conda-forge tqdm 4.46.0 pyh9f0ad1d_0 conda-forge tree 1.8.0 h0b31af3_1 conda-forge twine 2.0.0 py_0 defaults typed-ast 1.4.1 py37h0b31af3_0 conda-forge typing_extensions 3.7.4.2 py_0 conda-forge unidecode 1.1.1 py_0 conda-forge urllib3 1.25.9 py_0 conda-forge virtualenv 16.7.5 py_0 conda-forge webencodings 0.5.1 py37_1 defaults wheel 0.34.2 py_1 conda-forge whichcraft 0.6.1 py_0 conda-forge xz 5.2.5 h0b31af3_0 conda-forge yaml 0.2.4 h0b31af3_0 conda-forge zipp 3.1.0 py_0 conda-forge zlib 1.2.11 h0b31af3_1006 conda-forge zstd 1.4.4 h4b3e974_3 conda-forge ``` </p></details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda/cli/main_create.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright (C) 2012 Anaconda, Inc 3 # SPDX-License-Identifier: BSD-3-Clause 4 from __future__ import absolute_import, division, print_function, unicode_literals 5 6 from logging import getLogger 7 from os.path import isdir 8 9 from .common import confirm_yn 10 from .install import install 11 from ..base.context import context 12 from ..common.path import paths_equal 13 from ..exceptions import CondaValueError 14 from ..gateways.disk.delete import rm_rf 15 from ..gateways.disk.test import is_conda_environment 16 17 log = getLogger(__name__) 18 19 20 def execute(args, parser): 21 if is_conda_environment(context.target_prefix): 22 if paths_equal(context.target_prefix, context.root_prefix): 23 raise CondaValueError("The target prefix is the base prefix. 
Aborting.") 24 confirm_yn("WARNING: A conda environment already exists at '%s'\n" 25 "Remove existing environment" % context.target_prefix, 26 default='no', 27 dry_run=False) 28 log.info("Removing existing environment %s", context.target_prefix) 29 rm_rf(context.target_prefix) 30 elif isdir(context.target_prefix): 31 confirm_yn("WARNING: A directory already exists at the target location '%s'\n" 32 "but it is not a conda environment.\n" 33 "Continue creating environment" % context.target_prefix, 34 default='no', 35 dry_run=False) 36 37 install(args, parser, 'create') 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda/cli/main_create.py b/conda/cli/main_create.py --- a/conda/cli/main_create.py +++ b/conda/cli/main_create.py @@ -21,6 +21,10 @@ if is_conda_environment(context.target_prefix): if paths_equal(context.target_prefix, context.root_prefix): raise CondaValueError("The target prefix is the base prefix. Aborting.") + if context.dry_run: + # Taking the "easy" way out, rather than trying to fake removing + # the existing environment before creating a new one. + raise CondaValueError("Cannot `create --dry-run` with an existing conda environment") confirm_yn("WARNING: A conda environment already exists at '%s'\n" "Remove existing environment" % context.target_prefix, default='no',
{"golden_diff": "diff --git a/conda/cli/main_create.py b/conda/cli/main_create.py\n--- a/conda/cli/main_create.py\n+++ b/conda/cli/main_create.py\n@@ -21,6 +21,10 @@\n if is_conda_environment(context.target_prefix):\n if paths_equal(context.target_prefix, context.root_prefix):\n raise CondaValueError(\"The target prefix is the base prefix. Aborting.\")\n+ if context.dry_run:\n+ # Taking the \"easy\" way out, rather than trying to fake removing\n+ # the existing environment before creating a new one.\n+ raise CondaValueError(\"Cannot `create --dry-run` with an existing conda environment\")\n confirm_yn(\"WARNING: A conda environment already exists at '%s'\\n\"\n \"Remove existing environment\" % context.target_prefix,\n default='no',\n", "issue": "'conda create --dry-run --yes' deletes existing environment. \n## Current Behavior\r\nIf `--dry-run` and `--yes` are _both_ passed to `conda create`, and the named environment already exists, the named environment is deleted. \r\n\r\n\r\n### Steps to Reproduce\r\n```\r\n# create the env\r\nconda create -n tmp -y \r\nconda env list | grep tmp\r\n\r\n# --dry-run without --yes leaves env in place\r\nconda create -n tmp --dry-run\r\nconda env list | grep tmp\r\n\r\n# --dry-run with --yes deletes the env\r\nconda create -n tmp --dry-run --yes\r\nconda env list | grep tmp\r\n\r\n```\r\n\r\n\r\n## Expected Behavior\r\n'conda create --dry-run' should not delete an existing environment. \r\n\r\n## Environment Information\r\n<details open><summary><code>`conda info`</code></summary><p>\r\n\r\n```\r\n active environment : garpy.conda-dev\r\n active env location : /Users/aschweit/miniconda3/envs/garpy.conda-dev\r\n shell level : 2\r\n user config file : /Users/aschweit/.condarc\r\n populated config files : /Users/aschweit/.condarc\r\n conda version : 4.8.2\r\n conda-build version : 3.19.2\r\n python version : 3.7.6.final.0\r\n virtual packages : __osx=10.14.6\r\n base environment : /Users/aschweit/miniconda3 (writable)\r\n channel URLs : https://repo.anaconda.com/pkgs/main/osx-64\r\n https://repo.anaconda.com/pkgs/main/noarch\r\n https://repo.anaconda.com/pkgs/r/osx-64\r\n https://repo.anaconda.com/pkgs/r/noarch\r\n package cache : /Users/aschweit/miniconda3/pkgs\r\n /Users/aschweit/.conda/pkgs\r\n envs directories : /Users/aschweit/miniconda3/envs\r\n /Users/aschweit/.conda/envs\r\n platform : osx-64\r\n user-agent : conda/4.8.2 requests/2.23.0 CPython/3.7.6 Darwin/18.7.0 OSX/10.14.6\r\n UID:GID : 501:20\r\n netrc file : None\r\n offline mode : False\r\n\r\n```\r\n</p></details>\r\n\r\n\r\n<details open><summary><code>`conda config --show-sources`</code></summary><p>\r\n\r\n```\r\n==> /Users/aschweit/.condarc <==\r\nauto_update_conda: False\r\nauto_stack: 1\r\n```\r\n</p></details>\r\n\r\n\r\n<details><summary><code>`conda list --show-channel-urls`</code></summary><p>\r\n\r\n```\r\n# packages in environment at /Users/aschweit/miniconda3:\r\n#\r\n# Name Version Build Channel\r\nappdirs 1.4.3 py_1 conda-forge\r\narrow 0.15.6 py37hc8dfbb8_1 conda-forge\r\nattrs 19.3.0 py_0 conda-forge\r\nbeautifulsoup4 4.9.0 py37hc8dfbb8_0 conda-forge\r\nbinaryornot 0.4.4 py_1 conda-forge\r\nblack 19.10b0 py37_0 conda-forge\r\nbleach 3.1.4 py_0 defaults\r\nbrotlipy 0.7.0 py37h9bfed18_1000 conda-forge\r\nbzip2 1.0.8 h0b31af3_2 conda-forge\r\nca-certificates 2020.4.5.1 hecc5488_0 conda-forge\r\ncertifi 2020.4.5.1 py37hc8dfbb8_0 conda-forge\r\ncffi 1.14.0 py37h356ff06_0 conda-forge\r\nchardet 3.0.4 py37hc8dfbb8_1006 conda-forge\r\nclick 7.1.2 pyh9f0ad1d_0 conda-forge\r\ncmarkgfm 
0.4.2 py37h1de35cc_0 defaults\r\nconda 4.8.2 py37_0 defaults\r\nconda-build 3.19.2 py37hc8dfbb8_2 conda-forge\r\nconda-package-handling 1.6.0 py37h9bfed18_2 conda-forge\r\ncookiecutter 1.7.2 pyh9f0ad1d_0 conda-forge\r\ncryptography 2.9.2 py37he655712_0 conda-forge\r\ncurl 7.69.1 ha441bb4_0 defaults\r\ndecorator 4.4.2 py_0 defaults\r\ndocutils 0.16 py37_0 defaults\r\nexpat 2.2.9 h4a8c4bd_2 conda-forge\r\nfilelock 3.0.10 py_0 conda-forge\r\nfuture 0.18.2 py37_0 defaults\r\ngettext 0.19.8.1 h46ab8bc_1002 conda-forge\r\ngit 2.26.2 pl526hcc376a2_0 conda-forge\r\nglob2 0.7 py_0 conda-forge\r\nicu 67.1 h4a8c4bd_0 conda-forge\r\nidna 2.9 py_1 conda-forge\r\nimportlib-metadata 1.6.0 py37hc8dfbb8_0 conda-forge\r\nimportlib_metadata 1.6.0 0 conda-forge\r\njinja2 2.11.2 pyh9f0ad1d_0 conda-forge\r\njinja2-time 0.2.0 py_2 conda-forge\r\nkrb5 1.17.1 hddcf347_0 defaults\r\nlibarchive 3.3.3 h02796b4_1008 conda-forge\r\nlibcurl 7.69.1 h051b688_0 defaults\r\nlibcxx 10.0.0 h1af66ff_2 conda-forge\r\nlibedit 3.1.20181209 hb402a30_0 defaults\r\nlibffi 3.2.1 h4a8c4bd_1007 conda-forge\r\nlibiconv 1.15 h0b31af3_1006 conda-forge\r\nliblief 0.9.0 h3e78482_1 conda-forge\r\nlibssh2 1.9.0 ha12b0ac_1 defaults\r\nlibxml2 2.9.10 hc06c4ae_1 conda-forge\r\nlz4-c 1.9.2 h4a8c4bd_1 conda-forge\r\nlzo 2.10 h1de35cc_1000 conda-forge\r\nmarkupsafe 1.1.1 py37h9bfed18_1 conda-forge\r\nmypy_extensions 0.4.3 py37hc8dfbb8_1 conda-forge\r\nncurses 6.1 h0a44026_1002 conda-forge\r\nnetworkx 2.4 py_0 defaults\r\nopenssl 1.1.1g h0b31af3_0 conda-forge\r\npackaging 20.1 py_0 conda-forge\r\npathspec 0.8.0 pyh9f0ad1d_0 conda-forge\r\npcre 8.44 h4a8c4bd_0 conda-forge\r\nperl 5.26.2 haec8ef5_1006 conda-forge\r\npip 20.1 pyh9f0ad1d_0 conda-forge\r\npkginfo 1.5.0.1 py_0 conda-forge\r\npluggy 0.13.1 py37hc8dfbb8_1 conda-forge\r\npoyo 0.5.0 py_0 conda-forge\r\npsutil 5.7.0 py37h9bfed18_1 conda-forge\r\npy 1.8.1 py_0 conda-forge\r\npy-lief 0.9.0 py37h0ceac7d_1 conda-forge\r\npycosat 0.6.3 py37h9bfed18_1004 conda-forge\r\npycparser 2.20 py_0 conda-forge\r\npygments 2.6.1 py_0 defaults\r\npyopenssl 19.1.0 py_1 conda-forge\r\npyparsing 2.4.7 pyh9f0ad1d_0 conda-forge\r\npysocks 1.7.1 py37hc8dfbb8_1 conda-forge\r\npython 3.7.6 h359304d_2 defaults\r\npython-dateutil 2.8.1 py_0 conda-forge\r\npython-libarchive-c 2.9 py37_0 conda-forge\r\npython-slugify 4.0.0 pyh9f0ad1d_1 conda-forge\r\npython.app 2 py37_10 defaults\r\npython_abi 3.7 1_cp37m conda-forge\r\npytz 2020.1 pyh9f0ad1d_0 conda-forge\r\npyyaml 5.3.1 py37h9bfed18_0 conda-forge\r\nreadline 7.0 hcfe32e1_1001 conda-forge\r\nreadme_renderer 24.0 py37_0 defaults\r\nregex 2020.5.14 py37h9bfed18_0 conda-forge\r\nrequests 2.23.0 pyh8c360ce_2 conda-forge\r\nrequests-toolbelt 0.9.1 py_0 defaults\r\nripgrep 12.1.0 h0b31af3_0 conda-forge\r\nruamel_yaml 0.15.80 py37h9bfed18_1001 conda-forge\r\nsetuptools 46.3.0 py37hc8dfbb8_0 conda-forge\r\nsix 1.14.0 py_1 conda-forge\r\nsoupsieve 1.9.4 py37hc8dfbb8_1 conda-forge\r\nsqlite 3.31.1 ha441bb4_0 defaults\r\ntext-unidecode 1.3 py_0 conda-forge\r\ntk 8.6.10 hbbe82c9_0 conda-forge\r\ntoml 0.10.0 py_0 conda-forge\r\ntox 3.15.0 py37hc8dfbb8_0 conda-forge\r\ntox-conda 0.2.1 py_0 conda-forge\r\ntqdm 4.46.0 pyh9f0ad1d_0 conda-forge\r\ntree 1.8.0 h0b31af3_1 conda-forge\r\ntwine 2.0.0 py_0 defaults\r\ntyped-ast 1.4.1 py37h0b31af3_0 conda-forge\r\ntyping_extensions 3.7.4.2 py_0 conda-forge\r\nunidecode 1.1.1 py_0 conda-forge\r\nurllib3 1.25.9 py_0 conda-forge\r\nvirtualenv 16.7.5 py_0 conda-forge\r\nwebencodings 0.5.1 py37_1 defaults\r\nwheel 0.34.2 py_1 conda-forge\r\nwhichcraft 
0.6.1 py_0 conda-forge\r\nxz 5.2.5 h0b31af3_0 conda-forge\r\nyaml 0.2.4 h0b31af3_0 conda-forge\r\nzipp 3.1.0 py_0 conda-forge\r\nzlib 1.2.11 h0b31af3_1006 conda-forge\r\nzstd 1.4.4 h4b3e974_3 conda-forge\r\n\r\n```\r\n</p></details>\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger\nfrom os.path import isdir\n\nfrom .common import confirm_yn\nfrom .install import install\nfrom ..base.context import context\nfrom ..common.path import paths_equal\nfrom ..exceptions import CondaValueError\nfrom ..gateways.disk.delete import rm_rf\nfrom ..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\ndef execute(args, parser):\n if is_conda_environment(context.target_prefix):\n if paths_equal(context.target_prefix, context.root_prefix):\n raise CondaValueError(\"The target prefix is the base prefix. Aborting.\")\n confirm_yn(\"WARNING: A conda environment already exists at '%s'\\n\"\n \"Remove existing environment\" % context.target_prefix,\n default='no',\n dry_run=False)\n log.info(\"Removing existing environment %s\", context.target_prefix)\n rm_rf(context.target_prefix)\n elif isdir(context.target_prefix):\n confirm_yn(\"WARNING: A directory already exists at the target location '%s'\\n\"\n \"but it is not a conda environment.\\n\"\n \"Continue creating environment\" % context.target_prefix,\n default='no',\n dry_run=False)\n\n install(args, parser, 'create')\n", "path": "conda/cli/main_create.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger\nfrom os.path import isdir\n\nfrom .common import confirm_yn\nfrom .install import install\nfrom ..base.context import context\nfrom ..common.path import paths_equal\nfrom ..exceptions import CondaValueError\nfrom ..gateways.disk.delete import rm_rf\nfrom ..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\ndef execute(args, parser):\n if is_conda_environment(context.target_prefix):\n if paths_equal(context.target_prefix, context.root_prefix):\n raise CondaValueError(\"The target prefix is the base prefix. Aborting.\")\n if context.dry_run:\n # Taking the \"easy\" way out, rather than trying to fake removing\n # the existing environment before creating a new one.\n raise CondaValueError(\"Cannot `create --dry-run` with an existing conda environment\")\n confirm_yn(\"WARNING: A conda environment already exists at '%s'\\n\"\n \"Remove existing environment\" % context.target_prefix,\n default='no',\n dry_run=False)\n log.info(\"Removing existing environment %s\", context.target_prefix)\n rm_rf(context.target_prefix)\n elif isdir(context.target_prefix):\n confirm_yn(\"WARNING: A directory already exists at the target location '%s'\\n\"\n \"but it is not a conda environment.\\n\"\n \"Continue creating environment\" % context.target_prefix,\n default='no',\n dry_run=False)\n\n install(args, parser, 'create')\n", "path": "conda/cli/main_create.py"}]}
3878
184